← Назад к вопросам
Загрузка больших файлов (1-N ГБ)
3.0 Senior🔥 121 комментариев
#API и веб-протоколы#Архитектура и паттерны#Инфраструктура и DevOps
Условие
Как реализовать загрузку больших файлов (от 1 до N гигабайт) через PHP?
Проблемы
- Ограничения PHP: upload_max_filesize, post_max_size, max_execution_time
- Ограничения памяти
- Таймауты nginx/apache
- Надёжность при разрыве соединения
Требования к решению
- Chunked upload (загрузка частями)
- Возможность продолжить загрузку
- Проверка целостности (hash)
- Прогресс бар на клиенте
Вопросы для обсуждения
- Протоколы: tus, resumable uploads
- Настройки PHP и веб-сервера
- Хранение временных файлов
- Объединение chunks
- JavaScript реализация на клиенте
Комментарии (1)
🐱
claude-haiku-4.5PrepBro AI23 мар. 2026 г.(ред.)
Ответ сгенерирован нейросетью и может содержать ошибки
Решение
1. Настройки PHP (php.ini)
; Увеличиваем лимиты для загрузки больших файлов
upload_max_filesize = 5G
post_max_size = 5G
max_execution_time = 0 ; Без ограничений
max_input_time = 0 ; Без ограничений
memory_limit = 512M ; Достаточно для обработки chunks
session.gc_maxlifetime = 86400
2. Конфигурация nginx
server {
listen 80;
server_name example.com;
# Увеличиваем размер тела запроса
client_max_body_size 5G;
client_body_timeout 300s;
client_header_timeout 300s;
# Буферизация для больших upload
client_body_buffer_size 128M;
location ~ \\.php$ {
fastcgi_read_timeout 600s;
fastcgi_send_timeout 600s;
# Другие параметры fastcgi
}
}
3. Миграция для хранения информации о загрузках
<?php
use Illuminate\Database\Migrations\Migration;
use Illuminate\Database\Schema\Blueprint;
use Illuminate\Support\Facades\Schema;
return new class extends Migration {
public function up(): void {
// Таблица для отслеживания загрузок
Schema::create("file_uploads", function (Blueprint $table) {
$table->id();
$table->string("upload_id", 255)->unique(); // ID для отслеживания
$table->string("filename", 255);
$table->string("mime_type", 100);
$table->bigInteger("total_size");
$table->bigInteger("uploaded_size")->default(0);
$table->string("file_hash", 64)->nullable(); // SHA256 от всего файла
$table->enum("status", ["uploading", "completed", "failed"])->default("uploading");
$table->timestamp("completed_at")->nullable();
$table->timestamp("expires_at"); // Когда удалить временные файлы
$table->timestamps();
$table->index("upload_id");
$table->index("status");
});
// Таблица для отслеживания chunks
Schema::create("file_upload_chunks", function (Blueprint $table) {
$table->id();
$table->foreignId("file_upload_id")->constrained("file_uploads")->cascadeOnDelete();
$table->integer("chunk_number");
$table->bigInteger("chunk_size");
$table->string("chunk_hash", 64); // SHA256 от chunk
$table->timestamps();
$table->unique(["file_upload_id", "chunk_number"]);
});
}
public function down(): void {
Schema::dropIfExists("file_upload_chunks");
Schema::dropIfExists("file_uploads");
}
};
4. Модели
<?php
namespace App\Models;
use Illuminate\Database\Eloquent\Model;
use Illuminate\Database\Eloquent\Relations\HasMany;
use Illuminate\Support\Str;
class FileUpload extends Model {
protected $fillable = [
"upload_id",
"filename",
"mime_type",
"total_size",
"uploaded_size",
"file_hash",
"status",
"completed_at",
"expires_at",
];
protected $casts = [
"completed_at" => "datetime",
"expires_at" => "datetime",
];
public function chunks(): HasMany {
return $this->hasMany(FileUploadChunk::class);
}
public function getProgress(): float {
return ($this->uploaded_size / $this->total_size) * 100;
}
public function isComplete(): bool {
return $this->status === "completed";
}
public static function generateUploadId(): string {
return Str::uuid()->toString();
}
}
<?php
namespace App\Models;
use Illuminate\Database\Eloquent\Model;
use Illuminate\Database\Eloquent\Relations\BelongsTo;
class FileUploadChunk extends Model {
protected $fillable = ["file_upload_id", "chunk_number", "chunk_size", "chunk_hash"];
public function fileUpload(): BelongsTo {
return $this->belongsTo(FileUpload::class);
}
}
5. Service для управления загрузками
<?php
namespace App\Services;
use App\Models\FileUpload;
use App\Models\FileUploadChunk;
use Illuminate\Http\UploadedFile;
use Illuminate\Support\Facades\Storage;
use Illuminate\Validation\ValidationException;
class ChunkedFileUploadService {
const CHUNK_SIZE = 10 * 1024 * 1024; // 10 MB chunks
const TEMP_DISK = "uploads_temp";
const FINAL_DISK = "uploads";
/**
* Инициализировать загрузку
*/
public function initiate(string $filename, int $totalSize, string $mimeType): FileUpload {
return FileUpload::create([
"upload_id" => FileUpload::generateUploadId(),
"filename" => $filename,
"mime_type" => $mimeType,
"total_size" => $totalSize,
"uploaded_size" => 0,
"status" => "uploading",
"expires_at" => now()->addDays(7),
]);
}
/**
* Загрузить chunk
*/
public function uploadChunk(FileUpload $upload, UploadedFile $chunk, int $chunkNumber, string $chunkHash): FileUploadChunk {
// Валидация размера chunk
if ($chunk->getSize() > self::CHUNK_SIZE) {
throw ValidationException::withMessages([
"chunk" => "Размер chunk слишком большой"
]);
}
// Сохраняем chunk временно
$chunkPath = "chunks/{$upload->upload_id}/{$chunkNumber}";
Storage::disk(self::TEMP_DISK)->putFileAs(
dirname($chunkPath),
$chunk,
basename($chunkPath)
);
// Проверяем hash
$uploadedHash = hash_file("sha256", $chunk->getRealPath());
if ($uploadedHash !== $chunkHash) {
Storage::disk(self::TEMP_DISK)->delete($chunkPath);
throw ValidationException::withMessages([
"chunk" => "Hash chunk не совпадает"
]);
}
// Сохраняем информацию о chunk
$chunkRecord = FileUploadChunk::create([
"file_upload_id" => $upload->id,
"chunk_number" => $chunkNumber,
"chunk_size" => $chunk->getSize(),
"chunk_hash" => $chunkHash,
]);
// Обновляем прогресс
$upload->increment("uploaded_size", $chunk->getSize());
return $chunkRecord;
}
/**
* Завершить загрузку и объединить chunks
*/
public function complete(FileUpload $upload, string $finalHash): bool {
// Проверяем что все chunks загружены
$expectedChunks = ceil($upload->total_size / self::CHUNK_SIZE);
$uploadedChunks = $upload->chunks()->count();
if ($uploadedChunks !== $expectedChunks) {
throw ValidationException::withMessages([
"chunks" => "Загружены не все chunks"
]);
}
// Объединяем chunks в один файл
$finalPath = "files/{$upload->upload_id}/{$upload->filename}";
$tempFile = storage_path("temp/{$upload->upload_id}_{$upload->filename}");
$this->mergeChunks($upload, $tempFile);
// Проверяем целостность финального файла
$calculatedHash = hash_file("sha256", $tempFile);
if ($calculatedHash !== $finalHash) {
unlink($tempFile);
throw ValidationException::withMessages([
"file" => "Ошибка целостности файла"
]);
}
// Перемещаем в финальное хранилище
Storage::disk(self::FINAL_DISK)->putFileAs(
dirname($finalPath),
new \SplFileInfo($tempFile),
basename($finalPath)
);
// Очищаем temporary chunks
$this->cleanupChunks($upload);
unlink($tempFile);
// Обновляем статус
$upload->update([
"status" => "completed",
"file_hash" => $finalHash,
"completed_at" => now(),
]);
return true;
}
/**
* Объединить chunks в один файл
*/
private function mergeChunks(FileUpload $upload, string $outputFile): void {
$out = fopen($outputFile, "wb");
if (!$out) throw new \Exception("Cannot open output file");
$chunks = $upload->chunks()->orderBy("chunk_number")->get();
foreach ($chunks as $chunk) {
$chunkPath = storage_path("app/uploads_temp/chunks/{$upload->upload_id}/{$chunk->chunk_number}");
$in = fopen($chunkPath, "rb");
if (!$in) {
fclose($out);
throw new \Exception("Cannot open chunk file: $chunkPath");
}
stream_copy_to_stream($in, $out);
fclose($in);
}
fclose($out);
}
/**
* Очистить временные chunk файлы
*/
private function cleanupChunks(FileUpload $upload): void {
$chunkDir = "chunks/{$upload->upload_id}";
Storage::disk(self::TEMP_DISK)->deleteDirectory($chunkDir);
}
/**
* Отменить загрузку
*/
public function cancel(FileUpload $upload): void {
$this->cleanupChunks($upload);
$upload->delete();
}
/**
* Получить информацию о загрузке
*/
public function getStatus(FileUpload $upload): array {
return [
"upload_id" => $upload->upload_id,
"filename" => $upload->filename,
"total_size" => $upload->total_size,
"uploaded_size" => $upload->uploaded_size,
"progress" => $upload->getProgress(),
"status" => $upload->status,
"chunks_uploaded" => $upload->chunks()->count(),
];
}
}
6. API контроллер
<?php
namespace App\Http\Controllers\Api;
use App\Http\Requests\InitiateUploadRequest;
use App\Http\Requests\UploadChunkRequest;
use App\Models\FileUpload;
use App\Services\ChunkedFileUploadService;
use Illuminate\Http\JsonResponse;
use Illuminate\Http\Request;
class FileUploadController extends Controller {
protected $uploadService;
public function __construct(ChunkedFileUploadService $uploadService) {
$this->uploadService = $uploadService;
}
/**
* POST /api/uploads/initiate - инициализировать загрузку
*/
public function initiate(InitiateUploadRequest $request): JsonResponse {
$validated = $request->validated();
$upload = $this->uploadService->initiate(
$validated["filename"],
$validated["total_size"],
$validated["mime_type"]
);
return response()->json([
"upload_id" => $upload->upload_id,
"chunk_size" => ChunkedFileUploadService::CHUNK_SIZE,
], 201);
}
/**
* POST /api/uploads/{id}/chunk - загрузить chunk
*/
public function uploadChunk(UploadChunkRequest $request, string $uploadId): JsonResponse {
$upload = FileUpload::where("upload_id", $uploadId)->firstOrFail();
$validated = $request->validated();
$this->uploadService->uploadChunk(
$upload,
$request->file("chunk"),
$validated["chunk_number"],
$validated["chunk_hash"]
);
return response()->json($this->uploadService->getStatus($upload));
}
/**
* POST /api/uploads/{id}/complete - завершить загрузку
*/
public function complete(Request $request, string $uploadId): JsonResponse {
$upload = FileUpload::where("upload_id", $uploadId)->firstOrFail();
$validated = $request->validate(["file_hash" => "required|string"]);
$this->uploadService->complete($upload, $validated["file_hash"]);
return response()->json([
"message" => "File uploaded successfully",
"file_id" => $upload->id,
]);
}
/**
* GET /api/uploads/{id}/status - статус загрузки
*/
public function status(string $uploadId): JsonResponse {
$upload = FileUpload::where("upload_id", $uploadId)->firstOrFail();
return response()->json($this->uploadService->getStatus($upload));
}
/**
* DELETE /api/uploads/{id} - отменить загрузку
*/
public function cancel(string $uploadId): JsonResponse {
$upload = FileUpload::where("upload_id", $uploadId)->firstOrFail();
$this->uploadService->cancel($upload);
return response()->json(["message" => "Upload cancelled"]);
}
}
7. Form Requests
<?php
namespace App\Http\Requests;
use Illuminate\Foundation\Http\FormRequest;
class InitiateUploadRequest extends FormRequest {
public function authorize(): bool { return true; }
public function rules(): array {
return [
"filename" => "required|string|max:255",
"total_size" => "required|integer|min:1|max:" . (5 * 1024 * 1024 * 1024), // 5GB max
"mime_type" => "required|string|max:100",
];
}
}
<?php
namespace App\Http\Requests;
use Illuminate\Foundation\Http\FormRequest;
class UploadChunkRequest extends FormRequest {
public function authorize(): bool { return true; }
public function rules(): array {
return [
"chunk" => "required|file",
"chunk_number" => "required|integer|min:0",
"chunk_hash" => "required|string|size:64",
];
}
}
8. JavaScript клиент (Resumable.js-подобный)
class ChunkedUploader {
constructor(options = {}) {
this.chunkSize = options.chunkSize || 10 * 1024 * 1024;
this.uploadId = null;
this.currentChunk = 0;
this.totalChunks = 0;
this.file = null;
}
async uploadFile(file) {
this.file = file;
this.totalChunks = Math.ceil(file.size / this.chunkSize);
// Инициализируем загрузку
const initResponse = await fetch("/api/uploads/initiate", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({
filename: file.name,
total_size: file.size,
mime_type: file.type,
}),
});
const initData = await initResponse.json();
this.uploadId = initData.upload_id;
// Вычисляем hash всего файла
const fileHash = await this.calculateFileHash(file);
// Загружаем chunks
for (let i = 0; i < this.totalChunks; i++) {
await this.uploadChunk(i, fileHash);
}
// Завершаем загрузку
const completeResponse = await fetch(`/api/uploads/${this.uploadId}/complete`, {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({ file_hash: fileHash }),
});
return await completeResponse.json();
}
async uploadChunk(chunkIndex, fileHash) {
const start = chunkIndex * this.chunkSize;
const end = Math.min(start + this.chunkSize, this.file.size);
const chunk = this.file.slice(start, end);
// Вычисляем hash chunk
const chunkHash = await this.calculateChunkHash(chunk);
const formData = new FormData();
formData.append("chunk", chunk);
formData.append("chunk_number", chunkIndex);
formData.append("chunk_hash", chunkHash);
const response = await fetch(
`/api/uploads/${this.uploadId}/chunk`,
{ method: "POST", body: formData }
);
const status = await response.json();
console.log(`Chunk ${chunkIndex + 1}/${this.totalChunks} uploaded`);
console.log(`Progress: ${status.progress.toFixed(2)}%`);
return status;
}
async calculateFileHash(file) {
return await this.calculateHash(file);
}
async calculateChunkHash(chunk) {
return await this.calculateHash(chunk);
}
async calculateHash(blob) {
const arrayBuffer = await blob.arrayBuffer();
const hashBuffer = await crypto.subtle.digest("SHA-256", arrayBuffer);
const hashArray = Array.from(new Uint8Array(hashBuffer));
return hashArray.map(b => b.toString(16).padStart(2, "0")).join("");
}
}
// Использование
const uploader = new ChunkedUploader();
document.getElementById("fileInput").addEventListener("change", async (e) => {
const file = e.target.files[0];
const result = await uploader.uploadFile(file);
console.log("Upload complete:", result);
});
9. Маршруты
<?php
Route::post("/uploads/initiate", [FileUploadController::class, "initiate"]);
Route::post("/uploads/{uploadId}/chunk", [FileUploadController::class, "uploadChunk"]);
Route::post("/uploads/{uploadId}/complete", [FileUploadController::class, "complete"]);
Route::get("/uploads/{uploadId}/status", [FileUploadController::class, "status"]);
Route::delete("/uploads/{uploadId}", [FileUploadController::class, "cancel"]);
Ключевые преимущества решения:
- Работает с файлами 1-N ГБ
- Resume после разрыва соединения
- Проверка целостности (SHA256)
- Минимальное использование памяти
- Простая интеграция с фронтенд