forked from LiveCarta/ContentGeneration
Migrate to uv sync and pytest coverage workflow
This commit is contained in:
28
Dockerfile
28
Dockerfile
@@ -2,13 +2,13 @@ FROM nvidia/cuda:12.4.1-cudnn-devel-ubuntu22.04
|
||||
|
||||
# Build-time only: keep apt non-interactive during the build without leaking
# the setting into the runtime environment of containers made from this image.
ARG DEBIAN_FRONTEND=noninteractive

# Runtime/toolchain configuration:
#   PYTHONUNBUFFERED        - stream logs immediately (no stdout buffering)
#   PIP_NO_CACHE_DIR        - keep pip's download cache out of image layers
#   UV_SYSTEM_PYTHON        - let `uv pip` target the system interpreter
#   UV_EXTRA_INDEX_URL      - CUDA 12.1 wheels for torch from the PyTorch index
#   PYTORCH_CUDA_ALLOC_CONF - reduce CUDA allocator fragmentation at runtime
ENV PYTHONUNBUFFERED=1 \
    PIP_NO_CACHE_DIR=1 \
    UV_SYSTEM_PYTHON=1 \
    UV_EXTRA_INDEX_URL=https://download.pytorch.org/whl/cu121 \
    PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True,max_split_size_mb:128
|
||||
|
||||
# Base OS tools + media stack + Python toolchain.
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
python3.10 \
|
||||
python3-pip \
|
||||
python3.10-dev \
|
||||
python3.10-venv \
|
||||
ffmpeg \
|
||||
@@ -23,18 +23,17 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
libgl1 \
|
||||
&& rm -rf /var/lib/apt/lists/* \
|
||||
&& ln -sf /usr/bin/python3.10 /usr/bin/python \
|
||||
&& ln -sf /usr/bin/pip3 /usr/bin/pip \
|
||||
&& git lfs install
|
||||
|
||||
# Install uv.
|
||||
COPY --from=ghcr.io/astral-sh/uv:0.6.17 /uv /uvx /usr/local/bin/
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Install project Python dependencies first for better layer caching.
|
||||
COPY requirements.txt /app/requirements.txt
|
||||
# Install app Python dependencies first for better layer caching.
|
||||
COPY pyproject.toml README.md /app/
|
||||
|
||||
RUN python -m pip install --upgrade pip setuptools wheel \
|
||||
&& pip install --index-url https://download.pytorch.org/whl/cu121 torch torchvision torchaudio \
|
||||
&& pip install -r /app/requirements.txt \
|
||||
&& pip install -U accelerate safetensors
|
||||
# Fix: `uv sync` does not accept `--system` (that flag belongs to `uv pip`),
# so this line fails the build as written. Point the project environment at
# the system prefix instead, so the resolved dependencies land where the
# `python` used by CMD can import them.
RUN UV_PROJECT_ENVIRONMENT=/usr/local uv sync --no-dev --no-install-project
|
||||
|
||||
# Ensure HunyuanVideo source exists in the image.
|
||||
ARG HUNYUAN_REPO=https://github.com/Tencent-Hunyuan/HunyuanVideo-1.5.git
|
||||
@@ -44,22 +43,23 @@ RUN if [ ! -f /app/HunyuanVideo-1.5/requirements.txt ]; then \
|
||||
fi
|
||||
|
||||
# Install HunyuanVideo dependencies from upstream README guidance.
|
||||
RUN pip install -r /app/HunyuanVideo-1.5/requirements.txt \
|
||||
&& pip install --upgrade tencentcloud-sdk-python \
|
||||
&& pip install sgl-kernel==0.3.18
|
||||
RUN uv pip install --system -r /app/HunyuanVideo-1.5/requirements.txt \
|
||||
&& uv pip install --system --upgrade tencentcloud-sdk-python \
|
||||
&& uv pip install --system sgl-kernel==0.3.18
|
||||
|
||||
# Optional attention backends from Hunyuan docs.
# Build with: --build-arg INSTALL_OPTIONAL_ATTENTION=1
# Note: flash-attn needs --no-build-isolation so it compiles against the
# already-installed torch. The /tmp clones are removed in the same RUN so
# the source trees do not persist in the image layer.
ARG INSTALL_OPTIONAL_ATTENTION=0
RUN if [ "$INSTALL_OPTIONAL_ATTENTION" = "1" ]; then \
        uv pip install --system flash-attn --no-build-isolation && \
        git clone --depth 1 https://github.com/Tencent-Hunyuan/flex-block-attn.git /tmp/flex-block-attn && \
        cd /tmp/flex-block-attn && git submodule update --init --recursive && python setup.py install && \
        git clone --depth 1 https://github.com/cooper1637/SageAttention.git /tmp/SageAttention && \
        cd /tmp/SageAttention && python setup.py install && \
        cd / && rm -rf /tmp/flex-block-attn /tmp/SageAttention; \
    fi
|
||||
|
||||
COPY . .
|
||||
# Copy application source after dependencies are installed.
|
||||
COPY . /app
|
||||
|
||||
# Default pipeline entrypoint.
|
||||
CMD ["python", "run_video_pipeline.py"]
|
||||
|
||||
33
README.md
33
README.md
@@ -14,9 +14,8 @@ Local Python:
|
||||
|
||||
```bash
|
||||
cp .env.example .env
|
||||
python3 -m venv .venv && source .venv/bin/activate
|
||||
pip install -r requirements.txt
|
||||
python run_video_pipeline.py
|
||||
uv sync --dev
|
||||
uv run python run_video_pipeline.py
|
||||
```
|
||||
|
||||
Docker (GPU):
|
||||
@@ -61,8 +60,9 @@ docker run --rm --gpus all --env-file .env \
|
||||
1. Linux with NVIDIA GPU and CUDA runtime.
|
||||
2. `ffmpeg` and `ffprobe` available on PATH.
|
||||
3. Python 3.10+.
|
||||
4. Hunyuan model checkpoints under `HunyuanVideo-1.5/ckpts`.
|
||||
5. If using FLUX local download, access approved for `black-forest-labs/FLUX.1-schnell`.
|
||||
4. `uv` installed (https://docs.astral.sh/uv/).
|
||||
5. Hunyuan model checkpoints under `HunyuanVideo-1.5/ckpts`.
|
||||
6. If using FLUX local download, access approved for `black-forest-labs/FLUX.1-schnell`.
|
||||
|
||||
## Environment Variables
|
||||
|
||||
@@ -82,29 +82,28 @@ cp .env.example .env
|
||||
1. Create and activate a virtual environment:
|
||||
|
||||
```bash
|
||||
python3 -m venv .venv
|
||||
uv venv
|
||||
source .venv/bin/activate
|
||||
```
|
||||
|
||||
2. Install Python dependencies:
|
||||
|
||||
```bash
|
||||
python -m pip install --upgrade pip
|
||||
pip install -r requirements.txt
|
||||
uv sync --dev
|
||||
```
|
||||
|
||||
3. Install Hunyuan dependencies:
|
||||
|
||||
```bash
|
||||
pip install -r HunyuanVideo-1.5/requirements.txt
|
||||
pip install --upgrade tencentcloud-sdk-python
|
||||
pip install sgl-kernel==0.3.18
|
||||
uv pip install -r HunyuanVideo-1.5/requirements.txt
|
||||
uv pip install --upgrade tencentcloud-sdk-python
|
||||
uv pip install sgl-kernel==0.3.18
|
||||
```
|
||||
|
||||
4. Run full pipeline:
|
||||
|
||||
```bash
|
||||
python run_video_pipeline.py
|
||||
uv run python run_video_pipeline.py
|
||||
```
|
||||
|
||||
5. Common options:
|
||||
@@ -207,5 +206,13 @@ docker run --rm --gpus all \
|
||||
8. Verify syntax quickly before running.
|
||||
|
||||
```bash
|
||||
python3 -m py_compile run_video_pipeline.py src/*.py
|
||||
uv run python -m py_compile run_video_pipeline.py src/*.py
|
||||
```
|
||||
|
||||
## Testing
|
||||
|
||||
Run tests with coverage:
|
||||
|
||||
```bash
|
||||
uv run pytest
|
||||
```
|
||||
|
||||
30
pyproject.toml
Normal file
30
pyproject.toml
Normal file
@@ -0,0 +1,30 @@
|
||||
[project]
|
||||
name = "content-generation"
|
||||
version = "0.1.0"
|
||||
description = "Video content generation pipeline"
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.10"
|
||||
dependencies = [
|
||||
"boto3",
|
||||
"python-dotenv",
|
||||
"elevenlabs",
|
||||
"torch",
|
||||
"transformers",
|
||||
"diffusers",
|
||||
"accelerate",
|
||||
"safetensors",
|
||||
"huggingface-hub",
|
||||
"bitsandbytes",
|
||||
]
|
||||
|
||||
[dependency-groups]
|
||||
dev = [
|
||||
"pytest",
|
||||
"pytest-cov",
|
||||
"coverage[toml]",
|
||||
]
|
||||
|
||||
[tool.pytest.ini_options]
|
||||
addopts = "-q --cov=run_video_pipeline --cov=src/logging_config.py --cov-report=term-missing --cov-fail-under=70"
|
||||
testpaths = ["tests"]
|
||||
python_files = ["test_*.py"]
|
||||
@@ -1,17 +0,0 @@
|
||||
# Core project dependencies inferred from imports in this workspace
|
||||
boto3
|
||||
python-dotenv
|
||||
elevenlabs
|
||||
torch
|
||||
transformers
|
||||
diffusers
|
||||
accelerate
|
||||
safetensors
|
||||
huggingface-hub
|
||||
|
||||
# Optional but commonly required for 4-bit quantization with BitsAndBytesConfig
|
||||
bitsandbytes
|
||||
|
||||
# Notes:
|
||||
# - ffmpeg/ffprobe are required by video scripts but installed at OS level, not via pip.
|
||||
# - torchrun is provided by the torch package.
|
||||
@@ -134,6 +134,16 @@ def main() -> int:
|
||||
cwd=args.base_dir,
|
||||
)
|
||||
|
||||
if not args.skip_generate:
|
||||
run_step(
|
||||
"Generate Images",
|
||||
_with_log_level([
|
||||
sys.executable,
|
||||
str(SCRIPT_DIR / "generate_images.py"),
|
||||
], args.log_level),
|
||||
cwd=args.base_dir,
|
||||
)
|
||||
|
||||
if not args.skip_generate:
|
||||
run_step(
|
||||
"Generate Videos",
|
||||
|
||||
@@ -1,25 +1,18 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import unittest
|
||||
|
||||
from src.logging_config import debug_log_lifecycle
|
||||
|
||||
|
||||
def test_logs_function_start_and_end(caplog) -> None:
    """debug_log_lifecycle emits Start/End DEBUG records around the wrapped
    call and leaves the function's return value unchanged.

    (Consolidates the duplicated unittest-class and pytest versions left
    interleaved by the migration diff; the pytest version is kept.)
    """
    @debug_log_lifecycle
    def sample(a: int, b: int) -> int:
        return a + b

    # Capture DEBUG output on the logger the decorator writes to — the
    # assertions below show it logs under the decorated function's module.
    with caplog.at_level(logging.DEBUG, logger=sample.__module__):
        result = sample(2, 3)

    assert result == 5
    # The logged name is the function's qualified name; for a function
    # defined inside this test that includes "<test name>.<locals>.".
    assert "Start test_logs_function_start_and_end.<locals>.sample" in caplog.text
    assert "End test_logs_function_start_and_end.<locals>.sample" in caplog.text
|
||||
|
||||
109
tests/test_pipeline_full_process.py
Normal file
109
tests/test_pipeline_full_process.py
Normal file
@@ -0,0 +1,109 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import sys
|
||||
import tempfile
|
||||
from argparse import Namespace
|
||||
from pathlib import Path
|
||||
from types import SimpleNamespace
|
||||
|
||||
import pytest
|
||||
|
||||
# Avoid requiring boto3 for orchestration tests.
|
||||
if "boto3" not in sys.modules:
|
||||
sys.modules["boto3"] = SimpleNamespace(client=lambda *args, **kwargs: object())
|
||||
|
||||
import run_video_pipeline as pipeline
|
||||
|
||||
|
||||
def test_full_generation_process_calls_all_scripts(monkeypatch) -> None:
    """End-to-end orchestration test for run_video_pipeline.main().

    Replaces subprocess.run with a fake that records each stage script and
    fabricates that stage's expected artifacts, then asserts that main()
    returns 0, produces the final output file, and invokes every stage in
    the documented order — without spawning real subprocesses.
    """
    with tempfile.TemporaryDirectory() as tmpdir:
        base_dir = Path(tmpdir)
        hunyuan_dir = base_dir / "HunyuanVideo-1.5"
        images_dir = base_dir / "images"
        videos_dir = base_dir / "videos"
        audios_dir = base_dir / "audios"
        merged_dir = base_dir / "merged"
        output_path = base_dir / "results" / "final_output.mp4"
        reel_script = base_dir / "reel_script.json"

        hunyuan_dir.mkdir(parents=True)
        (base_dir / "topic_description.txt").write_text("Test topic")

        args = Namespace(
            base_dir=base_dir,
            hunyuan_dir=hunyuan_dir,
            reel_script=reel_script,
            images_dir=images_dir,
            videos_dir=videos_dir,
            audios_dir=audios_dir,
            merged_dir=merged_dir,
            output=output_path,
            seed=1,
            skip_generate=False,
            skip_audio_generate=False,
            skip_merge=False,
            skip_concat=False,
            skip_s3_upload=True,
            log_level="DEBUG",
        )

        executed_scripts: list[str] = []

        # Stage scripts main() must run, in order.
        expected_scripts = [
            "generate_script.py",
            "generate_audios.py",
            "generate_images.py",
            "generate_videos.py",
            "merge_audio_video.py",
            "concat_merged.py",
        ]

        # Accept any positional/keyword arguments so the fake stays valid if
        # the pipeline adds subprocess.run kwargs (env=, capture_output=, ...)
        # beyond check/cwd.
        def fake_subprocess_run(cmd: list[str], *run_args, **run_kwargs):
            script_name = Path(cmd[1]).name if len(cmd) > 1 else ""
            if script_name not in expected_scripts:
                pytest.fail(f"Unexpected external process call: {cmd}")

            executed_scripts.append(script_name)

            # Fabricate the artifact each stage is responsible for, so the
            # orchestrator's existence checks between stages pass.
            if script_name == "generate_script.py":
                payload = {
                    "shots": [
                        {
                            "shot_number": 1,
                            "image_description": "A test image",
                            "voiceover": "A test voiceover",
                        }
                    ]
                }
                reel_script.write_text(json.dumps(payload))
            elif script_name == "generate_audios.py":
                audios_dir.mkdir(parents=True, exist_ok=True)
                (audios_dir / "output_1.mp3").write_bytes(b"audio")
            elif script_name == "generate_images.py":
                images_dir.mkdir(parents=True, exist_ok=True)
                (images_dir / "shot_1.png").write_bytes(b"image")
            elif script_name == "generate_videos.py":
                videos_dir.mkdir(parents=True, exist_ok=True)
                (videos_dir / "output_1.mp4").write_bytes(b"video")
            elif script_name == "merge_audio_video.py":
                merged_dir.mkdir(parents=True, exist_ok=True)
                (merged_dir / "merged_1.mp4").write_bytes(b"merged")
            elif script_name == "concat_merged.py":
                output_path.parent.mkdir(parents=True, exist_ok=True)
                output_path.write_bytes(b"final")

            class Result:
                returncode = 0

            return Result()

        monkeypatch.setattr(pipeline, "parse_args", lambda: args)
        monkeypatch.setattr(pipeline.subprocess, "run", fake_subprocess_run)

        rc = pipeline.main()

        assert rc == 0
        assert output_path.exists()
        # Coverage check for orchestration: every required stage ran, in order.
        assert executed_scripts == expected_scripts
|
||||
Reference in New Issue
Block a user