Initial commit: 3D Viewer application
Features:
- Vue 3 frontend with Three.js/Online3DViewer
- Node.js API with PostgreSQL and Redis
- Python worker for model conversion
- Docker Compose for deployment
- ViewCube navigation with drag rotation and 90° snap
- Cross-section, exploded view, and render settings
- Parts tree with visibility controls

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
worker/Dockerfile (new file)
@@ -0,0 +1,54 @@
# Python Worker for 3D Model Conversion
FROM python:3.12-slim

WORKDIR /app

# Install system dependencies for 3D processing and headless rendering
RUN apt-get update && apt-get install -y --no-install-recommends \
    # OpenGL/OSMesa for headless rendering (pyrender)
    libosmesa6-dev \
    libgl1 \
    libglu1-mesa \
    # Build tools for some Python packages
    build-essential \
    # OpenCASCADE runtime libraries for cascadio (STEP conversion)
    libocct-data-exchange-7.8 \
    libocct-draw-7.8 \
    libocct-foundation-7.8 \
    libocct-modeling-algorithms-7.8 \
    libocct-modeling-data-7.8 \
    libocct-ocaf-7.8 \
    libocct-visualization-7.8 \
    # Cleanup
    && rm -rf /var/lib/apt/lists/*

# Set environment for headless rendering
ENV PYOPENGL_PLATFORM=osmesa

# Install uv for fast package management
RUN pip install --no-cache-dir uv

# Copy project files
COPY pyproject.toml .
COPY src/ src/

# Create __init__.py files if they don't exist
RUN touch src/__init__.py src/processors/__init__.py src/services/__init__.py

# Install dependencies using uv
RUN uv pip install --system -e .

# Create non-root user
RUN groupadd --system --gid 1001 worker && \
    useradd --system --uid 1001 --gid worker worker && \
    mkdir -p /tmp/conversions && \
    chown -R worker:worker /app /tmp/conversions

# Switch to non-root user
USER worker

# Set temp directory
ENV TEMP_DIR=/tmp/conversions

# Run the worker
CMD ["python", "-m", "src.main"]
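A quick way to check the headless-rendering stack inside the built image is to import the OSMesa binding the thumbnail service relies on. A minimal sketch, assuming the image builds as above (the thumbnail code treats a failed import as "OSMesa unavailable", so the same check applies here):

# Sketch: run inside the container to confirm OSMesa is importable.
# PYOPENGL_PLATFORM=osmesa is already exported by the Dockerfile.
from OpenGL import osmesa  # expected to raise ImportError if libosmesa6 is missing

print("OSMesa available:", bool(osmesa.OSMesaCreateContextExt))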
worker/pyproject.toml (new file)
@@ -0,0 +1,34 @@
[project]
name = "viewer3d-worker"
version = "1.0.0"
description = "3D Model Conversion Worker Service"
requires-python = ">=3.11"
dependencies = [
    "redis>=5.0.0",
    "minio>=7.2.0",
    "trimesh>=4.4.0",
    "cascadio>=0.0.13",
    "pillow>=10.0.0",
    "pyrender>=0.1.45",
    "pydantic-settings>=2.0.0",
    "psycopg[binary]>=3.1.0",
    "numpy>=1.24.0",
    "fast-simplification>=0.1.7",
]

[project.scripts]
worker = "src.main:main"

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"

[tool.hatch.build.targets.wheel]
packages = ["src"]

[tool.ruff]
line-length = 100
target-version = "py311"

[tool.ruff.lint]
select = ["E", "F", "I", "N", "W"]
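The [project.scripts] entry and the Dockerfile's CMD are two routes to the same callable in src/main.py below. As a sketch (not code from the commit), installing the package puts a `worker` executable on the PATH that is equivalent to:

# What the `worker` console script resolves to; `python -m src.main`
# reaches the same function via the `if __name__ == "__main__"` guard.
from src.main import main

main()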
worker/src/__init__.py (new file)
@@ -0,0 +1 @@
# 3D Model Conversion Worker
worker/src/config.py (new file)
@@ -0,0 +1,49 @@
"""Configuration management using Pydantic Settings."""

import os
from functools import lru_cache

from pydantic_settings import BaseSettings


class Settings(BaseSettings):
    """Application settings loaded from environment variables."""

    # Redis
    redis_url: str = "redis://localhost:6379"
    redis_stream: str = "bull:model-conversion:wait"
    redis_consumer_group: str = "conversion-workers"
    redis_consumer_name: str = f"worker-{os.getpid()}"

    # Database
    database_url: str = "postgresql://viewer:viewer_password@localhost:5432/viewer_db"

    # MinIO
    minio_endpoint: str = "localhost:9000"
    minio_public_endpoint: str = "localhost:9000"  # For public URLs (browser access)
    minio_access_key: str = "minioadmin"
    minio_secret_key: str = "minioadmin"
    minio_use_ssl: bool = False
    minio_bucket_raw: str = "raw-models"
    minio_bucket_converted: str = "converted-models"
    minio_bucket_thumbnails: str = "thumbnails"

    # Processing
    temp_dir: str = "/tmp/conversions"
    max_file_size_mb: int = 500
    thumbnail_size: tuple[int, int] = (256, 256)

    # Logging
    log_level: str = "INFO"

    class Config:
        env_file = ".env"
        case_sensitive = False


@lru_cache
def get_settings() -> Settings:
    """Get cached settings instance."""
    return Settings()


settings = get_settings()
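Because Settings sets case_sensitive = False, every field above can be overridden per-deployment through the environment (or a .env file) without touching code. A minimal sketch of that behavior — the values are illustrative, not defaults from this commit:

# Sketch: environment variables map onto Settings fields case-insensitively.
# Values below are examples for a Compose network, not part of the commit.
import os

os.environ["REDIS_URL"] = "redis://redis:6379"
os.environ["minio_endpoint"] = "minio:9000"  # lower case works too

from src.config import Settings

s = Settings()
assert s.redis_url == "redis://redis:6379"
assert s.minio_endpoint == "minio:9000"

Note that get_settings() is wrapped in @lru_cache and a module-level `settings` is built at import time, so overrides must be present in the environment before src.config is first imported.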
worker/src/main.py (new file)
@@ -0,0 +1,463 @@
"""
3D Model Conversion Worker

Listens to the BullMQ queue in Redis and processes model conversion jobs.
"""

import json
import logging
import signal
import sys
import time
from pathlib import Path
from typing import Any

import psycopg
import redis

from .config import settings
from .processors.converter import convert_to_glb_with_lod
from .services.storage import download_file, upload_file
from .services.thumbnail import generate_thumbnail

# Configure logging
logging.basicConfig(
    level=getattr(logging, settings.log_level),
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[logging.StreamHandler(sys.stdout)],
)
logger = logging.getLogger(__name__)

# Graceful shutdown flag
shutdown_requested = False


def signal_handler(signum, frame):
    """Handle shutdown signals."""
    global shutdown_requested
    logger.info(f"Received signal {signum}, initiating graceful shutdown...")
    shutdown_requested = True


def get_redis_client() -> redis.Redis:
    """Create a Redis client."""
    return redis.from_url(settings.redis_url, decode_responses=True)


def get_db_connection() -> psycopg.Connection:
    """Create a database connection."""
    return psycopg.connect(settings.database_url)


def update_thumbnail_only(
    model_id: str,
    thumbnail_url: str,
) -> None:
    """Update only the thumbnail URL in the database (for thumbnail-only jobs)."""
    with get_db_connection() as conn:
        with conn.cursor() as cur:
            cur.execute(
                """
                UPDATE models
                SET thumbnail_url = %s,
                    thumbnail_storage_key = %s,
                    updated_at = NOW()
                WHERE id = %s
                """,
                (
                    thumbnail_url,
                    f"{model_id}/preview.png",
                    model_id,
                ),
            )
        conn.commit()

    logger.info(f"Updated thumbnail for model {model_id}")


def update_model_status(
    model_id: str,
    status: str,
    model_url: str | None = None,
    thumbnail_url: str | None = None,
    metadata: dict | None = None,
    error: str | None = None,
    lod_urls: dict | None = None,
) -> None:
    """Update model status in the database."""
    with get_db_connection() as conn:
        with conn.cursor() as cur:
            if status == 'completed':
                # Include LOD URLs in metadata
                full_metadata = metadata or {}
                if lod_urls:
                    full_metadata['lod_urls'] = lod_urls

                cur.execute(
                    """
                    UPDATE models
                    SET conversion_status = %s,
                        model_url = %s,
                        thumbnail_url = %s,
                        converted_storage_key = %s,
                        thumbnail_storage_key = %s,
                        metadata = %s,
                        updated_at = NOW()
                    WHERE id = %s
                    """,
                    (
                        status,
                        model_url,
                        thumbnail_url,
                        f"{model_id}/model.glb",
                        f"{model_id}/preview.png",
                        json.dumps(full_metadata),
                        model_id,
                    ),
                )
            elif status == 'failed':
                cur.execute(
                    """
                    UPDATE models
                    SET conversion_status = %s,
                        conversion_error = %s,
                        updated_at = NOW()
                    WHERE id = %s
                    """,
                    (status, error, model_id),
                )
            else:
                cur.execute(
                    """
                    UPDATE models
                    SET conversion_status = %s,
                        updated_at = NOW()
                    WHERE id = %s
                    """,
                    (status, model_id),
                )
        conn.commit()

    logger.info(f"Updated model {model_id} status to {status}")


def save_model_parts(model_id: str, parts: list[dict]) -> None:
    """Save model parts to the database."""
    if not parts:
        return

    with get_db_connection() as conn:
        with conn.cursor() as cur:
            for part in parts:
                cur.execute(
                    """
                    INSERT INTO model_parts (model_id, name, bounding_box, center_point)
                    VALUES (%s, %s, %s, %s)
                    """,
                    (
                        model_id,
                        part.get('name', 'unnamed'),
                        json.dumps(part.get('bounding_box', {})),
                        json.dumps(part.get('center_point', {})),
                    ),
                )
        conn.commit()

    logger.info(f"Saved {len(parts)} parts for model {model_id}")


def process_thumbnail_job(job_data: dict[str, Any]) -> dict[str, Any]:
    """
    Process a thumbnail-only job for GLB/GLTF files.

    1. Download GLB file from model URL
    2. Generate thumbnail
    3. Upload thumbnail to MinIO
    4. Update database with thumbnail URL
    """
    model_id = job_data['modelId']
    model_url = job_data['modelUrl']

    logger.info(f"Processing thumbnail job for model {model_id}")

    # Create a temp directory for this job
    temp_dir = Path(settings.temp_dir) / f"thumb_{model_id}"
    temp_dir.mkdir(parents=True, exist_ok=True)

    try:
        # 1. Download GLB file from model URL
        import urllib.request
        input_path = temp_dir / "input.glb"

        # model_url might be an internal MinIO URL; if so, fetch via the
        # MinIO client instead of plain HTTP.
        download_url = model_url
        if 'minio:9000' in model_url or 'localhost:9000' in model_url:
            # Extract bucket and key from a URL of the form
            # http://minio:9000/bucket/key
            from urllib.parse import urlparse
            parsed = urlparse(model_url)
            path_parts = parsed.path.lstrip('/').split('/', 1)
            if len(path_parts) == 2:
                bucket, key = path_parts
                download_file(bucket, key, input_path)
            else:
                raise ValueError(f"Invalid model URL format: {model_url}")
        else:
            # External URL, download directly
            urllib.request.urlretrieve(download_url, input_path)

        logger.info(f"Downloaded GLB file to {input_path}")

        # 2. Generate thumbnail
        thumbnail_path = temp_dir / "preview.png"
        generate_thumbnail(input_path, thumbnail_path)
        logger.info(f"Generated thumbnail: {thumbnail_path}")

        # 3. Upload thumbnail to MinIO
        thumbnail_key = f"{model_id}/preview.png"
        thumbnail_url = upload_file(
            thumbnail_path,
            settings.minio_bucket_thumbnails,
            thumbnail_key,
            content_type="image/png",
        )
        logger.info(f"Uploaded thumbnail: {thumbnail_key}")

        # 4. Update database with thumbnail URL
        update_thumbnail_only(model_id, thumbnail_url)

        return {
            'modelId': model_id,
            'thumbnailUrl': thumbnail_url,
        }

    except Exception as e:
        logger.error(f"Thumbnail job failed for model {model_id}: {e}", exc_info=True)
        raise

    finally:
        # Cleanup temp files
        import shutil
        if temp_dir.exists():
            shutil.rmtree(temp_dir, ignore_errors=True)
            logger.debug(f"Cleaned up temp directory: {temp_dir}")


def process_job(job_data: dict[str, Any]) -> dict[str, Any]:
    """
    Process a single conversion job with LOD support.

    1. Download original file from MinIO
    2. Convert to GLB with multiple LOD levels
    3. Generate thumbnail
    4. Upload all LOD files to MinIO
    5. Save model parts
    6. Update database
    """
    # Check if this is a thumbnail-only job
    job_type = job_data.get('jobType', 'conversion')
    if job_type == 'thumbnail':
        return process_thumbnail_job(job_data)

    model_id = job_data['modelId']
    storage_key = job_data['key']
    file_type = job_data['fileType']

    logger.info(f"Processing job for model {model_id}, type: {file_type}")

    # Update status to processing
    update_model_status(model_id, 'processing')

    # Create a temp directory for this job
    temp_dir = Path(settings.temp_dir) / model_id
    temp_dir.mkdir(parents=True, exist_ok=True)

    try:
        # 1. Download original file
        input_path = temp_dir / f"input.{file_type}"
        download_file(settings.minio_bucket_raw, storage_key, input_path)
        logger.info(f"Downloaded input file to {input_path}")

        # 2. Convert to GLB with LOD levels
        output_dir = temp_dir / "lod"
        metadata = convert_to_glb_with_lod(input_path, output_dir, file_type, model_id)
        logger.info(f"Converted to GLB with LOD: {output_dir}")

        # Get LOD files info
        lod_files = metadata.get('lod_files', {'lod0': f'{model_id}_lod0.glb'})

        # 3. Generate thumbnail from LOD0 (highest quality)
        lod0_path = output_dir / lod_files['lod0']
        thumbnail_path = temp_dir / "preview.png"
        generate_thumbnail(lod0_path, thumbnail_path)
        logger.info(f"Generated thumbnail: {thumbnail_path}")

        # 4. Upload all LOD files to MinIO
        lod_urls = {}
        for lod_level, lod_filename in lod_files.items():
            lod_path = output_dir / lod_filename
            if lod_path.exists():
                lod_key = f"{model_id}/{lod_filename}"
                lod_url = upload_file(
                    lod_path,
                    settings.minio_bucket_converted,
                    lod_key,
                    content_type="model/gltf-binary",
                )
                lod_urls[lod_level] = lod_url
                logger.info(f"Uploaded {lod_level}: {lod_key}")

        # Also upload LOD0 as model.glb for backward compatibility
        model_key = f"{model_id}/model.glb"
        model_url = upload_file(
            lod0_path,
            settings.minio_bucket_converted,
            model_key,
            content_type="model/gltf-binary",
        )

        # Upload thumbnail
        thumbnail_key = f"{model_id}/preview.png"
        thumbnail_url = upload_file(
            thumbnail_path,
            settings.minio_bucket_thumbnails,
            thumbnail_key,
            content_type="image/png",
        )

        # 5. Save model parts if available
        parts = metadata.get('parts', [])
        if parts:
            save_model_parts(model_id, parts)

        # 6. Update database with success (includes LOD URLs in metadata)
        update_model_status(
            model_id,
            'completed',
            model_url=model_url,
            thumbnail_url=thumbnail_url,
            metadata=metadata,
            lod_urls=lod_urls,
        )

        return {
            'modelId': model_id,
            'modelUrl': model_url,
            'thumbnailUrl': thumbnail_url,
            'lodUrls': lod_urls,
            'metadata': metadata,
        }

    except Exception as e:
        logger.error(f"Job failed for model {model_id}: {e}", exc_info=True)
        update_model_status(model_id, 'failed', error=str(e))
        raise

    finally:
        # Cleanup temp files
        import shutil
        if temp_dir.exists():
            shutil.rmtree(temp_dir, ignore_errors=True)
            logger.debug(f"Cleaned up temp directory: {temp_dir}")


def poll_jobs(redis_client: redis.Redis) -> None:
    """
    Poll for jobs from the BullMQ queue.

    BullMQ stores jobs in Redis with a specific structure.
    We use BRPOPLPUSH to atomically move jobs from wait to active.
    """
    wait_key = "bull:model-conversion:wait"
    active_key = "bull:model-conversion:active"
    completed_key = "bull:model-conversion:completed"

    while not shutdown_requested:
        try:
            # Try to get a job (blocking with a 5-second timeout)
            job_id = redis_client.brpoplpush(wait_key, active_key, timeout=5)

            if job_id is None:
                continue

            logger.info(f"Received job: {job_id}")

            # Get job data
            job_key = f"bull:model-conversion:{job_id}"
            job_json = redis_client.hget(job_key, "data")

            if not job_json:
                logger.warning(f"No data found for job {job_id}")
                redis_client.lrem(active_key, 1, job_id)
                continue

            job_data = json.loads(job_json)

            # Process the job
            try:
                result = process_job(job_data)

                # Mark job as completed
                redis_client.hset(job_key, "returnvalue", json.dumps(result))
                redis_client.hset(job_key, "finishedOn", str(int(time.time() * 1000)))
                redis_client.lrem(active_key, 1, job_id)
                redis_client.lpush(completed_key, job_id)

                logger.info(f"Job {job_id} completed successfully")

            except Exception as e:
                # Mark job as failed
                redis_client.hset(job_key, "failedReason", str(e))
                redis_client.hset(job_key, "finishedOn", str(int(time.time() * 1000)))
                redis_client.lrem(active_key, 1, job_id)

                # Move to failed queue
                failed_key = "bull:model-conversion:failed"
                redis_client.lpush(failed_key, job_id)

                logger.error(f"Job {job_id} failed: {e}")

        except redis.exceptions.ConnectionError as e:
            logger.error(f"Redis connection error: {e}")
            time.sleep(5)

        except Exception as e:
            logger.error(f"Unexpected error in job polling: {e}", exc_info=True)
            time.sleep(1)


def main():
    """Main entry point for the worker."""
    logger.info("Starting 3D Model Conversion Worker")
    logger.info(f"Redis URL: {settings.redis_url}")
    logger.info(f"MinIO endpoint: {settings.minio_endpoint}")

    # Set up signal handlers
    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)

    # Create temp directory
    temp_dir = Path(settings.temp_dir)
    temp_dir.mkdir(parents=True, exist_ok=True)

    # Connect to Redis
    redis_client = get_redis_client()

    # Test the connection
    try:
        redis_client.ping()
        logger.info("Connected to Redis")
    except redis.exceptions.ConnectionError as e:
        logger.critical(f"Failed to connect to Redis: {e}")
        sys.exit(1)

    # Start polling for jobs
    logger.info("Worker ready, polling for jobs...")
    poll_jobs(redis_client)

    logger.info("Worker shutdown complete")


if __name__ == "__main__":
    main()
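poll_jobs() fixes the contract a producer has to follow: the payload sits in a hash at bull:model-conversion:<job id> under the data field, and the job id is pushed onto the wait list that BRPOPLPUSH drains. The real producer is the Node.js API via BullMQ; the sketch below only mirrors the fields this worker reads, with an illustrative id and storage key:

# Sketch: enqueueing a job in the shape poll_jobs() expects.
# The job id, model id, and key are placeholders, not values from the commit.
import json
import redis

r = redis.from_url("redis://localhost:6379", decode_responses=True)

job_id = "1"
job_data = {
    "modelId": "example-model-id",
    "key": "example-model-id/input.step",  # object key in the raw-models bucket
    "fileType": "step",
    "jobType": "conversion",  # or "thumbnail", which needs "modelUrl" instead of "key"
}

r.hset(f"bull:model-conversion:{job_id}", "data", json.dumps(job_data))
r.lpush("bull:model-conversion:wait", job_id)

LPUSH on the producer side plus BRPOPLPUSH on the worker side gives FIFO ordering, and the atomic move into the active list means a crash mid-job leaves the id visible there rather than silently dropped.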
worker/src/processors/__init__.py (new file)
@@ -0,0 +1 @@
# Model processors
worker/src/processors/converter.py (new file)
@@ -0,0 +1,391 @@
"""3D model conversion processor with LOD support."""

import logging
from pathlib import Path
from typing import Any

import trimesh

logger = logging.getLogger(__name__)

# LOD configuration: level -> face ratio (for non-STEP files)
LOD_LEVELS = {
    0: 1.0,   # LOD0: 100% faces (original)
    1: 0.5,   # LOD1: 50% faces
    2: 0.25,  # LOD2: 25% faces
}

# LOD tessellation parameters for STEP files (cascadio).
# Higher values = coarser mesh = fewer triangles.
LOD_TESSELLATION = {
    0: {'tol_linear': 0.01, 'tol_angular': 0.5},  # High quality (default)
    1: {'tol_linear': 0.1, 'tol_angular': 1.0},   # Medium quality
    2: {'tol_linear': 0.5, 'tol_angular': 2.0},   # Low quality (for preview)
}


def convert_to_glb(input_path: Path, output_path: Path, file_type: str) -> dict[str, Any]:
    """
    Convert a 3D model to a single GLB file.

    Supports: STEP, STL, OBJ, and other formats via trimesh/cascadio.

    Returns metadata about the converted model.
    """
    file_type = file_type.lower()

    # Ensure the output directory exists
    output_path.parent.mkdir(parents=True, exist_ok=True)

    if file_type in ('step', 'stp'):
        return _convert_step(input_path, output_path)
    else:
        return _convert_with_trimesh(input_path, output_path, file_type)


def convert_to_glb_with_lod(
    input_path: Path, output_dir: Path, file_type: str, model_id: str
) -> dict[str, Any]:
    """
    Convert a 3D model to GLB format with multiple LOD levels.

    For STEP files: generate each LOD directly from source with different
    tessellation precision.
    For other files: generate LOD0, then simplify for the other levels.

    Args:
        input_path: Path to input file
        output_dir: Directory to save LOD files
        file_type: File extension (step, stl, obj, etc.)
        model_id: Unique model identifier for file naming

    Returns:
        Metadata including LOD file paths and statistics
    """
    file_type = file_type.lower()
    output_dir.mkdir(parents=True, exist_ok=True)

    # STEP files: generate each LOD with different tessellation precision
    if file_type in ('step', 'stp'):
        return _convert_step_with_lod(input_path, output_dir, model_id)

    # Non-STEP files: use post-processing simplification
    return _convert_other_with_lod(input_path, output_dir, file_type, model_id)


def _convert_step_with_lod(input_path: Path, output_dir: Path, model_id: str) -> dict[str, Any]:
    """
    Convert a STEP file to GLB with multiple LOD levels using different
    tessellation precision.

    This is more effective than post-processing simplification because it
    controls mesh generation at the source.
    """
    lod_files = {}
    metadata = None

    for level, params in LOD_TESSELLATION.items():
        lod_path = output_dir / f"{model_id}_lod{level}.glb"

        try:
            level_metadata = _convert_step(
                input_path,
                lod_path,
                tol_linear=params['tol_linear'],
                tol_angular=params['tol_angular'],
            )

            lod_files[f'lod{level}'] = lod_path.name
            faces = level_metadata.get('faces', 0)
            logger.info(
                f"Generated LOD{level} with {faces:,} faces "
                f"(tol_linear={params['tol_linear']})"
            )

            # Use LOD0 metadata as the primary metadata
            if level == 0:
                metadata = level_metadata

        except Exception as e:
            logger.error(f"Failed to generate LOD{level}: {e}")
            # Fall back to LOD0 if available
            if 'lod0' in lod_files:
                lod_files[f'lod{level}'] = lod_files['lod0']

    # If LOD0 failed, raise an error
    if metadata is None:
        raise RuntimeError("Failed to convert STEP file")

    # Add LOD info to metadata
    metadata['lod_files'] = lod_files
    metadata['lod_levels'] = len(set(lod_files.values()))

    return metadata


def _convert_other_with_lod(
    input_path: Path, output_dir: Path, file_type: str, model_id: str
) -> dict[str, Any]:
    """Convert non-STEP files to GLB with LOD using post-processing simplification."""
    # LOD0 path (original quality)
    lod0_path = output_dir / f"{model_id}_lod0.glb"

    # Convert to LOD0
    metadata = _convert_with_trimesh(input_path, lod0_path, file_type)

    lod_files = {
        'lod0': lod0_path.name,
    }

    # Get the face count for the LOD generation decision
    total_faces = metadata.get('faces', 0)

    # Only generate LODs if the model has enough faces
    if total_faces > 1000:
        try:
            # Generate LOD1 and LOD2 using mesh simplification
            for level in [1, 2]:
                lod_path = output_dir / f"{model_id}_lod{level}.glb"
                ratio = LOD_LEVELS[level]

                # Reload the mesh fresh for each LOD level
                mesh = trimesh.load(str(lod0_path))

                simplified = _simplify_mesh(mesh, ratio)
                if simplified is not None:
                    simplified.export(str(lod_path), file_type='glb')
                    lod_files[f'lod{level}'] = lod_path.name
                    logger.info(f"Generated LOD{level} with {ratio*100:.0f}% faces: {lod_path.name}")
                else:
                    logger.warning(f"Failed to generate LOD{level}, using LOD0")
                    lod_files[f'lod{level}'] = lod_files['lod0']

        except Exception as e:
            logger.warning(f"LOD generation failed: {e}, using LOD0 for all levels")
            lod_files['lod1'] = lod_files['lod0']
            lod_files['lod2'] = lod_files['lod0']
    else:
        # Small model, use LOD0 for all levels
        logger.info(f"Model has {total_faces} faces, skipping LOD generation")
        lod_files['lod1'] = lod_files['lod0']
        lod_files['lod2'] = lod_files['lod0']

    # Add LOD info to metadata
    metadata['lod_files'] = lod_files
    metadata['lod_levels'] = len(set(lod_files.values()))

    return metadata


def _simplify_mesh(
    mesh: trimesh.Trimesh | trimesh.Scene, ratio: float
) -> trimesh.Trimesh | trimesh.Scene | None:
    """
    Simplify a mesh or scene to the target face ratio.

    Args:
        mesh: Trimesh mesh or scene
        ratio: Target ratio of faces (0.0 - 1.0)

    Returns:
        Simplified mesh/scene, or None on failure
    """
    # Minimum reduction required (at least 10% reduction for
    # fast_simplification to work)
    MIN_REDUCTION_RATIO = 0.9

    try:
        if isinstance(mesh, trimesh.Scene):
            # Simplify each geometry in the scene
            simplified_geometries = {}
            for name, geom in mesh.geometry.items():
                # Skip small geometries and non-mesh objects
                if not hasattr(geom, 'faces') or len(geom.faces) < 100:
                    simplified_geometries[name] = geom
                    continue

                original_faces = len(geom.faces)
                target_faces = max(int(original_faces * ratio), 4)

                # Only simplify if we're reducing by at least 10%
                # (fast_simplification requires a reduction > 0)
                if target_faces < original_faces * MIN_REDUCTION_RATIO:
                    try:
                        simplified = geom.simplify_quadric_decimation(target_faces)
                        simplified_geometries[name] = simplified
                    except Exception as e:
                        logger.warning(f"Failed to simplify geometry {name}: {e}")
                        simplified_geometries[name] = geom
                else:
                    # Reduction too small, skip simplification
                    simplified_geometries[name] = geom

            # Create a new scene with the simplified geometries
            new_scene = trimesh.Scene()
            for name, geom in simplified_geometries.items():
                try:
                    # Get the original transform if one exists
                    node_name = None
                    if hasattr(mesh.graph, 'nodes_geometry'):
                        for item in mesh.graph.nodes_geometry:
                            # Handle both tuple formats: (node, geom_name) or (node, geom_name, ...)
                            if len(item) >= 2 and item[1] == name:
                                node_name = item[0]
                                break

                    if node_name:
                        transform = mesh.graph.get(node_name)[0]
                        new_scene.add_geometry(geom, node_name=node_name, geom_name=name, transform=transform)
                    else:
                        new_scene.add_geometry(geom, geom_name=name)
                except Exception as e:
                    # If the transform lookup fails, add the geometry without a transform
                    logger.debug(f"Could not get transform for {name}: {e}")
                    new_scene.add_geometry(geom, geom_name=name)

            return new_scene

        elif hasattr(mesh, 'faces') and len(mesh.faces) >= 100:
            # Single-mesh simplification
            original_faces = len(mesh.faces)
            target_faces = max(int(original_faces * ratio), 4)

            # Only simplify if we're reducing by at least 10%
            if target_faces < original_faces * MIN_REDUCTION_RATIO:
                return mesh.simplify_quadric_decimation(target_faces)

            return mesh

        # Small or faceless meshes are returned unchanged
        return mesh

    except Exception as e:
        logger.error(f"Mesh simplification failed: {e}")
        return None


def _convert_step(
    input_path: Path,
    output_path: Path,
    tol_linear: float = 0.01,
    tol_angular: float = 0.5,
) -> dict[str, Any]:
    """Convert a STEP file using cascadio with configurable tessellation precision.

    Args:
        input_path: Path to STEP file
        output_path: Path to save GLB file
        tol_linear: Linear deflection tolerance (higher = coarser mesh)
        tol_angular: Angular deflection tolerance in radians (higher = coarser mesh)

    Returns:
        Metadata about the converted model
    """
    try:
        import cascadio

        logger.info(f"Converting STEP file with cascadio: {input_path}")
        logger.info(f"Tessellation params: tol_linear={tol_linear}, tol_angular={tol_angular}")

        cascadio.step_to_glb(
            str(input_path),
            str(output_path),
            tol_linear=tol_linear,
            tol_angular=tol_angular,
        )

        # Load the result to get metadata
        mesh = trimesh.load(str(output_path))
        return _extract_metadata(mesh)

    except ImportError:
        logger.error("cascadio not installed, cannot convert STEP files")
        raise RuntimeError("STEP conversion requires the cascadio package")
    except Exception as e:
        logger.error(f"STEP conversion failed: {e}")
        raise


def _convert_with_trimesh(input_path: Path, output_path: Path, file_type: str) -> dict[str, Any]:
    """Convert STL, OBJ, and other formats using trimesh."""
    logger.info(f"Converting {file_type.upper()} file with trimesh: {input_path}")

    try:
        # Load the mesh
        mesh = trimesh.load(str(input_path))

        # Export to GLB
        mesh.export(str(output_path), file_type='glb')

        return _extract_metadata(mesh)

    except Exception as e:
        logger.error(f"Trimesh conversion failed: {e}")
        raise


def _extract_metadata(mesh: trimesh.Trimesh | trimesh.Scene) -> dict[str, Any]:
    """Extract metadata from a trimesh object."""
    metadata: dict[str, Any] = {}

    try:
        if isinstance(mesh, trimesh.Scene):
            # Scene with multiple meshes
            metadata['type'] = 'scene'
            metadata['parts_count'] = len(mesh.geometry)

            # Aggregate stats
            total_vertices = 0
            total_faces = 0

            for name, geom in mesh.geometry.items():
                if hasattr(geom, 'vertices'):
                    total_vertices += len(geom.vertices)
                if hasattr(geom, 'faces'):
                    total_faces += len(geom.faces)

            metadata['vertices'] = total_vertices
            metadata['faces'] = total_faces

            # Bounding box
            if hasattr(mesh, 'bounds') and mesh.bounds is not None:
                bounds = mesh.bounds
                metadata['bounding_box'] = {
                    'min': {'x': float(bounds[0][0]), 'y': float(bounds[0][1]), 'z': float(bounds[0][2])},
                    'max': {'x': float(bounds[1][0]), 'y': float(bounds[1][1]), 'z': float(bounds[1][2])},
                }

            # Parts info
            parts = []
            for name, geom in mesh.geometry.items():
                part_info = {'name': name}

                if hasattr(geom, 'bounds') and geom.bounds is not None:
                    part_bounds = geom.bounds
                    part_info['bounding_box'] = {
                        'min': {'x': float(part_bounds[0][0]), 'y': float(part_bounds[0][1]), 'z': float(part_bounds[0][2])},
                        'max': {'x': float(part_bounds[1][0]), 'y': float(part_bounds[1][1]), 'z': float(part_bounds[1][2])},
                    }
                    part_info['center_point'] = {
                        'x': float((part_bounds[0][0] + part_bounds[1][0]) / 2),
                        'y': float((part_bounds[0][1] + part_bounds[1][1]) / 2),
                        'z': float((part_bounds[0][2] + part_bounds[1][2]) / 2),
                    }

                parts.append(part_info)

            metadata['parts'] = parts

        else:
            # Single mesh
            metadata['type'] = 'mesh'
            metadata['parts_count'] = 1

            if hasattr(mesh, 'vertices'):
                metadata['vertices'] = len(mesh.vertices)
            if hasattr(mesh, 'faces'):
                metadata['faces'] = len(mesh.faces)

            if hasattr(mesh, 'bounds') and mesh.bounds is not None:
                bounds = mesh.bounds
                metadata['bounding_box'] = {
                    'min': {'x': float(bounds[0][0]), 'y': float(bounds[0][1]), 'z': float(bounds[0][2])},
                    'max': {'x': float(bounds[1][0]), 'y': float(bounds[1][1]), 'z': float(bounds[1][2])},
                }

    except Exception as e:
        logger.warning(f"Error extracting metadata: {e}")

    return metadata
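Driving the converter directly is useful for debugging outside the queue. A sketch with illustrative paths and model id (a STEP input exercises the tessellation LODs, anything else the simplification path; cascadio must be installed for STEP):

# Sketch: one-off conversion with LOD output; paths and the id are placeholders.
from pathlib import Path

from src.processors.converter import convert_to_glb_with_lod

meta = convert_to_glb_with_lod(
    Path("/tmp/sample.step"),
    Path("/tmp/lod-out"),
    "step",
    "example-model-id",
)
print(meta["lod_files"])  # e.g. {'lod0': 'example-model-id_lod0.glb', ...}
print(meta.get("faces"), meta.get("vertices"), meta.get("parts_count"))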
worker/src/services/__init__.py (new file)
@@ -0,0 +1 @@
# Worker services
worker/src/services/storage.py (new file)
@@ -0,0 +1,61 @@
"""MinIO storage service for the worker."""

import logging
from pathlib import Path

from minio import Minio
from minio.error import S3Error

from ..config import settings

logger = logging.getLogger(__name__)


def get_minio_client() -> Minio:
    """Create a MinIO client."""
    return Minio(
        endpoint=settings.minio_endpoint,
        access_key=settings.minio_access_key,
        secret_key=settings.minio_secret_key,
        secure=settings.minio_use_ssl,
    )


def download_file(bucket: str, key: str, local_path: Path) -> None:
    """Download a file from MinIO."""
    client = get_minio_client()

    # Ensure the parent directory exists
    local_path.parent.mkdir(parents=True, exist_ok=True)

    logger.info(f"Downloading {bucket}/{key} to {local_path}")
    client.fget_object(bucket_name=bucket, object_name=key, file_path=str(local_path))


def upload_file(local_path: Path, bucket: str, key: str, content_type: str | None = None) -> str:
    """Upload a file to MinIO and return its URL."""
    client = get_minio_client()

    logger.info(f"Uploading {local_path} to {bucket}/{key}")

    client.fput_object(
        bucket_name=bucket,
        object_name=key,
        file_path=str(local_path),
        content_type=content_type or "application/octet-stream",
    )

    # Return the public URL (using the public endpoint for browser access)
    protocol = "https" if settings.minio_use_ssl else "http"
    public_endpoint = settings.minio_public_endpoint or settings.minio_endpoint
    return f"{protocol}://{public_endpoint}/{bucket}/{key}"


def delete_file(bucket: str, key: str) -> None:
    """Delete a file from MinIO."""
    client = get_minio_client()

    try:
        client.remove_object(bucket_name=bucket, object_name=key)
        logger.info(f"Deleted {bucket}/{key}")
    except S3Error as e:
        logger.warning(f"Failed to delete {bucket}/{key}: {e}")
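Each helper builds its own client, so calls are independent and usable from any point in the pipeline. A round-trip sketch against the defaults in config.py — it assumes MinIO is running locally and the bucket already exists (bucket creation is not handled here):

# Sketch: upload then re-download through the storage helpers.
# Endpoint and bucket are the config defaults; the key is a placeholder.
from pathlib import Path

from src.services.storage import download_file, upload_file

url = upload_file(
    Path("/tmp/lod-out/example-model-id_lod0.glb"),
    "converted-models",
    "example-model-id/model.glb",
    content_type="model/gltf-binary",
)
print(url)  # http://localhost:9000/converted-models/example-model-id/model.glb

download_file("converted-models", "example-model-id/model.glb", Path("/tmp/roundtrip.glb"))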
worker/src/services/thumbnail.py (new file)
@@ -0,0 +1,277 @@
"""Thumbnail generation service using trimesh and legacy OSMesa."""

import logging
from pathlib import Path

import numpy as np
import trimesh
from PIL import Image

from ..config import settings

logger = logging.getLogger(__name__)

# Maximum faces to render for a thumbnail (performance limit for immediate mode)
MAX_FACES_FOR_RENDER = 50000


def generate_thumbnail(glb_path: Path, output_path: Path) -> bool:
    """
    Generate a thumbnail image from a GLB file.

    Uses legacy OSMesa for off-screen rendering.
    Falls back to a simple placeholder if rendering fails.
    """
    try:
        return _generate_with_osmesa(glb_path, output_path)
    except ImportError as e:
        logger.warning(f"OSMesa not available: {e}, using simple thumbnail")
        return _generate_simple_thumbnail(glb_path, output_path)
    except Exception as e:
        logger.error(f"Failed to generate thumbnail with OSMesa: {e}", exc_info=True)
        return _generate_simple_thumbnail(glb_path, output_path)


def _generate_with_osmesa(glb_path: Path, output_path: Path) -> bool:
    """Generate a thumbnail using a legacy OSMesa context and OpenGL."""
    from OpenGL import osmesa
    from OpenGL.GL import (
        GL_COLOR_BUFFER_BIT,
        GL_DEPTH_BUFFER_BIT,
        GL_DEPTH_TEST,
        GL_LESS,
        GL_LIGHT0,
        GL_LIGHT1,
        GL_LIGHTING,
        GL_MODELVIEW,
        GL_NORMALIZE,
        GL_POSITION,
        GL_PROJECTION,
        GL_SMOOTH,
        GL_TRIANGLES,
        GL_UNSIGNED_BYTE,
        glBegin,
        glClear,
        glClearColor,
        glColor3f,
        glDepthFunc,
        glEnable,
        glEnd,
        glFinish,
        glLightfv,
        glLoadIdentity,
        glMatrixMode,
        glNormal3fv,
        glShadeModel,
        glVertex3fv,
        glViewport,
    )
    from OpenGL.GLU import gluLookAt, gluPerspective

    # Load the mesh
    mesh = trimesh.load(str(glb_path))
    logger.info(f"Loaded mesh from {glb_path}")

    # Get combined geometry if it's a scene
    if isinstance(mesh, trimesh.Scene):
        # Combine all geometries into a single mesh
        meshes = []
        for name, geom in mesh.geometry.items():
            if isinstance(geom, trimesh.Trimesh):
                meshes.append(geom)
        if meshes:
            mesh = trimesh.util.concatenate(meshes)
        else:
            logger.warning("No valid meshes found in scene")
            return _generate_simple_thumbnail(glb_path, output_path)

    if not isinstance(mesh, trimesh.Trimesh):
        logger.warning(f"Unsupported mesh type: {type(mesh)}")
        return _generate_simple_thumbnail(glb_path, output_path)

    # Simplify the mesh if it is too large for immediate-mode rendering
    num_faces = len(mesh.faces)
    if num_faces > MAX_FACES_FOR_RENDER:
        logger.info(f"Simplifying mesh from {num_faces} to ~{MAX_FACES_FOR_RENDER} faces for thumbnail")
        try:
            # Try the fast-simplification library first
            import fast_simplification
            simplified_vertices, simplified_faces = fast_simplification.simplify(
                mesh.vertices,
                mesh.faces,
                target_reduction=1 - (MAX_FACES_FOR_RENDER / num_faces)
            )
            mesh = trimesh.Trimesh(vertices=simplified_vertices, faces=simplified_faces)
            logger.info(f"Simplified to {len(mesh.faces)} faces using fast-simplification")
        except Exception as e:
            logger.warning(f"Simplification failed: {e}, will render a subset")
            # Limit to the first N faces if simplification fails
            if num_faces > MAX_FACES_FOR_RENDER:
                mesh.faces = mesh.faces[:MAX_FACES_FOR_RENDER]

    # Get mesh data
    vertices = mesh.vertices
    faces = mesh.faces
    face_normals = mesh.face_normals if hasattr(mesh, 'face_normals') else None

    # Get vertex colors if available
    vertex_colors = None
    if hasattr(mesh, 'visual') and hasattr(mesh.visual, 'vertex_colors'):
        try:
            vc = mesh.visual.vertex_colors
            if vc is not None and len(vc) > 0:
                vertex_colors = vc[:, :3].astype(np.float32) / 255.0
        except Exception:
            pass

    # Calculate the bounding box and camera position
    bounds = mesh.bounds
    center = (bounds[0] + bounds[1]) / 2
    size = np.max(bounds[1] - bounds[0])

    if size == 0 or np.isnan(size):
        size = 1.0

    # Camera setup - position for an isometric-like view
    camera_distance = size * 2.5
    camera_pos = center + np.array([
        camera_distance * 0.7,
        camera_distance * 0.5,
        camera_distance * 0.7,
    ])

    # Create an OSMesa context using the legacy function
    width, height = settings.thumbnail_size
    ctx = osmesa.OSMesaCreateContextExt(osmesa.OSMESA_RGBA, 24, 0, 0, None)

    if not ctx:
        raise RuntimeError("Failed to create OSMesa context")

    try:
        # Create a buffer for rendering
        buffer = np.zeros((height, width, 4), dtype=np.uint8)

        # Make the context current
        result = osmesa.OSMesaMakeCurrent(ctx, buffer, GL_UNSIGNED_BYTE, width, height)
        if not result:
            raise RuntimeError("Failed to make OSMesa context current")

        # Set up the viewport
        glViewport(0, 0, width, height)

        # Set up the projection matrix
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        aspect = width / height
        gluPerspective(45.0, aspect, size * 0.01, size * 100)

        # Set up the modelview matrix
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        gluLookAt(
            float(camera_pos[0]), float(camera_pos[1]), float(camera_pos[2]),
            float(center[0]), float(center[1]), float(center[2]),
            0, 1, 0
        )

        # Enable depth testing
        glEnable(GL_DEPTH_TEST)
        glDepthFunc(GL_LESS)

        # Enable lighting
        glEnable(GL_LIGHTING)
        glEnable(GL_LIGHT0)
        glEnable(GL_LIGHT1)
        glEnable(GL_NORMALIZE)
        glShadeModel(GL_SMOOTH)

        # Enable color material so vertex colors work with lighting
        from OpenGL.GL import (
            GL_AMBIENT_AND_DIFFUSE,
            GL_COLOR_MATERIAL,
            GL_FRONT_AND_BACK,
            glColorMaterial,
        )
        glEnable(GL_COLOR_MATERIAL)
        glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE)

        # Set up light 0 (main light from the camera direction)
        from OpenGL.GL import GL_AMBIENT, GL_DIFFUSE
        light0_pos = [float(camera_pos[0]), float(camera_pos[1]), float(camera_pos[2]), 0.0]  # Directional light
        glLightfv(GL_LIGHT0, GL_POSITION, light0_pos)
        glLightfv(GL_LIGHT0, GL_AMBIENT, [0.3, 0.3, 0.3, 1.0])
        glLightfv(GL_LIGHT0, GL_DIFFUSE, [0.8, 0.8, 0.8, 1.0])

        # Set up light 1 (fill light from the opposite side)
        light1_pos = [float(-camera_pos[0]), float(camera_pos[1]), float(-camera_pos[2]), 0.0]
        glLightfv(GL_LIGHT1, GL_POSITION, light1_pos)
        glLightfv(GL_LIGHT1, GL_AMBIENT, [0.2, 0.2, 0.2, 1.0])
        glLightfv(GL_LIGHT1, GL_DIFFUSE, [0.5, 0.5, 0.5, 1.0])

        # Clear buffers
        glClearColor(0.15, 0.15, 0.18, 1.0)
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)

        # Render the mesh using immediate mode
        glBegin(GL_TRIANGLES)

        for i, face in enumerate(faces):
            # Set the face normal for lighting
            if face_normals is not None:
                n = face_normals[i]
                glNormal3fv([float(n[0]), float(n[1]), float(n[2])])

            for vertex_idx in face:
                # Set the vertex color (default to light gray if no colors)
                if vertex_colors is not None and vertex_idx < len(vertex_colors):
                    c = vertex_colors[vertex_idx]
                    glColor3f(float(c[0]), float(c[1]), float(c[2]))
                else:
                    glColor3f(0.75, 0.75, 0.78)

                # Draw the vertex
                v = vertices[vertex_idx]
                glVertex3fv([float(v[0]), float(v[1]), float(v[2])])

        glEnd()
        glFinish()

        # Flip the image vertically (the OpenGL origin is bottom-left)
        image_data = np.flipud(buffer)

        # Save the image
        output_path.parent.mkdir(parents=True, exist_ok=True)
        Image.fromarray(image_data).save(str(output_path))
        logger.info(f"Thumbnail generated with OSMesa: {output_path} ({len(faces)} faces rendered)")
        return True

    finally:
        osmesa.OSMesaDestroyContext(ctx)


def _generate_simple_thumbnail(glb_path: Path, output_path: Path) -> bool:
    """
    Generate a simple placeholder thumbnail when OSMesa is not available.

    Creates a radial-gradient image rather than a render of the model.
    """
    width, height = settings.thumbnail_size

    # Create a simple gradient background
    img = Image.new('RGB', (width, height), color=(64, 64, 64))

    pixels = img.load()
    center_x, center_y = width // 2, height // 2

    # Shade each pixel by its distance from the center (radial gradient)
    for y in range(height):
        for x in range(width):
            dist = ((x - center_x) ** 2 + (y - center_y) ** 2) ** 0.5
            max_dist = (width ** 2 + height ** 2) ** 0.5 / 2
            factor = 1 - (dist / max_dist) * 0.5

            r = int(80 * factor)
            g = int(120 * factor)
            b = int(160 * factor)
            pixels[x, y] = (r, g, b)

    output_path.parent.mkdir(parents=True, exist_ok=True)
    img.save(str(output_path))
    logger.info(f"Simple thumbnail generated: {output_path}")
    return True
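The fallback chain means generate_thumbnail does not raise on rendering problems: a working OSMesa stack yields a lit render, anything else yields the gradient placeholder. A sketch of standalone use, with placeholder paths:

# Sketch: rendering a preview outside the worker loop. Requires the OSMesa
# libraries from the Dockerfile (and PYOPENGL_PLATFORM=osmesa) for a real
# render; otherwise the gradient placeholder is written instead.
from pathlib import Path

from src.services.thumbnail import generate_thumbnail

generate_thumbnail(Path("/tmp/lod-out/example-model-id_lod0.glb"), Path("/tmp/preview.png"))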