import modal

# Volume that already contains:
#   sam-3d-objects/checkpoints/pipeline.yaml
# and will now also cache DINOv2 / other model weights.
volume = modal.Volume.from_name("sam3d-weights", create_if_missing=False)
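# If the volume is not yet populated, the checkpoint can be uploaded ahead of
# time with the Modal CLI (the local path below is an assumption; the remote
# path matches the layout expected above):
#   modal volume put sam3d-weights ./checkpoints/pipeline.yaml sam-3d-objects/checkpoints/pipeline.yaml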
# ---------------------------------------------------------------------------
# Image build: CUDA base + PyTorch + PyTorch3D + SAM-3D repo + deps
# ---------------------------------------------------------------------------
sam3d_image = (
    modal.Image.from_registry(
        "nvidia/cuda:12.4.1-devel-ubuntu22.04",
        add_python="3.11",
    )
    .apt_install(
        "git",
        "g++",
        "gcc",
        "clang",
        "build-essential",
        "libgl1-mesa-glx",
        "libglib2.0-0",
        "libopenexr-dev",
        "wget",
    )
    # STEP 1: Install the PyTorch CUDA 12.4 stack (hard fail if broken).
    .pip_install(
        "torch==2.5.1",
        "torchvision",
        "torchaudio",
        index_url="https://download.pytorch.org/whl/cu124",
    )
    # STEP 1.5: Build dependencies (needed for PyTorch3D / SAM-3D).
    .pip_install(
        "fvcore",
        "iopath",
        "numpy",
        "ninja",
        "setuptools",
        "wheel",
    )
    # STEP 2: Clone the SAM-3D Objects repo.
    .run_commands(
        "echo '[STEP 2] Cloning facebookresearch/sam-3d-objects' && "
        "git clone https://github.com/facebookresearch/sam-3d-objects.git /sam3d"
    )
    # STEP 2.1: Remove nvidia-pyindex from pyproject.toml so pip doesn't try to build it.
    .run_commands(
        "echo '[STEP 2.1] Removing nvidia-pyindex from pyproject.toml (if present)' && "
        "cd /sam3d && "
        "if [ -f pyproject.toml ]; then "
        "  sed -i '/nvidia-pyindex/d' pyproject.toml; "
        "fi"
    )
    # STEP 3: Install [p3d] extras (PyTorch3D-related deps), fail-soft.
    .run_commands(
        "echo '[STEP 3] Installing sam-3d-objects extra [p3d]' && "
        "cd /sam3d && "
        "PIP_EXTRA_INDEX_URL='https://pypi.ngc.nvidia.com https://download.pytorch.org/whl/cu124' "
        "pip install -e '.[p3d]' "
        "|| echo '[WARN] [p3d] extras failed to install, continuing without them.'"
    )
    # STEP 4: Install [inference] extras (Kaolin etc.), fail-soft.
    .run_commands(
        "echo '[STEP 4] Installing sam-3d-objects extra [inference] (includes Kaolin etc.)' && "
        "cd /sam3d && "
        "PIP_FIND_LINKS='https://nvidia-kaolin.s3.us-east-2.amazonaws.com/torch-2.5.1_cu121.html' "
        "pip install -e '.[inference]' "
        "|| echo '[WARN] [inference] extras failed to install, continuing without them.'"
    )
    # STEP 5: Helper libraries (open3d, trimesh, seaborn), fail-soft.
    .run_commands(
        "echo '[STEP 5] Installing helper libraries: open3d, trimesh, seaborn' && "
        "pip install open3d trimesh seaborn "
        "|| echo '[WARN] Helper libs (open3d/trimesh/seaborn) failed to install, continuing.'"
    )
    # STEP 5.5: Config libraries required by inference.py (omegaconf, hydra-core).
    .run_commands(
        "echo '[STEP 5.5] Installing config libraries: omegaconf, hydra-core' && "
        "pip install omegaconf hydra-core "
        "|| echo '[WARN] omegaconf/hydra-core failed to install, continuing.'"
    )
    # STEP 5.6: Install utils3d explicitly (inference.py imports this).
    .run_commands(
        "echo '[STEP 5.6] Installing utils3d' && "
        "pip install "
        "'git+https://github.com/EasternJournalist/utils3d.git@3913c65d81e05e47b9f367250cf8c0f7462a0900' "
        "|| echo '[WARN] utils3d failed to install, continuing.'"
    )
    # STEP 5.7: Install gradio (inference.py imports this).
    .run_commands(
        "echo '[STEP 5.7] Installing gradio' && "
        "pip install gradio "
        "|| echo '[WARN] gradio failed to install, continuing.'"
    )
    # STEP 5.8: Install kaolin from NVIDIA's prebuilt wheel index, fail-soft.
    .run_commands(
        "echo '[STEP 5.8] Installing kaolin from NVIDIA index' && "
        "pip install kaolin "
        "-f https://nvidia-kaolin.s3.us-east-2.amazonaws.com/torch-2.5.1_cu121.html "
        "|| echo '[WARN] kaolin install failed, continuing.'"
    )
    # STEP 5.9: Install loguru (needed by inference_pipeline_pointmap).
    .run_commands(
        "echo '[STEP 5.9] Installing loguru' && "
        "pip install loguru "
        "|| echo '[WARN] loguru failed to install, continuing.'"
    )
    # STEP 5.91: Install timm (vision transformer library).
    .run_commands(
        "echo '[STEP 5.91] Installing timm' && "
        "pip install timm "
        "|| echo '[WARN] timm failed to install, continuing.'"
    )
    # STEP 5.92: Install PyTorch3D from GitHub @stable (no build isolation, no
    # deps), skipping the build if an earlier step already provided it.
    .run_commands(
        "echo '[STEP 5.92] Installing PyTorch3D from GitHub @stable (no build isolation, no deps)' && "
        "python -c 'import pytorch3d' 2>/dev/null && "
        "echo 'PyTorch3D already installed, skipping...' || ( "
        "export FORCE_CUDA=1 && "
        "export TORCH_CUDA_ARCH_LIST='8.0;8.6;8.9;9.0' && "
        "pip install --no-build-isolation --no-deps "
        "\"git+https://github.com/facebookresearch/pytorch3d.git@stable\" "
        ")"
    )
    # STEP 5.93: Install the repo's [dev] extras without dependencies.
    .run_commands(
        "cd /sam3d && pip install '.[dev]' --no-deps"
    )
| .run_commands("pip install optree") | |
| .run_commands("pip install astor==0.8.1") | |
| .run_commands("pip install opencv-python") | |
| .run_commands("pip install lightning") | |
| .run_commands("pip install spconv-cu121==2.3.8") | |
| .run_commands("pip install psutil && pip install --no-build-isolation flash_attn==2.8.3 || echo '[WARN] flash_attn failed'") | |
| .run_commands("pip install xatlas==0.0.9") | |
| .run_commands("pip install pyvista") | |
| .run_commands("pip install pymeshfix==0.17.0") | |
| .run_commands("pip install igraph") | |
| .run_commands("pip install easydict") | |
| .run_commands("pip install igraph") | |
    # gsplat (Gaussian splatting rasterizer), built from source at a pinned commit.
    .run_commands(
        "export TORCH_CUDA_ARCH_LIST='8.0;8.6;8.9;9.0' && "
        "pip install --no-build-isolation 'git+https://github.com/nerfstudio-project/gsplat.git@2323de5905d5e90e035f792fe65bad0fedd413e7'"
    )
| .run_commands("pip install igraph") | |
| .run_commands("pip install 'git+https://github.com/microsoft/MoGe.git@a8c37341bc0325ca99b9d57981cc3bb2bd3e255b'") | |
| .run_commands("pip install imageio") | |
    # STEP 6: Patch hydra; skip if it fails.
    .run_commands(
        "echo '[STEP 6] Patching hydra' && "
        "cd /sam3d && "
        "./patching/hydra "
        "|| echo '[WARN] Hydra patch failed, continuing without patch.'"
    )
)
| app = modal.App("sam3d-objects-inference", image=sam3d_image) | |

# ---------------------------------------------------------------------------
# SAM-3D inference class; setup() falls back to a minimal pytorch3d stub so
# SAM-3D's imports still work if the real PyTorch3D build failed.
# ---------------------------------------------------------------------------
# The @app.cls decorator is required for the .remote() call in main() to work;
# the GPU type and timeout here are assumptions, adjust to your workload.
@app.cls(gpu="A100", volumes={"/weights": volume}, timeout=1800, enable_memory_snapshot=True)
class SAM3DModel:
    @modal.enter(snap=True)
    def setup(self):
        """Model loads once when the container starts; snap=True caches the loaded state in a memory snapshot."""
        import os
        import sys
        import math
        import types
        import torch

        # Cache setup
        CACHE_DIR = "/weights/model_cache"
        os.makedirs(CACHE_DIR, exist_ok=True)
        os.environ["TORCH_HOME"] = CACHE_DIR
        os.environ["TORCH_HUB"] = os.path.join(CACHE_DIR, "hub")
        os.environ["HF_HOME"] = os.path.join(CACHE_DIR, "huggingface")
        os.environ["TRANSFORMERS_CACHE"] = os.path.join(CACHE_DIR, "huggingface")
        os.environ["XDG_CACHE_HOME"] = CACHE_DIR
        os.environ["TIMM_CACHE"] = os.path.join(CACHE_DIR, "timm")
        os.environ.setdefault("CUDA_HOME", "/usr/local/cuda")
        os.environ.setdefault("CONDA_PREFIX", "/usr/local/cuda")
        # pytorch3d stub: only used if the real PyTorch3D failed to build above.
        try:
            import pytorch3d  # noqa: F401
        except Exception:
            pkg = types.ModuleType("pytorch3d")
            transforms_mod = types.ModuleType("pytorch3d.transforms")
            renderer_mod = types.ModuleType("pytorch3d.renderer")

            def _quat_conj(q):
                w, x, y, z = q.unbind(-1)
                return torch.stack((w, -x, -y, -z), dim=-1)

            def quaternion_multiply(q1, q2):
                # Hamilton product of (w, x, y, z) quaternions.
                w1, x1, y1, z1 = q1.unbind(-1)
                w2, x2, y2, z2 = q2.unbind(-1)
                return torch.stack(
                    [
                        w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2,
                        w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2,
                        w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2,
                        w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2,
                    ],
                    dim=-1,
                )

            def quaternion_invert(q):
                # Inverse = conjugate / squared norm; epsilon guards near-zero quaternions.
                return _quat_conj(q) / (q.norm(dim=-1, keepdim=True) ** 2 + 1e-8)

            transforms_mod.quaternion_multiply = quaternion_multiply
            transforms_mod.quaternion_invert = quaternion_invert

            class Transform3d:
                def __init__(self, matrix=None, device=None):
                    self.matrix = torch.eye(4, device=device).unsqueeze(0) if matrix is None else matrix

                def compose(self, other):
                    return Transform3d(other.matrix @ self.matrix)

                def transform_points(self, points):
                    if points.dim() == 2:
                        pts = torch.cat([points, torch.ones(points.shape[0], 1, device=points.device)], dim=-1)
                        return (self.matrix[0] @ pts.T).T[..., :3]
                    elif points.dim() == 3:
                        B, N, _ = points.shape
                        pts = torch.cat([points, torch.ones(B, N, 1, device=points.device)], dim=-1)
                        mat = self.matrix.expand(B, -1, -1) if self.matrix.shape[0] == 1 and B > 1 else self.matrix
                        return torch.bmm(mat, pts.transpose(1, 2)).transpose(1, 2)[..., :3]

            transforms_mod.Transform3d = Transform3d

            def look_at_view_transform(dist=1.0, elev=0.0, azim=0.0, device=None):
                # Camera position from spherical coordinates (degrees), then a
                # right-handed look-at basis pointing back at the origin.
                dist_t = torch.tensor([dist], device=device, dtype=torch.float32)
                elev_rad = torch.tensor([elev], device=device) * math.pi / 180.0
                azim_rad = torch.tensor([azim], device=device) * math.pi / 180.0
                x = dist_t * torch.cos(elev_rad) * torch.sin(azim_rad)
                y = dist_t * torch.sin(elev_rad)
                z = dist_t * torch.cos(elev_rad) * torch.cos(azim_rad)
                cam_pos = torch.stack([x, y, z], dim=-1)
                up = torch.tensor([[0.0, 1.0, 0.0]], device=device)
                z_axis = torch.nn.functional.normalize(cam_pos, dim=-1)
                x_axis = torch.nn.functional.normalize(torch.cross(up, z_axis, dim=-1), dim=-1)
                y_axis = torch.cross(z_axis, x_axis, dim=-1)
                R = torch.stack([x_axis, y_axis, z_axis], dim=-1)
                T = -torch.bmm(R, cam_pos.unsqueeze(-1)).squeeze(-1)
                return R, T

            renderer_mod.look_at_view_transform = look_at_view_transform

            pkg.transforms = transforms_mod
            pkg.renderer = renderer_mod
            sys.modules["pytorch3d"] = pkg
            sys.modules["pytorch3d.transforms"] = transforms_mod
            sys.modules["pytorch3d.renderer"] = renderer_mod

        sys.path.insert(0, "/sam3d")
        sys.path.insert(0, "/sam3d/notebook")
        from inference import Inference, load_image

        self.load_image = load_image
        self.model = Inference("/weights/sam-3d-objects/checkpoints/pipeline.yaml", compile=False)
        print("[SETUP] Model loaded!")
    @modal.method()
    def reconstruct(self, image_bytes: bytes, mask_bytes: bytes = None) -> tuple[bytes, bytes]:
        import os, io, tempfile, shutil
        import numpy as np
        from PIL import Image
        import torch

        temp_dir = tempfile.mkdtemp()
        image_path = os.path.join(temp_dir, "image.png")
        mask_path = os.path.join(temp_dir, "mask.png")
        with open(image_path, "wb") as f:
            f.write(image_bytes)
        pil_image = Image.open(image_path)

        # The mask comes either as a separate file or as the alpha channel of an RGBA image.
        if mask_bytes is not None:
            with open(mask_path, "wb") as f:
                f.write(mask_bytes)
            mask = np.array(Image.open(mask_path).convert("L"))
        elif pil_image.mode == "RGBA":
            alpha = np.array(pil_image)[:, :, 3]
            mask = (alpha > 128).astype(np.uint8) * 255
            pil_image = pil_image.convert("RGB")
            pil_image.save(image_path)
        else:
            raise ValueError("Provide either: 1) separate mask_bytes, or 2) an RGBA image with an alpha mask")
        if np.sum(mask > 0) < 100:
            raise ValueError("Mask too small!")

        image = self.load_image(image_path)
        if mask.shape[0] != image.shape[0] or mask.shape[1] != image.shape[1]:
            mask = np.array(Image.fromarray(mask).resize((image.shape[1], image.shape[0]), Image.NEAREST))

        with torch.inference_mode():
            output = self.model(image, mask, seed=42)
        shutil.rmtree(temp_dir, ignore_errors=True)

        # Gaussian splat -> PLY bytes; mesh (when present) -> GLB via trimesh.
        ply_buffer = io.BytesIO()
        output["gs"].save_ply(ply_buffer)
        glb_bytes = None
        if "mesh" in output and output["mesh"]:
            import trimesh
            mesh = output["mesh"][0] if isinstance(output["mesh"], list) else output["mesh"]
            glb_bytes = trimesh.Trimesh(
                vertices=mesh.vertices.cpu().numpy(),
                faces=mesh.faces.cpu().numpy(),
            ).export(file_type="glb")
        return ply_buffer.getvalue(), glb_bytes

@app.local_entrypoint()
def main(
    input_path: str = "sam3d_1.png",
    mask_path: str = "sam3d_1gray.png",
    output_path: str = "output_model.ply",
):
    """
    Local test:

      # With an RGBA image (mask in the alpha channel):
      modal run modal_sam3d.py --input-path image_rgba.png

      # With a separate mask file (official pattern):
      modal run modal_sam3d.py --input-path image.png --mask-path mask.png
    """
    from pathlib import Path

    input_file = Path(input_path)
    if not input_file.exists():
        print(f"[LOCAL] ERROR: Input image not found: {input_file.resolve()}")
        return

    mask_bytes = None
    if mask_path:
        mask_file = Path(mask_path)
        if mask_file.exists():
            mask_bytes = mask_file.read_bytes()
            print(f"[LOCAL] Using separate mask file: {mask_file}")
        else:
            print(f"[LOCAL] WARNING: Mask file not found: {mask_file}")

    print(f"[LOCAL] Sending {input_file} to SAM-3D on Modal...")
    model = SAM3DModel()
    ply_bytes, glb_bytes = model.reconstruct.remote(input_file.read_bytes(), mask_bytes)

    output_file = Path(output_path)
    output_file.write_bytes(ply_bytes)
    if glb_bytes:
        glb_file = Path(output_path).with_suffix(".glb")
        glb_file.write_bytes(glb_bytes)
        print(f"[LOCAL] Saved mesh to: {glb_file}")
    print(f"[LOCAL] Saved 3D model to: {output_file.resolve()} ({len(ply_bytes)} bytes)")