bhatanerohan committed
Commit 0d6cb9a · verified · 1 Parent(s): 22d5c4a

Upload modal_sam3d.py

Files changed (1):
  modal_sam3d.py  +366 -0

modal_sam3d.py ADDED
@@ -0,0 +1,366 @@
import modal
import textwrap

# Volume that already contains:
#   sam-3d-objects/checkpoints/pipeline.yaml
# and will now also cache DINOv2 / other model weights.
volume = modal.Volume.from_name("sam3d-weights", create_if_missing=False)
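# The volume can be created and seeded from the Modal CLI before the first
# deploy. The local paths below are an assumption; the remote layout must
# match what setup() expects, i.e. <volume root>/sam-3d-objects/checkpoints/pipeline.yaml:
#   modal volume create sam3d-weights
#   modal volume put sam3d-weights ./sam-3d-objects sam-3d-objects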
# ---------------------------------------------------------------------------
# Image build: CUDA base + PyTorch + PyTorch3D + SAM-3D repo + deps
# ---------------------------------------------------------------------------
sam3d_image = (
    modal.Image.from_registry(
        "nvidia/cuda:12.4.1-devel-ubuntu22.04",
        add_python="3.11",  # Python 3.11
    )
    .apt_install(
        "git",
        "g++",
        "gcc",
        "clang",
        "build-essential",
        "libgl1-mesa-glx",
        "libglib2.0-0",
        "libopenexr-dev",
        "wget",
    )
    # STEP 1: Install the PyTorch CUDA 12.4 stack (hard fail if broken)
    .pip_install(
        "torch==2.5.1",
        "torchvision",
        "torchaudio",
        index_url="https://download.pytorch.org/whl/cu124",
    )
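    # Optional build-time sanity check (a sketch, not in the original build).
    # Note torch.cuda.is_available() is False during image builds with no GPU
    # attached, so only the wheel metadata is printed here:
    # .run_commands(
    #     "python -c \"import torch; print(torch.__version__, torch.version.cuda)\""
    # )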
    # STEP 1.5: Build deps (needed for PyTorch3D / SAM-3D)
    .pip_install(
        "fvcore",
        "iopath",
        "numpy",
        "ninja",
        "setuptools",
        "wheel",
    )
    # STEP 2: Clone the SAM-3D Objects repo
    .run_commands(
        "echo '[STEP 2] Cloning facebookresearch/sam-3d-objects' && "
        "git clone https://github.com/facebookresearch/sam-3d-objects.git /sam3d"
    )
    # STEP 2.1: Remove nvidia-pyindex from pyproject.toml so pip doesn't try to build it
    .run_commands(
        "echo '[STEP 2.1] Removing nvidia-pyindex from pyproject.toml (if present)' && "
        "cd /sam3d && "
        "if [ -f pyproject.toml ]; then "
        "  sed -i '/nvidia-pyindex/d' pyproject.toml; "
        "fi"
    )
    # STEP 3: Install [p3d] extras (PyTorch3D-related deps), fail-soft
    .run_commands(
        "echo '[STEP 3] Installing sam-3d-objects extra [p3d]' && "
        "cd /sam3d && "
        "PIP_EXTRA_INDEX_URL='https://pypi.ngc.nvidia.com https://download.pytorch.org/whl/cu124' "
        "pip install -e '.[p3d]' "
        "|| echo '[WARN] [p3d] extras failed to install, continuing without them.'"
    )
    # STEP 4: Install [inference] extras (Kaolin etc.), fail-soft
    .run_commands(
        "echo '[STEP 4] Installing sam-3d-objects extra [inference] (includes Kaolin etc.)' && "
        "cd /sam3d && "
        "PIP_FIND_LINKS='https://nvidia-kaolin.s3.us-east-2.amazonaws.com/torch-2.5.1_cu121.html' "
        "pip install -e '.[inference]' "
        "|| echo '[WARN] [inference] extras failed to install, continuing without them.'"
    )
    # STEP 5: Helper libs (open3d, trimesh, seaborn), fail-soft
    .run_commands(
        "echo '[STEP 5] Installing helper libraries: open3d, trimesh, seaborn' && "
        "pip install open3d trimesh seaborn "
        "|| echo '[WARN] Helper libs (open3d/trimesh/seaborn) failed to install, continuing.'"
    )
    # STEP 5.5: Config libs required by inference.py (omegaconf, hydra-core)
    .run_commands(
        "echo '[STEP 5.5] Installing config libraries: omegaconf, hydra-core' && "
        "pip install omegaconf hydra-core "
        "|| echo '[WARN] omegaconf/hydra-core failed to install, continuing.'"
    )
    # STEP 5.6: Install utils3d explicitly (inference.py imports it), pinned to a known commit
    .run_commands(
        "echo '[STEP 5.6] Installing utils3d' && "
        "pip install "
        "'git+https://github.com/EasternJournalist/utils3d.git@3913c65d81e05e47b9f367250cf8c0f7462a0900' "
        "|| echo '[WARN] utils3d failed to install, continuing.'"
    )
    # STEP 5.7: Install gradio (inference.py imports it)
    .run_commands(
        "echo '[STEP 5.7] Installing gradio' && "
        "pip install gradio "
        "|| echo '[WARN] gradio failed to install, continuing.'"
    )
110
+ .run_commands(
111
+ "echo '[STEP 5.8] Installing kaolin from NVIDIA index' && "
112
+ "pip install kaolin "
113
+ "-f https://nvidia-kaolin.s3.us-east-2.amazonaws.com/torch-2.5.1_cu121.html "
114
+ "|| echo '[WARN] kaolin install failed, continuing.'"
115
+ )
116
+
117
+ # STEP 5.9: Install loguru (needed by inference_pipeline_pointmap)
118
+ .run_commands(
119
+ "echo '[STEP 5.9] Installing loguru' && "
120
+ "pip install loguru "
121
+ "|| echo '[WARN] loguru failed to install, continuing.'"
122
+ )
123
+
124
+ # STEP 5.91: Install timm (vision transformer lib)
125
+ .run_commands(
126
+ "echo '[STEP 5.91] Installing timm' && "
127
+ "pip install timm "
128
+ "|| echo '[WARN] timm failed to install, continuing.'"
129
+ )
130
+
    # STEP 5.92: Install PyTorch3D from GitHub @stable (no build isolation, no deps)
    .run_commands(
        "echo '[STEP 5.92] Installing PyTorch3D from GitHub @stable (no build isolation, no deps)' && "
        "python -c 'import pytorch3d' 2>/dev/null && "
        "echo 'PyTorch3D already installed, skipping...' || ( "
        "export FORCE_CUDA=1 && "
        "export TORCH_CUDA_ARCH_LIST='8.0;8.6;8.9;9.0' && "
        "pip install --no-build-isolation --no-deps "
        "\"git+https://github.com/facebookresearch/pytorch3d.git@stable\" "
        ")"
    )
    # STEP 5.93: Install sam-3d-objects [dev] extras without pulling their pinned deps
    .run_commands(
        "cd /sam3d && pip install '.[dev]' --no-deps"
    )
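    # Optional post-install check (a sketch, not part of the original build):
    # fail fast, or at least warn, if the compiled PyTorch3D extension cannot
    # be imported, since the runtime stub below only covers a few functions.
    # .run_commands(
    #     "python -c 'import pytorch3d; print(pytorch3d.__version__)' "
    #     "|| echo '[WARN] pytorch3d import failed; runtime stub will be used.'"
    # )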
    # Remaining runtime deps, each installed as its own cached image layer.
    .run_commands("pip install optree")
    .run_commands("pip install astor==0.8.1")
    .run_commands("pip install opencv-python")
    .run_commands("pip install lightning")
    .run_commands("pip install spconv-cu121==2.3.8")
    .run_commands("pip install psutil && pip install --no-build-isolation flash_attn==2.8.3 || echo '[WARN] flash_attn failed'")
    .run_commands("pip install xatlas==0.0.9")
    .run_commands("pip install pyvista")
    .run_commands("pip install pymeshfix==0.17.0")
    .run_commands("pip install igraph")
    .run_commands("pip install easydict")
    .run_commands(
        "export TORCH_CUDA_ARCH_LIST='8.0;8.6;8.9;9.0' && "
        "pip install --no-build-isolation 'git+https://github.com/nerfstudio-project/gsplat.git@2323de5905d5e90e035f792fe65bad0fedd413e7'"
    )
    .run_commands("pip install 'git+https://github.com/microsoft/MoGe.git@a8c37341bc0325ca99b9d57981cc3bb2bd3e255b'")
    .run_commands("pip install imageio")
    # STEP 6: Patch hydra, skip if it fails
    .run_commands(
        "echo '[STEP 6] Patching hydra' && "
        "cd /sam3d && "
        "./patching/hydra "
        "|| echo '[WARN] Hydra patch failed, continuing without patch.'"
    )
)

app = modal.App("sam3d-objects-inference", image=sam3d_image)
# ---------------------------------------------------------------------------
# Model class: loads SAM-3D once per container. setup() also installs a
# minimal pytorch3d stub as a fallback so SAM-3D's imports still work when
# the compiled PyTorch3D build is unavailable.
# ---------------------------------------------------------------------------
@app.cls(
    image=sam3d_image,
    gpu="A10G",
    timeout=600,
    volumes={"/weights": volume},
    scaledown_window=300,  # renamed from container_idle_timeout
    enable_memory_snapshot=True,  # required for @modal.enter(snap=True)
)
class SAM3DModel:

    @modal.enter(snap=True)
    def setup(self):
        """Load the model once at container start; snap=True lets Modal snapshot the loaded state."""
        import os
        import sys
        import math
        import types
        import torch

        # Cache setup: point every framework cache at the persistent volume
        # so downloaded weights survive across containers.
        CACHE_DIR = "/weights/model_cache"
        os.makedirs(CACHE_DIR, exist_ok=True)
        os.environ["TORCH_HOME"] = CACHE_DIR
        os.environ["TORCH_HUB"] = os.path.join(CACHE_DIR, "hub")
        os.environ["HF_HOME"] = os.path.join(CACHE_DIR, "huggingface")
        os.environ["TRANSFORMERS_CACHE"] = os.path.join(CACHE_DIR, "huggingface")
        os.environ["XDG_CACHE_HOME"] = CACHE_DIR
        os.environ["TIMM_CACHE"] = os.path.join(CACHE_DIR, "timm")
        # Some dependencies locate the CUDA toolkit via these variables.
        os.environ.setdefault("CUDA_HOME", "/usr/local/cuda")
        os.environ.setdefault("CONDA_PREFIX", "/usr/local/cuda")
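
        # With the caches redirected onto the volume, the first cold start
        # pays the download cost and later containers reuse the cached
        # weights. Exactly which variables each dependency honors is an
        # assumption worth verifying (TRANSFORMERS_CACHE, for instance, is
        # deprecated in favor of HF_HOME in recent transformers releases).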

        # pytorch3d stub: only installed if the real package failed to build.
        try:
            import pytorch3d  # noqa: F401
        except Exception:
            pkg = types.ModuleType("pytorch3d")
            transforms_mod = types.ModuleType("pytorch3d.transforms")
            renderer_mod = types.ModuleType("pytorch3d.renderer")

            def _quat_conj(q):
                w, x, y, z = q.unbind(-1)
                return torch.stack((w, -x, -y, -z), dim=-1)

            def quaternion_multiply(q1, q2):
                # Hamilton product in real-first (w, x, y, z) layout, as in pytorch3d.
                w1, x1, y1, z1 = q1.unbind(-1)
                w2, x2, y2, z2 = q2.unbind(-1)
                return torch.stack([w1*w2 - x1*x2 - y1*y2 - z1*z2,
                                    w1*x2 + x1*w2 + y1*z2 - z1*y2,
                                    w1*y2 - x1*z2 + y1*w2 + z1*x2,
                                    w1*z2 + x1*y2 - y1*x2 + z1*w2], dim=-1)

            def quaternion_invert(q):
                # Conjugate over squared norm; the epsilon guards against zero quaternions.
                return _quat_conj(q) / (q.norm(dim=-1, keepdim=True) ** 2 + 1e-8)

            transforms_mod.quaternion_multiply = quaternion_multiply
            transforms_mod.quaternion_invert = quaternion_invert

            class Transform3d:
                # Minimal stand-in using the column-vector convention
                # (matrix @ points); note that the real pytorch3d Transform3d
                # uses row vectors.
                def __init__(self, matrix=None, device=None):
                    self.matrix = torch.eye(4, device=device).unsqueeze(0) if matrix is None else matrix

                def compose(self, other):
                    return Transform3d(other.matrix @ self.matrix)

                def transform_points(self, points):
                    if points.dim() == 2:
                        pts = torch.cat([points, torch.ones(points.shape[0], 1, device=points.device)], dim=-1)
                        return (self.matrix[0] @ pts.T).T[..., :3]
                    elif points.dim() == 3:
                        B, N, _ = points.shape
                        pts = torch.cat([points, torch.ones(B, N, 1, device=points.device)], dim=-1)
                        mat = self.matrix.expand(B, -1, -1) if self.matrix.shape[0] == 1 and B > 1 else self.matrix
                        return torch.bmm(mat, pts.transpose(1, 2)).transpose(1, 2)[..., :3]

            transforms_mod.Transform3d = Transform3d

            def look_at_view_transform(dist=1.0, elev=0.0, azim=0.0, device=None):
                dist_t = torch.tensor([dist], device=device, dtype=torch.float32)
                elev_rad = torch.tensor([elev], device=device) * math.pi / 180.0
                azim_rad = torch.tensor([azim], device=device) * math.pi / 180.0
                x = dist_t * torch.cos(elev_rad) * torch.sin(azim_rad)
                y = dist_t * torch.sin(elev_rad)
                z = dist_t * torch.cos(elev_rad) * torch.cos(azim_rad)
                cam_pos = torch.stack([x, y, z], dim=-1)
                up = torch.tensor([[0.0, 1.0, 0.0]], device=device)
                # Follow the pytorch3d convention: +z points from the camera
                # toward the scene (the look-at target is the origin), and
                # T = -R^T @ camera_position.
                z_axis = torch.nn.functional.normalize(-cam_pos, dim=-1)
                x_axis = torch.nn.functional.normalize(torch.cross(up, z_axis, dim=-1), dim=-1)
                y_axis = torch.cross(z_axis, x_axis, dim=-1)
                R = torch.stack([x_axis, y_axis, z_axis], dim=-1)
                T = -torch.bmm(R.transpose(1, 2), cam_pos.unsqueeze(-1)).squeeze(-1)
                return R, T

            renderer_mod.look_at_view_transform = look_at_view_transform
            pkg.transforms = transforms_mod
            pkg.renderer = renderer_mod
            sys.modules["pytorch3d"] = pkg
            sys.modules["pytorch3d.transforms"] = transforms_mod
            sys.modules["pytorch3d.renderer"] = renderer_mod

        sys.path.insert(0, "/sam3d")
        sys.path.insert(0, "/sam3d/notebook")
        from inference import Inference, load_image

        self.load_image = load_image
        self.model = Inference("/weights/sam-3d-objects/checkpoints/pipeline.yaml", compile=False)
        print("[SETUP] Model loaded!")
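
        # Caveat (an assumption to verify, not a statement from the original
        # file): Modal takes the memory snapshot after @modal.enter(snap=True)
        # methods return and snapshots CPU state; if Inference eagerly places
        # weights on the GPU here, restoring from a snapshot may additionally
        # require Modal's GPU memory snapshot support.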

    @modal.method()
    def reconstruct(self, image_bytes: bytes, mask_bytes: bytes | None = None) -> tuple[bytes, bytes | None]:
        import os, io, tempfile, shutil
        import numpy as np
        from PIL import Image
        import torch

        temp_dir = tempfile.mkdtemp()
        try:
            image_path = os.path.join(temp_dir, "image.png")
            mask_path = os.path.join(temp_dir, "mask.png")
            with open(image_path, "wb") as f:
                f.write(image_bytes)

            # Mask source: an explicit mask file wins; otherwise fall back to
            # the alpha channel of an RGBA input.
            pil_image = Image.open(image_path)
            if mask_bytes is not None:
                with open(mask_path, "wb") as f:
                    f.write(mask_bytes)
                mask = np.array(Image.open(mask_path).convert("L"))
            elif pil_image.mode == "RGBA":
                alpha = np.array(pil_image)[:, :, 3]
                mask = (alpha > 128).astype(np.uint8) * 255
                pil_image = pil_image.convert("RGB")
                pil_image.save(image_path)
            else:
                raise ValueError("Provide either: 1) separate mask_bytes, or 2) an RGBA image with an alpha mask")

            if np.sum(mask > 0) < 100:
                raise ValueError("Mask too small: fewer than 100 foreground pixels")

            image = self.load_image(image_path)
            if mask.shape[0] != image.shape[0] or mask.shape[1] != image.shape[1]:
                mask = np.array(Image.fromarray(mask).resize((image.shape[1], image.shape[0]), Image.NEAREST))

            with torch.inference_mode():
                output = self.model(image, mask, seed=42)
        finally:
            # Clean up the temp files even if inference raises.
            shutil.rmtree(temp_dir, ignore_errors=True)

        ply_buffer = io.BytesIO()
        output["gs"].save_ply(ply_buffer)

        glb_bytes = None
        if "mesh" in output and output["mesh"]:
            import trimesh
            mesh = output["mesh"][0] if isinstance(output["mesh"], list) else output["mesh"]
            glb_bytes = trimesh.Trimesh(
                vertices=mesh.vertices.cpu().numpy(),
                faces=mesh.faces.cpu().numpy(),
            ).export(file_type="glb")

        return ply_buffer.getvalue(), glb_bytes


@app.local_entrypoint()
def main(
    input_path: str = "sam3d_1.png",
    mask_path: str = "sam3d_1gray.png",
    output_path: str = "output_model.ply",
):
    """
    Local test:
      # With an RGBA image (mask in the alpha channel):
      modal run modal_sam3d.py --input-path image_rgba.png

      # With a separate mask file (official pattern):
      modal run modal_sam3d.py --input-path image.png --mask-path mask.png
    """
    from pathlib import Path

    input_file = Path(input_path)
    if not input_file.exists():
        print(f"[LOCAL] ERROR: Input image not found: {input_file.resolve()}")
        return

    mask_bytes = None
    if mask_path:
        mask_file = Path(mask_path)
        if mask_file.exists():
            mask_bytes = mask_file.read_bytes()
            print(f"[LOCAL] Using separate mask file: {mask_file}")
        else:
            print(f"[LOCAL] WARNING: Mask file not found: {mask_file}")

    print(f"[LOCAL] Sending {input_file} to SAM-3D on Modal...")
    model = SAM3DModel()
    ply_bytes, glb_bytes = model.reconstruct.remote(input_file.read_bytes(), mask_bytes)

    output_file = Path(output_path)
    output_file.write_bytes(ply_bytes)
    if glb_bytes:
        glb_file = output_file.with_suffix(".glb")
        glb_file.write_bytes(glb_bytes)
        print(f"[LOCAL] Saved mesh to: {glb_file}")
    print(f"[LOCAL] Saved 3D model to: {output_file.resolve()} ({len(ply_bytes)} bytes)")
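

# Deployment sketch (hypothetical client snippet, not part of the upload):
#   modal deploy modal_sam3d.py
# Then, from any Python environment with the modal package installed:
#   SAM3DModel = modal.Cls.from_name("sam3d-objects-inference", "SAM3DModel")
#   ply, glb = SAM3DModel().reconstruct.remote(open("image_rgba.png", "rb").read())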