This script is a simple pipeline script for the Tripo3D API, supporting
text-to-model and image-to-model modes. It requires a platform API key
as well as the tripo3d package, available from PyPI (via pip install tripo3d).
-
-
Save tspspi/a0dbffa5c95f48224f7100018b88614d to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| #!/usr/bin/env python3 | |
| from __future__ import annotations | |
| import argparse | |
| import asyncio | |
| import json | |
| import os | |
| import re | |
| from pathlib import Path | |
| from typing import Any, Dict, Iterable, List, Optional | |
| from tripo3d import TripoClient | |
| try: | |
| from tripo3d.models import RigType, RigSpec | |
| except Exception: | |
| RigType = None | |
| RigSpec = None | |
def ensure_dir(path: Path) -> None:
    """Create *path* (including any missing parent directories) if absent."""
    path.mkdir(parents=True, exist_ok=True)
def sanitize_name(name: str) -> str:
    """Reduce *name* to a filesystem-safe token; fall back to "part" if empty.

    Disallowed characters collapse to single underscores, and leading or
    trailing underscores are stripped.
    """
    cleaned = re.sub(r"[^A-Za-z0-9._-]+", "_", name.strip())
    cleaned = re.sub(r"_+", "_", cleaned).strip("_")
    return cleaned if cleaned else "part"
def normalize_rig_type(name: Optional[str]):
    """Translate a CLI rig-type string into the SDK's RigType enum member.

    Returns None when *name* is falsy or the RigType enum could not be
    imported; raises KeyError for an unrecognized name.
    """
    if not name or RigType is None:
        return None
    key = name.strip().lower()
    return {
        "biped": RigType.BIPED,
        "quadruped": RigType.QUADRUPED,
        "hexapod": RigType.HEXAPOD,
        "octopod": RigType.OCTOPOD,
        "avian": RigType.AVIAN,
        "serpentine": RigType.SERPENTINE,
        "aquatic": RigType.AQUATIC,
        "others": RigType.OTHERS,
    }[key]
def normalize_rig_spec(name: Optional[str]):
    """Translate a CLI rig-spec string into the SDK's RigSpec enum member.

    Returns None when *name* is falsy or the RigSpec enum could not be
    imported; raises KeyError for an unrecognized name.
    """
    if not name or RigSpec is None:
        return None
    key = name.strip().lower()
    return {"mixamo": RigSpec.MIXAMO, "tripo": RigSpec.TRIPO}[key]
def to_plain(value: Any) -> Any:
    """Recursively convert *value* into JSON-serializable primitives.

    Scalars pass through, Paths become strings, mappings/sequences are
    converted element-wise (tuples and sets become lists), objects with a
    ``__dict__`` become dicts of their public attributes, and anything
    else falls back to ``str(value)``.
    """
    if value is None or isinstance(value, (str, int, float, bool)):
        return value
    if isinstance(value, Path):
        return str(value)
    if isinstance(value, dict):
        return {str(key): to_plain(item) for key, item in value.items()}
    if isinstance(value, (list, tuple, set)):
        return [to_plain(item) for item in value]
    if hasattr(value, "__dict__"):
        return {
            key: to_plain(item)
            for key, item in vars(value).items()
            if not key.startswith("_")
        }
    return str(value)
def task_to_dict(task: Any) -> Dict[str, Any]:
    """Snapshot every public, non-callable attribute of *task* as plain data.

    Attributes whose access raises are skipped — some SDK properties may
    raise when the underlying task data is incomplete.
    """
    snapshot: Dict[str, Any] = {}
    for attr in dir(task):
        if attr.startswith("_"):
            continue
        try:
            raw = getattr(task, attr)
        except Exception:
            continue
        if not callable(raw):
            snapshot[attr] = to_plain(raw)
    return snapshot
async def save_task_metadata(task: Any, out_dir: Path, stem: str) -> Path:
    """Write the task's public attributes to ``<stem>.task.json`` in *out_dir*.

    Returns the path of the written metadata file.
    """
    target = out_dir / f"{stem}.task.json"
    payload = json.dumps(task_to_dict(task), indent=2, ensure_ascii=False)
    target.write_text(payload, encoding="utf-8")
    return target
async def wait_success(client: TripoClient, task_id: str, label: str):
    """Poll *task_id* until completion; return the task only on success.

    Raises RuntimeError (tagged with *label*) when the final status does
    not contain "success".
    """
    task = await client.wait_for_task(task_id, polling_interval=2.0, timeout=None, verbose=True)
    status = str(getattr(task, "status", "")).lower()
    if "success" in status:
        return task
    raise RuntimeError(
        f"{label} failed: status={getattr(task, 'status', None)} "
        f"error_code={getattr(task, 'error_code', None)} "
        f"error_msg={getattr(task, 'error_msg', None)}"
    )
async def download_and_rename_models(client: TripoClient, task: Any, out_dir: Path, stem: str) -> Dict[str, str]:
    """Download the task's model files and rename each to ``<stem>.<type><ext>``.

    Returns a mapping of model type to the final (renamed) file path.
    Entries with an empty download path are skipped.
    """
    downloaded = await client.download_task_models(task, str(out_dir))
    renamed: Dict[str, str] = {}
    for model_type, file_path in downloaded.items():
        if not file_path:
            continue
        source = Path(file_path)
        target = out_dir / f"{stem}.{model_type}{source.suffix}"
        if source.resolve() != target.resolve():
            if target.exists():
                target.unlink()  # drop any stale file from a previous run
            source.replace(target)
        renamed[model_type] = str(target)
    return renamed
| def _extract_names_from_value(value: Any) -> List[str]: | |
| names: List[str] = [] | |
| if value is None: | |
| return names | |
| if isinstance(value, str): | |
| return [value] | |
| if isinstance(value, list): | |
| for item in value: | |
| if isinstance(item, str): | |
| names.append(item) | |
| elif isinstance(item, dict): | |
| for key in ("name", "part_name", "label", "id"): | |
| if isinstance(item.get(key), str): | |
| names.append(item[key]) | |
| break | |
| else: | |
| for key in ("name", "part_name", "label", "id"): | |
| try: | |
| v = getattr(item, key, None) | |
| except Exception: | |
| v = None | |
| if isinstance(v, str): | |
| names.append(v) | |
| break | |
| elif isinstance(value, dict): | |
| # Sometimes parts may be a dict keyed by part name | |
| if all(isinstance(k, str) for k in value.keys()): | |
| names.extend(list(value.keys())) | |
| for key in ("parts", "part_names", "segmented_parts"): | |
| if key in value: | |
| names.extend(_extract_names_from_value(value[key])) | |
| else: | |
| for key in ("parts", "part_names", "segmented_parts"): | |
| try: | |
| v = getattr(value, key, None) | |
| except Exception: | |
| v = None | |
| if v is not None: | |
| names.extend(_extract_names_from_value(v)) | |
| # de-duplicate while preserving order | |
| seen = set() | |
| deduped = [] | |
| for n in names: | |
| if n not in seen: | |
| seen.add(n) | |
| deduped.append(n) | |
| return deduped | |
def discover_part_names(task: Any) -> List[str]:
    """
    Defensive discovery because the public SDK docs document `part_names`
    as inputs on texture/convert, but do not clearly document the exact
    output field carrying generated part names.

    Scans the task object, its `output` attribute, and their dict
    snapshots, then filters out blanks and status-word false positives.
    """
    sources: List[Any] = [task]
    try:
        sources.append(getattr(task, "output", None))
    except Exception:
        pass
    as_dict = task_to_dict(task)
    sources.append(as_dict)
    sources.append(as_dict.get("output"))
    raw_names: List[str] = []
    for source in sources:
        raw_names.extend(_extract_names_from_value(source))
    # reject obviously bogus values (blanks, task-status words)
    status_words = {"success", "pending", "running", "failed"}
    cleaned = [
        name.strip()
        for name in raw_names
        if isinstance(name, str)
        and name.strip()
        and name.strip().lower() not in status_words
    ]
    # de-duplicate while preserving first-seen order
    return list(dict.fromkeys(cleaned))
async def export_one_format(
    client: TripoClient,
    source_task_id: str,
    fmt: str,
    out_dir: Path,
    stem: str,
    *,
    part_names: Optional[List[str]] = None,
    face_limit: int = 10000,
    flatten_bottom: bool = False,
    flatten_bottom_threshold: float = 0.01,
    pivot_to_center_bottom: bool = False,
    pack_uv: bool = False,
    texture_format: str = "JPEG",
    with_animation: bool = False,
) -> Dict[str, str]:
    """Convert the model behind *source_task_id* to *fmt*, wait, and download.

    Saves the export task's metadata alongside the files and returns the
    mapping of model type to downloaded (renamed) file path.
    """
    convert_kwargs = dict(
        original_model_task_id=source_task_id,
        format=fmt,
        face_limit=face_limit,
        flatten_bottom=flatten_bottom,
        flatten_bottom_threshold=flatten_bottom_threshold,
        pivot_to_center_bottom=pivot_to_center_bottom,
        pack_uv=pack_uv,
        texture_format=texture_format,
        bake=True,  # always bake so textures survive the conversion
        with_animation=with_animation,
        part_names=part_names,
    )
    export_task_id = await client.convert_model(**convert_kwargs)
    export_task = await wait_success(client, export_task_id, f"{fmt} export {stem}")
    await save_task_metadata(export_task, out_dir, stem)
    return await download_and_rename_models(client, export_task, out_dir, stem)
async def export_parts_for_format(
    client: TripoClient,
    source_task_id: str,
    fmt: str,
    out_dir: Path,
    base_stem: str,
    part_names: List[str],
    *,
    face_limit: int,
    flatten_bottom: bool,
    flatten_bottom_threshold: float,
    pivot_to_center_bottom: bool,
    pack_uv: bool,
    texture_format: str,
) -> List[Dict[str, Any]]:
    """Export each named part separately as *fmt* with numbered stems.

    Output stems follow ``<base_stem>__NNN__<sanitized-part-name>``.
    Returns one manifest entry per part with its index, name, and files.
    """
    exported: List[Dict[str, Any]] = []
    for idx, part_name in enumerate(part_names, start=1):
        stem = f"{base_stem}__{idx:03d}__{sanitize_name(part_name)}"
        files = await export_one_format(
            client=client,
            source_task_id=source_task_id,
            fmt=fmt,
            out_dir=out_dir,
            stem=stem,
            part_names=[part_name],
            face_limit=face_limit,
            flatten_bottom=flatten_bottom,
            flatten_bottom_threshold=flatten_bottom_threshold,
            pivot_to_center_bottom=pivot_to_center_bottom,
            pack_uv=pack_uv,
            texture_format=texture_format,
            with_animation=False,  # per-part prints never carry animation
        )
        exported.append({"index": idx, "part_name": part_name, "files": files})
    return exported
async def pipeline(args) -> None:
    """Run the end-to-end Tripo3D pipeline described by parsed CLI *args*.

    Stages (optional steps are gated by CLI flags):
      1. Base text-to-model or image-to-model generation.
      2. Optional explicit whole-model texture pass (--retexture).
      3. Optional per-part texture passes (--export-part-textures).
      4. Optional rigging of the whole model (--rig).
      5. Combined STL/3MF exports (--export-stl / --export-3mf).
      6. Per-part STL/3MF exports when parts were generated.

    All downloaded assets, task-metadata JSON files, and manifests are
    written into the directory named by ``args.out``.

    Raises:
        ValueError: when the prompt/image required by the chosen mode is missing.
        RuntimeError: when a Tripo task fails or the model is not riggable.
    """
    out_dir = Path(args.out).resolve()
    ensure_dir(out_dir)
    async with TripoClient(api_key=args.api_key) as client:
        # 1) Base generation
        if args.mode == "text":
            if not args.prompt:
                raise ValueError("--prompt is required for text mode")
            base_task_id = await client.text_to_model(
                prompt=args.prompt,
                negative_prompt=args.negative_prompt,
                # texture and pbr are driven by the same CLI flag: a textured
                # base generation always requests PBR maps as well.
                texture=bool(args.texture),
                pbr=bool(args.texture),
                texture_quality=args.texture_quality,
                face_limit=args.face_limit,
                compress=args.compress,
                quad=args.quad,
                auto_size=args.auto_size,
                generate_parts=args.generate_parts,
                smart_low_poly=args.smart_low_poly,
            )
        else:
            if not args.image:
                raise ValueError("--image is required for image mode")
            base_task_id = await client.image_to_model(
                image=args.image,
                texture=bool(args.texture),
                pbr=bool(args.texture),
                texture_quality=args.texture_quality,
                face_limit=args.face_limit,
                compress=args.compress,
                quad=args.quad,
                auto_size=args.auto_size,
                orientation=args.orientation,
                generate_parts=args.generate_parts,
                smart_low_poly=args.smart_low_poly,
            )
        base_task = await wait_success(client, base_task_id, "base generation")
        await save_task_metadata(base_task, out_dir, "01_base")
        base_files = await download_and_rename_models(client, base_task, out_dir, "01_base")
        print("Base files:", base_files)
        # Part discovery is best-effort; see discover_part_names for why.
        part_names = discover_part_names(base_task) if args.generate_parts else []
        parts_manifest: Dict[str, Any] = {"part_names": part_names}
        (out_dir / "01_parts_manifest.json").write_text(
            json.dumps(parts_manifest, indent=2, ensure_ascii=False),
            encoding="utf-8",
        )
        if args.generate_parts:
            if part_names:
                print(f"Discovered {len(part_names)} parts:", part_names)
            else:
                print("generate_parts was enabled, but no part names could be discovered from the task payload.")
        # This task id is the full-model branch used for full exports and rigging.
        full_mesh_task_id = base_task.task_id
        full_texture_task_id = base_task.task_id
        # 2) Optional explicit whole-model texture pass
        if args.retexture:
            texture_task_id = await client.texture_model(
                original_model_task_id=full_mesh_task_id,
                texture=True,
                pbr=True,
                texture_quality=args.texture_quality,
                compress=args.compress,
                bake=True,
                text_prompt=args.texture_prompt,
                image_prompt=args.texture_image_prompt,
                style_image=args.texture_style_image,
            )
            texture_task = await wait_success(client, texture_task_id, "whole-model texture")
            await save_task_metadata(texture_task, out_dir, "02_textured_full")
            full_texture_files = await download_and_rename_models(client, texture_task, out_dir, "02_textured_full")
            print("Whole textured files:", full_texture_files)
            # Downstream rigging/exports should use the retextured model.
            full_texture_task_id = texture_task.task_id
        # 3) Optional per-part texture passes
        # This creates per-part textured assets with __001, __002... naming.
        # By default it exports the textured part as GLTF so the texture survives cleanly.
        if args.generate_parts and part_names and args.export_part_textures:
            part_texture_manifest = []
            for idx, part_name in enumerate(part_names, start=1):
                stem = f"03_textured_part__{idx:03d}__{sanitize_name(part_name)}"
                # Per-part texturing always starts from the untextured base
                # mesh task, not the retextured branch.
                part_tex_task_id = await client.texture_model(
                    original_model_task_id=full_mesh_task_id,
                    texture=True,
                    pbr=True,
                    texture_quality=args.texture_quality,
                    texture_alignment=args.texture_alignment,
                    part_names=[part_name],
                    compress=args.compress,
                    bake=True,
                    text_prompt=args.texture_prompt,
                    image_prompt=args.texture_image_prompt,
                    style_image=args.texture_style_image,
                )
                part_tex_task = await wait_success(client, part_tex_task_id, f"texture part {part_name}")
                await save_task_metadata(part_tex_task, out_dir, stem)
                files = await download_and_rename_models(client, part_tex_task, out_dir, stem)
                # optional extra converted per-part textured delivery, e.g. GLTF
                converted = {}
                if args.part_texture_export_format:
                    converted = await export_one_format(
                        client=client,
                        source_task_id=part_tex_task.task_id,
                        fmt=args.part_texture_export_format,
                        out_dir=out_dir,
                        stem=f"{stem}__export",
                        part_names=None,
                        face_limit=args.export_face_limit,
                        flatten_bottom=args.flatten_bottom,
                        flatten_bottom_threshold=args.flatten_bottom_threshold,
                        pivot_to_center_bottom=args.pivot_to_center_bottom,
                        pack_uv=args.pack_uv,
                        texture_format=args.texture_file_format,
                        with_animation=False,
                    )
                part_texture_manifest.append({
                    "index": idx,
                    "part_name": part_name,
                    "task_id": part_tex_task.task_id,
                    "downloaded_files": files,
                    "converted_files": converted,
                })
            (out_dir / "03_part_textures_manifest.json").write_text(
                json.dumps(part_texture_manifest, indent=2, ensure_ascii=False),
                encoding="utf-8",
            )
        # 4) Optional rigging only for whole model
        rig_task = None
        if args.rig:
            check_task_id = await client.check_riggable(full_texture_task_id)
            check_task = await wait_success(client, check_task_id, "riggability check")
            await save_task_metadata(check_task, out_dir, "04_rig_check")
            # NOTE(review): assumes the check task exposes `output.riggable`;
            # defaults to False if either attribute is absent — confirm against SDK.
            riggable = bool(getattr(getattr(check_task, "output", None), "riggable", False))
            if not riggable:
                raise RuntimeError("Model is not riggable according to Tripo rig check")
            rig_task_id = await client.rig_model(
                original_model_task_id=full_texture_task_id,
                out_format=args.rig_format,
                rig_type=normalize_rig_type(args.rig_type),
                spec=normalize_rig_spec(args.rig_spec),
            )
            rig_task = await wait_success(client, rig_task_id, "rigging")
            await save_task_metadata(rig_task, out_dir, "05_rigged_full")
            rig_files = await download_and_rename_models(client, rig_task, out_dir, "05_rigged_full")
            print("Rigged full-model files:", rig_files)
        # 5) Combined printable exports from the whole model branch
        combined_export_source = full_texture_task_id
        if args.export_stl:
            files = await export_one_format(
                client=client,
                source_task_id=combined_export_source,
                fmt="STL",
                out_dir=out_dir,
                stem="06_export_full_stl",
                face_limit=args.export_face_limit,
                flatten_bottom=args.flatten_bottom,
                flatten_bottom_threshold=args.flatten_bottom_threshold,
                pivot_to_center_bottom=args.pivot_to_center_bottom,
                pack_uv=args.pack_uv,
                texture_format=args.texture_file_format,
                with_animation=False,
            )
            print("Combined STL:", files)
        if args.export_3mf:
            files = await export_one_format(
                client=client,
                source_task_id=combined_export_source,
                fmt="3MF",
                out_dir=out_dir,
                stem="07_export_full_3mf",
                face_limit=args.export_face_limit,
                flatten_bottom=args.flatten_bottom,
                flatten_bottom_threshold=args.flatten_bottom_threshold,
                pivot_to_center_bottom=args.pivot_to_center_bottom,
                pack_uv=args.pack_uv,
                texture_format=args.texture_file_format,
                with_animation=False,
            )
            print("Combined 3MF:", files)
        # 6) Per-part printable exports with __001, __002...
        if args.generate_parts and part_names:
            if args.export_stl:
                stl_manifest = await export_parts_for_format(
                    client=client,
                    source_task_id=combined_export_source,
                    fmt="STL",
                    out_dir=out_dir,
                    base_stem="08_export_part_stl",
                    part_names=part_names,
                    face_limit=args.export_face_limit,
                    flatten_bottom=args.flatten_bottom,
                    flatten_bottom_threshold=args.flatten_bottom_threshold,
                    pivot_to_center_bottom=args.pivot_to_center_bottom,
                    pack_uv=args.pack_uv,
                    texture_format=args.texture_file_format,
                )
                (out_dir / "08_export_part_stl_manifest.json").write_text(
                    json.dumps(stl_manifest, indent=2, ensure_ascii=False),
                    encoding="utf-8",
                )
            if args.export_3mf:
                mf_manifest = await export_parts_for_format(
                    client=client,
                    source_task_id=combined_export_source,
                    fmt="3MF",
                    out_dir=out_dir,
                    base_stem="09_export_part_3mf",
                    part_names=part_names,
                    face_limit=args.export_face_limit,
                    flatten_bottom=args.flatten_bottom,
                    flatten_bottom_threshold=args.flatten_bottom_threshold,
                    pivot_to_center_bottom=args.pivot_to_center_bottom,
                    pack_uv=args.pack_uv,
                    texture_format=args.texture_file_format,
                )
                (out_dir / "09_export_part_3mf_manifest.json").write_text(
                    json.dumps(mf_manifest, indent=2, ensure_ascii=False),
                    encoding="utf-8",
                )
def build_parser():
    """Construct the argument parser for the pipeline CLI."""
    parser = argparse.ArgumentParser()
    # Credentials, mode, and basic I/O
    parser.add_argument("--api-key", default=os.environ.get("TRIPO_API_KEY"))
    parser.add_argument("--mode", choices=["text", "image"], required=True)
    parser.add_argument("--prompt")
    parser.add_argument("--negative-prompt", default=None)
    parser.add_argument("--image")
    parser.add_argument("--out", required=True)
    # Texturing
    parser.add_argument("--texture", action="store_true",
                        help="Generate texture during initial generation")
    parser.add_argument("--retexture", action="store_true",
                        help="Run explicit whole-model texture step after generation")
    parser.add_argument("--texture-prompt", default=None)
    parser.add_argument("--texture-image-prompt", default=None)
    parser.add_argument("--texture-style-image", default=None)
    parser.add_argument("--texture-quality", choices=["standard", "detailed"],
                        default="standard")
    parser.add_argument("--texture-alignment", choices=["original_image", "geometry"],
                        default="original_image")
    parser.add_argument("--texture-file-format", default="JPEG",
                        choices=["BMP", "DPX", "HDR", "JPEG", "OPEN_EXR", "PNG", "TARGA", "TIFF", "WEBP"])
    # Geometry / generation
    parser.add_argument("--face-limit", type=int, default=None)
    parser.add_argument("--export-face-limit", type=int, default=10000)
    parser.add_argument("--compress", action="store_true")
    parser.add_argument("--quad", action="store_true")
    parser.add_argument("--auto-size", action="store_true")
    parser.add_argument("--smart-low-poly", action="store_true")
    parser.add_argument("--orientation", default="default")
    # Part handling
    parser.add_argument("--generate-parts", action="store_true")
    parser.add_argument("--export-part-textures", action="store_true",
                        help="For each discovered part, generate a separate textured asset")
    parser.add_argument("--part-texture-export-format", default="GLTF",
                        choices=["GLTF", "FBX", "OBJ", "USDZ"],
                        help="Optional converted output for each textured part")
    # Rigging
    parser.add_argument("--rig", action="store_true")
    parser.add_argument("--rig-type", default=None,
                        choices=["biped", "quadruped", "hexapod", "octopod", "avian", "serpentine", "aquatic", "others"])
    parser.add_argument("--rig-spec", default="tripo", choices=["tripo", "mixamo"])
    parser.add_argument("--rig-format", default="fbx", choices=["fbx", "glb"])
    # Printable exports
    parser.add_argument("--export-stl", action="store_true")
    parser.add_argument("--export-3mf", action="store_true")
    parser.add_argument("--flatten-bottom", action="store_true")
    parser.add_argument("--flatten-bottom-threshold", type=float, default=0.01)
    parser.add_argument("--pivot-to-center-bottom", action="store_true")
    parser.add_argument("--pack-uv", action="store_true")
    return parser
def main() -> None:
    """CLI entry point: parse arguments, validate the API key, run the pipeline."""
    arg_parser = build_parser()
    opts = arg_parser.parse_args()
    if not opts.api_key:
        arg_parser.error("Missing API key. Set --api-key or TRIPO_API_KEY")
    asyncio.run(pipeline(opts))


if __name__ == "__main__":
    main()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment