Commit 08b23ce · reset: clean history (purge leaked token)

This view is limited to 50 files because it contains too many changes. See the raw diff for the complete change set.

Files changed:
- .gitattributes +36 -0
- .gitignore +12 -0
- Dockerfile +85 -0
- README.md +12 -0
- app.py +344 -0
- app_backup.py +52 -0
- app_backup_encoding.py +52 -0
- requirements.txt +27 -0
- samples/demo.mp4 +3 -0
- test_puppeteer_api_v2.ps1 +57 -0
- third_party/Puppeteer/.gitmodules +15 -0
- third_party/Puppeteer/LICENSE +201 -0
- third_party/Puppeteer/README.md +105 -0
- third_party/Puppeteer/animation/README.md +76 -0
- third_party/Puppeteer/animation/demo.sh +7 -0
- third_party/Puppeteer/animation/download.py +13 -0
- third_party/Puppeteer/animation/model.py +199 -0
- third_party/Puppeteer/animation/optimization.py +626 -0
- third_party/Puppeteer/animation/renderer.py +348 -0
- third_party/Puppeteer/animation/utils/cameras/back.json +1 -0
- third_party/Puppeteer/animation/utils/cameras/back_left.json +64 -0
- third_party/Puppeteer/animation/utils/cameras/back_right.json +64 -0
- third_party/Puppeteer/animation/utils/cameras/front.json +1 -0
- third_party/Puppeteer/animation/utils/cameras/front_left.json +64 -0
- third_party/Puppeteer/animation/utils/cameras/front_right.json +64 -0
- third_party/Puppeteer/animation/utils/cameras/left.json +1 -0
- third_party/Puppeteer/animation/utils/cameras/right.json +1 -0
- third_party/Puppeteer/animation/utils/data_loader.py +170 -0
- third_party/Puppeteer/animation/utils/loss_utils.py +420 -0
- third_party/Puppeteer/animation/utils/misc.py +34 -0
- third_party/Puppeteer/animation/utils/quat_utils.py +179 -0
- third_party/Puppeteer/animation/utils/render_first_frame.py +93 -0
- third_party/Puppeteer/animation/utils/save_flow.py +297 -0
- third_party/Puppeteer/animation/utils/save_utils.py +374 -0
- third_party/Puppeteer/checkpoints/rig.ckpt +3 -0
- third_party/Puppeteer/demo_animation.sh +63 -0
- third_party/Puppeteer/demo_rigging.sh +117 -0
- third_party/Puppeteer/requirements.txt +29 -0
- third_party/Puppeteer/skeleton/README.md +93 -0
- third_party/Puppeteer/skeleton/data_utils/README.md +43 -0
- third_party/Puppeteer/skeleton/data_utils/convert_npz_to_mesh_rig.py +107 -0
- third_party/Puppeteer/skeleton/data_utils/data_loader.py +122 -0
- third_party/Puppeteer/skeleton/data_utils/pyrender_wrapper.py +144 -0
- third_party/Puppeteer/skeleton/data_utils/read_npz.py +43 -0
- third_party/Puppeteer/skeleton/data_utils/read_rig_mesh_from_glb.py +198 -0
- third_party/Puppeteer/skeleton/data_utils/render_data.py +61 -0
- third_party/Puppeteer/skeleton/data_utils/save_npz.py +256 -0
- third_party/Puppeteer/skeleton/demo.py +219 -0
- third_party/Puppeteer/skeleton/demo.sh +19 -0
- third_party/Puppeteer/skeleton/download.py +25 -0
.gitattributes
ADDED
@@ -0,0 +1,36 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
*.mp4 filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,12 @@

# local test scripts
*test_puppeteer_api*.ps1
logs/
results/


# local test scripts
*test_puppeteer_api*.ps1
logs/
results/

Dockerfile
ADDED
@@ -0,0 +1,85 @@
# ============================================================
# Puppeteer GPU API Dockerfile (Final, CUDA 11.8 + A10G Ready)
# ============================================================

FROM nvidia/cuda:11.8.0-cudnn8-runtime-ubuntu22.04

# ------------------------------------------------------------
# OS Dependencies
# ------------------------------------------------------------
RUN apt-get update && apt-get install -y \
    python3 python3-pip python3-venv \
    git wget curl unzip ffmpeg bash \
    libgl1 libglib2.0-0 \
    && rm -rf /var/lib/apt/lists/* \
    && ln -sf /usr/bin/python3 /usr/bin/python

# ------------------------------------------------------------
# Environment Variables
# ------------------------------------------------------------
ENV PIP_NO_CACHE_DIR=1 \
    PYTHONUNBUFFERED=1 \
    # Suppress OMP warnings and pin to a single thread (works around libgomp errors)
    OMP_NUM_THREADS=1 \
    MKL_THREADING_LAYER=SEQUENTIAL \
    # Input/output paths
    TMP_IN_DIR=/data/in \
    RESULT_DIR=/data/results

RUN python -m pip install --upgrade pip

# ------------------------------------------------------------
# Build Cache Busting (optional, force rebuild)
# ------------------------------------------------------------
ARG CACHE_BUST=2025-11-05-01-30

# ------------------------------------------------------------
# Work Directory
# ------------------------------------------------------------
WORKDIR /app

# ------------------------------------------------------------
# Python Dependencies (excluding Torch)
# ------------------------------------------------------------
COPY requirements.txt /app/requirements.txt
RUN pip install --no-cache-dir -r /app/requirements.txt

# ------------------------------------------------------------
# Install PyTorch for CUDA (cu118)
# ------------------------------------------------------------
RUN pip uninstall -y torch torchvision torchaudio || true
RUN pip install --no-cache-dir --index-url https://download.pytorch.org/whl/cu118 \
    torch torchvision torchaudio

# ------------------------------------------------------------
# App Source
# ------------------------------------------------------------
COPY app.py /app/app.py

# ------------------------------------------------------------
# Puppeteer Vendor (vendored code)
# ------------------------------------------------------------
COPY third_party/Puppeteer /app/Puppeteer
RUN chmod +x /app/Puppeteer/demo_rigging.sh || true

# PYTHONPATH: app / Puppeteer / third_party
ENV PYTHONPATH=/app:/app/Puppeteer:/app/Puppeteer/third_party:$PYTHONPATH

# In case some code imports 'third_partys'
RUN ln -s /app/Puppeteer/third_party /app/third_partys || true \
    && touch /app/Puppeteer/third_party/__init__.py

# ------------------------------------------------------------
# Writable Paths
# ------------------------------------------------------------
RUN mkdir -p /data/in /data/results && chmod -R 777 /data

# ------------------------------------------------------------
# ------------------------------------------------------------
# Entrypoint (ensures we cd into /app)
# ------------------------------------------------------------
RUN printf '#!/bin/bash\nset -euo pipefail\ncd /app\npython -c "import importlib, sys; import app; print(\\"[boot] app imported OK\\")" || exit 1\nuvicorn app:app --host 0.0.0.0 --port ${PORT:-7860}\n' > /app/run.sh \
    && chmod +x /app/run.sh

EXPOSE 7860
CMD ["sh", "-c", "/app/run.sh"]
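For context, the generated `/app/run.sh` first import-checks `app.py` (printing `[boot] app imported OK`) and then starts uvicorn on `${PORT:-7860}`. A rough Python equivalent of that boot sequence is sketched below; it is an illustration only and assumes `app.py` and its dependencies are importable from the current working directory.

```python
# Sketch of the boot sequence performed by /app/run.sh (assumes app.py is importable).
import importlib
import os

import uvicorn

app_module = importlib.import_module("app")  # mirrors: python -c "import app; ..."
print("[boot] app imported OK")

# mirrors: uvicorn app:app --host 0.0.0.0 --port ${PORT:-7860}
uvicorn.run(app_module.app, host="0.0.0.0", port=int(os.environ.get("PORT", "7860")))
```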
README.md
ADDED
@@ -0,0 +1,12 @@
---
title: Puppeteer Api
emoji: 🏆
colorFrom: yellow
colorTo: blue
sdk: docker
pinned: false
license: mit
short_description: puppeteer-api
---

Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
ADDED
@@ -0,0 +1,344 @@
import os
import re
import sys
import shutil
import subprocess
from pathlib import Path
from typing import List, Optional
import importlib.util

import requests
from fastapi import FastAPI, HTTPException
from fastapi.responses import FileResponse
from pydantic import BaseModel, HttpUrl, Field

# ----------------------------------------------------------------------------- #
# 🔧 Environment pinning: avoid libgomp warnings/errors (invalid OMP_NUM_THREADS)
# ----------------------------------------------------------------------------- #
# In some containers libgomp raises an error if OMP_NUM_THREADS is empty or invalid.
# Force it to a safe integer value.
os.environ["OMP_NUM_THREADS"] = os.environ.get("OMP_NUM_THREADS", "4")
if not os.environ["OMP_NUM_THREADS"].isdigit():
    os.environ["OMP_NUM_THREADS"] = "4"

# ----------------------------------------------------------------------------- #
# 🔧 Auto-install runtime dependencies (tqdm, einops, scipy, trimesh, etc.)
# - If any are missing from requirements/Dockerfile, check once at server startup and install them
# ----------------------------------------------------------------------------- #
RUNTIME_DEPS = [
    "tqdm",
    "einops",
    "scipy",
    "trimesh",
    "accelerate",  # added
    "timm",  # added
    # Extra packages below (installed automatically if errors occur)
    "networkx",
    "scikit-image",
]

def _need_install(mod_name: str) -> bool:
    return importlib.util.find_spec(mod_name) is None

def _pip_install(pkgs: List[str]) -> None:
    if not pkgs:
        return
    try:
        subprocess.check_call([sys.executable, "-m", "pip", "install", *pkgs])
    except Exception as e:
        print(f"[deps] pip install failed for {pkgs}: {e}")

def _ensure_runtime_deps() -> None:
    # numpy 2.x can conflict with scipy and friends -> try pinning to numpy<2
    try:
        import numpy as _np
        if _np.__version__.startswith("2"):
            print(f"[deps] numpy=={_np.__version__} detected; attempting to pin <2.0")
            _pip_install(["numpy<2"])
    except Exception as e:
        print(f"[deps] numpy check failed: {e}")
    # Fill in any required modules that are missing
    missing = [m for m in RUNTIME_DEPS if _need_install(m)]
    if missing:
        print(f"[deps] installing missing modules: {missing}")
        _pip_install(missing)
    # Final verification log
    for m in RUNTIME_DEPS:
        print(f"[deps] {m} -> {'OK' if not _need_install(m) else 'MISSING'}")

_ensure_runtime_deps()

# ----------------------------------------------------------------------------- #
# FastAPI initialization
# ----------------------------------------------------------------------------- #
app = FastAPI(title="Puppeteer API", version="1.0.0")

# ----------------------------------------------------------------------------- #
# Settings
# ----------------------------------------------------------------------------- #
PUPPETEER_SRC = Path(os.environ.get("PUPPETEER_DIR", "/app/Puppeteer"))  # read-only source tree
PUPPETEER_RUN = Path(os.environ.get("PUPPETEER_RUN", "/tmp/puppeteer_run"))  # writable copy used for execution
RESULT_DIR = Path(os.environ.get("RESULT_DIR", str(PUPPETEER_RUN / "results")))  # default path for rig results
TMP_IN_DIR = Path(os.environ.get("TMP_IN_DIR", "/tmp/in"))  # where inputs are stored
DOWNLOAD_TIMEOUT = int(os.environ.get("DOWNLOAD_TIMEOUT", "180"))
MAX_DOWNLOAD_MB = int(os.environ.get("MAX_DOWNLOAD_MB", "512"))
SAFE_NAME = re.compile(r"[^A-Za-z0-9._-]+")
# Candidate paths for broadly locating animation/rigging results
RESULT_BASES = [
    Path("/app/Puppeteer/results"),
    RESULT_DIR,
    Path("/data/results"),
    Path("/tmp/puppeteer_run/results"),
]

# ----------------------------------------------------------------------------- #
# Auto-download checkpoints (downloaded automatically at runtime)
# ----------------------------------------------------------------------------- #
ckpt_path = Path("/app/Puppeteer/checkpoints")
if not ckpt_path.exists() or not any(ckpt_path.iterdir()):
    try:
        print("[init] checkpoints missing — trying runtime download via script...")
        subprocess.run(
            ["bash", "-lc", "cd /app/Puppeteer && ./scripts/download_ckpt.sh"],
            check=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            text=True,
        )
        print("[init] Puppeteer checkpoints downloaded successfully via script.")
    except Exception as e:
        print("[init] WARNING: checkpoint script failed:", e)
        try:
            ckpt_path.mkdir(parents=True, exist_ok=True)
            print("[init] trying manual download from GitHub release...")
            subprocess.run(
                [
                    "wget",
                    "-O",
                    "/app/Puppeteer/checkpoints/rig.ckpt",
                    "https://github.com/ByteDance-Seed/Puppeteer/releases/download/v1.0.0/rig.ckpt",
                ],
                check=True,
            )
            print("[init] rig.ckpt downloaded manually.")
        except Exception as e2:
            print("[init] WARNING: manual checkpoint download failed:", e2)

# ----------------------------------------------------------------------------- #
# Schemas
# ----------------------------------------------------------------------------- #
class RigIn(BaseModel):
    mesh_url: HttpUrl = Field(..., description="Input mesh URL (obj/glb/fbx/…)")
    workdir: Optional[str] = Field(default=None, description="Optional work directory name")

class RigOut(BaseModel):
    status: str
    result_dir: Optional[str] = None
    files_preview: Optional[List[str]] = None
    detail: Optional[str] = None
    gpu: Optional[bool] = None
    gpu_name: Optional[str] = None

class AnimateIn(BaseModel):
    video_url: HttpUrl = Field(..., description="Input video URL (mp4, mov, etc.)")
    mesh_path: Optional[str] = Field(
        default="/app/Puppeteer/results/rigged.glb",
        description="Path to rigged mesh"
    )

# ----------------------------------------------------------------------------- #
# Utils
# ----------------------------------------------------------------------------- #
def ensure_dirs() -> None:
    TMP_IN_DIR.mkdir(parents=True, exist_ok=True)
    PUPPETEER_RUN.mkdir(parents=True, exist_ok=True)
    RESULT_DIR.mkdir(parents=True, exist_ok=True)

def prepare_run_tree() -> None:
    if not PUPPETEER_SRC.exists():
        raise HTTPException(status_code=500, detail=f"Puppeteer not found: {PUPPETEER_SRC}")
    shutil.copytree(PUPPETEER_SRC, PUPPETEER_RUN, dirs_exist_ok=True)
    script = PUPPETEER_RUN / "demo_rigging.sh"
    if script.exists():
        script.chmod(0o755)

def safe_basename(url: str) -> str:
    name = os.path.basename(url.split("?")[0])
    return SAFE_NAME.sub("_", name) or "input_mesh"

def download_with_limit(url: str, dst: Path) -> None:
    with requests.get(url, stream=True, timeout=DOWNLOAD_TIMEOUT) as r:
        r.raise_for_status()
        total = 0
        with open(dst, "wb") as f:
            for chunk in r.iter_content(chunk_size=1024 * 1024):
                if not chunk:
                    continue
                total += len(chunk)
                if total > MAX_DOWNLOAD_MB * 1024 * 1024:
                    raise HTTPException(status_code=413, detail="File too large")
                f.write(chunk)

def torch_info() -> tuple[bool, Optional[str]]:
    try:
        import torch
        ok = torch.cuda.is_available()
        name = torch.cuda.get_device_name(0) if ok else None
        return ok, name
    except Exception:
        return False, None

def scan_results(limit: int = 200) -> List[str]:
    files: List[str] = []
    exts = ("*.glb", "*.mp4", "*.fbx", "*.obj", "*.gltf", "*.png", "*.jpg", "*.json", "*.txt")
    for base in RESULT_BASES:
        if base.exists():
            for ext in exts:
                for p in base.rglob(ext):
                    if p.is_file():
                        files.append(str(p))
                    if len(files) >= limit:
                        return files
    return files

# ----------------------------------------------------------------------------- #
# Routes
# ----------------------------------------------------------------------------- #
@app.get("/")
def root():
    return {"status": "ready", "service": "puppeteer-api"}

@app.get("/health")
def health():
    gpu, name = torch_info()
    return {"status": "ok", "cuda": gpu, "gpu": name}

@app.post("/rig", response_model=RigOut)
def rig(inp: RigIn):
    ensure_dirs()
    prepare_run_tree()

    basename = safe_basename(str(inp.mesh_url))
    mesh_path = TMP_IN_DIR / basename
    _ = SAFE_NAME.sub("_", inp.workdir or "job")  # reserved, currently unused

    try:
        download_with_limit(str(inp.mesh_url), mesh_path)
    except Exception as e:
        raise HTTPException(status_code=400, detail=f"Download error: {e}")

    script = PUPPETEER_RUN / "demo_rigging.sh"
    cmd = ["bash", str(script), str(mesh_path)]
    try:
        proc = subprocess.run(
            cmd,
            cwd=str(PUPPETEER_RUN),
            check=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            text=True,
        )
        run_log = proc.stdout[-4000:]
    except subprocess.CalledProcessError as e:
        snippet = (e.stdout or "")[-2000:]
        raise HTTPException(status_code=500, detail=f"Puppeteer failed: {snippet}")
    except FileNotFoundError:
        raise HTTPException(status_code=500, detail="demo_rigging.sh not found")

    preview = scan_results(limit=20)
    gpu, gpu_name = torch_info()
    return RigOut(
        status="ok",
        result_dir=str(RESULT_DIR),
        files_preview=preview[:10],
        detail=run_log if preview else "no result files found",
        gpu=gpu,
        gpu_name=gpu_name,
    )

@app.post("/animate")
def animate(inp: AnimateIn):
    """
    Run Puppeteer's demo_animation.sh (video-guided animation).
    Inputs: video_url (mp4), mesh_path (defaults to rigged.glb).
    """
    pdir = Path("/app/Puppeteer")
    script = pdir / "demo_animation.sh"
    video_path = Path("/tmp/video.mp4")

    if not script.exists():
        raise HTTPException(status_code=404, detail="demo_animation.sh not found")

    # -------- Download the video with requests -------- #
    try:
        print(f"[animate] downloading video from {inp.video_url}")
        with requests.get(str(inp.video_url), stream=True, timeout=60) as r:
            r.raise_for_status()
            with open(video_path, "wb") as f:
                for chunk in r.iter_content(chunk_size=8192):
                    if chunk:
                        f.write(chunk)
        print(f"[animate] Video saved to {video_path}")
    except Exception as e:
        raise HTTPException(status_code=400, detail=f"Video download failed via requests: {e}")

    # -------- Run the Puppeteer animation -------- #
    cmd = [
        "bash", str(script),
        "--mesh", str(inp.mesh_path),
        "--video", str(video_path),
    ]
    try:
        proc = subprocess.run(
            cmd,
            cwd=str(pdir),
            check=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            text=True,
        )
        output = proc.stdout[-4000:]
    except subprocess.CalledProcessError as e:
        raise HTTPException(status_code=500, detail=f"Animation failed: {e.stdout[-2000:]}")
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Unexpected error: {e}")

    anim_results = scan_results(limit=20)
    return {
        "status": "ok",
        "video_used": str(inp.video_url),
        "detail": output,
        "files_preview": anim_results[:10],
    }

# -------- Result file listing / download utilities -------- #
@app.get("/list")
def list_results():
    files = scan_results(limit=500)
    return {"count": len(files), "files": files}

@app.get("/download")
def download(path: str):
    p = Path(path).resolve()
    # Only allow paths under the known result directories
    if not any(str(p).startswith(str(b.resolve())) for b in RESULT_BASES):
        raise HTTPException(status_code=400, detail="invalid path")
    if not p.exists() or not p.is_file():
        raise HTTPException(status_code=404, detail="file not found")
    return FileResponse(str(p), filename=p.name)

@app.get("/debug")
def debug():
    pdir = Path("/app/Puppeteer")
    script = pdir / "demo_rigging.sh"
    ckpt_dir = pdir / "checkpoints"
    req_file = pdir / "requirements.txt"
    return {
        "script_exists": script.exists(),
        "ckpt_dir_exists": ckpt_dir.exists(),
        "req_exists": req_file.exists(),
        "ckpt_samples": [str(p) for p in ckpt_dir.glob("**/*")][:15],
        "tmp_in": os.environ.get("TMP_IN_DIR", "/data/in"),
        "result_dir": os.environ.get("RESULT_DIR", "/data/results"),
        "omp_num_threads": os.environ.get("OMP_NUM_THREADS"),
    }
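For reference, a minimal client for the endpoints defined above could look like the sketch below. The base URL and token are placeholders; the `Authorization: Bearer` header mirrors what the PowerShell test script further down sends (the API itself does not enforce auth), and the sample mesh URL is the same public CesiumMan asset used in that script.

```python
# Minimal client sketch for the Puppeteer API above (placeholder base URL / token).
import requests

BASE_URL = "https://<your-space>.hf.space"        # placeholder
HEADERS = {"Authorization": "Bearer <hf_token>"}  # only needed for a private Space

# 1) Health check: reports CUDA availability and GPU name.
print(requests.get(f"{BASE_URL}/health", headers=HEADERS, timeout=30).json())

# 2) Rigging: synchronous call, returns once demo_rigging.sh has finished.
rig = requests.post(
    f"{BASE_URL}/rig",
    headers=HEADERS,
    json={
        "mesh_url": "https://cdn.jsdelivr.net/gh/KhronosGroup/glTF-Sample-Models@master/2.0/CesiumMan/glTF-Binary/CesiumMan.glb",
        "workdir": "job-cesium",
    },
    timeout=3600,
)
print(rig.json())

# 3) List result files, then fetch one through /download?path=...
files = requests.get(f"{BASE_URL}/list", headers=HEADERS, timeout=60).json()["files"]
if files:
    resp = requests.get(f"{BASE_URL}/download", headers=HEADERS, params={"path": files[0]}, timeout=600)
    with open(files[0].rsplit("/", 1)[-1], "wb") as out:
        out.write(resp.content)
```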
app_backup.py
ADDED
@@ -0,0 +1,52 @@
import os, subprocess, requests
from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()

class RigIn(BaseModel):
    mesh_url: str  # Input file URL (obj, glb, fbx, etc.)

@app.get("/")
def root():
    return {"message": "Puppeteer API (GPU) ready"}

@app.get("/health")
def health():
    try:
        import torch
        gpu = torch.cuda.is_available()
        name = torch.cuda.get_device_name(0) if gpu else None
        return {"status": "ok", "cuda": gpu, "gpu": name}
    except Exception as e:
        return {"status": "ok", "cuda": False, "detail": str(e)}

@app.post("/rig")
def rig(inp: RigIn):
    os.makedirs("/tmp/in", exist_ok=True)
    mesh_path = os.path.join("/tmp/in", os.path.basename(inp.mesh_url))

    # 1️⃣ Download the input file
    with requests.get(inp.mesh_url, stream=True) as r:
        r.raise_for_status()
        with open(mesh_path, "wb") as f:
            for chunk in r.iter_content(chunk_size=8192):
                if chunk:
                    f.write(chunk)

    # 2️⃣ Run Puppeteer
    workdir = "/app/Puppeteer"
    cmd = ["bash", "demo_rigging.sh", mesh_path]
    try:
        subprocess.run(cmd, cwd=workdir, check=True)
    except subprocess.CalledProcessError as e:
        return {"status": "error", "detail": str(e)}

    # 3️⃣ Return the list of results
    result_dir = os.path.join(workdir, "results")
    files = []
    for rootdir, _, filenames in os.walk(result_dir):
        for fn in filenames:
            files.append(os.path.join(rootdir, fn))
            if len(files) >= 20: break
    return {"status": "ok", "result_dir": result_dir, "files_preview": files[:10]}
app_backup_encoding.py
ADDED
@@ -0,0 +1,52 @@
import os, subprocess, requests
from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()

class RigIn(BaseModel):
    mesh_url: str  # Input file URL (obj, glb, fbx, etc.)

@app.get("/")
def root():
    return {"message": "Puppeteer API (GPU) ready"}

@app.get("/health")
def health():
    try:
        import torch
        gpu = torch.cuda.is_available()
        name = torch.cuda.get_device_name(0) if gpu else None
        return {"status": "ok", "cuda": gpu, "gpu": name}
    except Exception as e:
        return {"status": "ok", "cuda": False, "detail": str(e)}

@app.post("/rig")
def rig(inp: RigIn):
    os.makedirs("/tmp/in", exist_ok=True)
    mesh_path = os.path.join("/tmp/in", os.path.basename(inp.mesh_url))

    # 1️⃣ Download the input file
    with requests.get(inp.mesh_url, stream=True) as r:
        r.raise_for_status()
        with open(mesh_path, "wb") as f:
            for chunk in r.iter_content(chunk_size=8192):
                if chunk:
                    f.write(chunk)

    # 2️⃣ Run Puppeteer
    workdir = "/app/Puppeteer"
    cmd = ["bash", "demo_rigging.sh", mesh_path]
    try:
        subprocess.run(cmd, cwd=workdir, check=True)
    except subprocess.CalledProcessError as e:
        return {"status": "error", "detail": str(e)}

    # 3️⃣ Return the list of results
    result_dir = os.path.join(workdir, "results")
    files = []
    for rootdir, _, filenames in os.walk(result_dir):
        for fn in filenames:
            files.append(os.path.join(rootdir, fn))
            if len(files) >= 20: break
    return {"status": "ok", "result_dir": result_dir, "files_preview": files[:10]}
requirements.txt
ADDED
@@ -0,0 +1,27 @@
# server
fastapi==0.115.5
uvicorn[standard]==0.34.0
pydantic==2.9.2
requests==2.32.3

# numeric stack (pin to avoid ABI woes)
numpy<2
scipy==1.11.4

# geometry / images
trimesh==4.4.9
networkx==3.3
scikit-image==0.24.0
opencv-python-headless
# training/runtime utils
tqdm==4.66.5
einops==0.8.0
accelerate==1.0.1
timm==1.0.9

# Hugging Face stack for skeleton step
transformers==4.44.2
tokenizers>=0.14.0
safetensors>=0.4.2
huggingface-hub>=0.23.0

samples/demo.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5e66e01296a4984841baaf0b9542aed07a5d5eb84958135a8d612b9ff1ec9419
size 574823
test_puppeteer_api_v2.ps1
ADDED
@@ -0,0 +1,57 @@
param(
    [string]$BaseUrl = "https://seungminkwak-puppeteer-api.hf.space",
    [string]$Token = "",
    [string]$MeshUrl = "https://cdn.jsdelivr.net/gh/KhronosGroup/glTF-Sample-Models@master/2.0/CesiumMan/glTF-Binary/CesiumMan.glb",
    [string]$Workdir = "job-cesium",
    [int]$MaxTries = 12,
    [int]$DelaySec = 10
)

$ErrorActionPreference = "Stop"

Write-Host "=== Puppeteer API quick test ==="
Write-Host "[1] /health"
$health = Invoke-RestMethod -Uri "$BaseUrl/health" -Headers @{ Authorization = "Bearer $Token" }
$health | ConvertTo-Json -Depth 6 | Write-Host

Write-Host "[2] /rig"
$body = @{ mesh_url = $MeshUrl; workdir = $Workdir } | ConvertTo-Json -Depth 6
$resp = Invoke-RestMethod -Uri "$BaseUrl/rig" -Headers @{ Authorization = "Bearer $Token"; "Content-Type"="application/json" } -Method POST -Body $body
$resp | ConvertTo-Json -Depth 6 | Write-Host

Write-Host "[3] Poll /list"
$files = @()
for ($i=1; $i -le $MaxTries; $i++) {
    try {
        $list = Invoke-RestMethod -Uri "$BaseUrl/list" -Headers @{ Authorization = "Bearer $Token" }
        if ($list.files_preview) {
            $files = $list.files_preview
            Write-Host (" -> Found: {0}" -f ($files -join ", "))
            break
        } else {
            Write-Host (" -> Try {0}/{1}: no files yet" -f $i, $MaxTries)
        }
    } catch {
        Write-Host (" -> Try {0}/{1}: error {2}" -f $i, $MaxTries, $_.Exception.Message)
    }
    Start-Sleep -Seconds $DelaySec
}

if (-not $files -or $files.Count -eq 0) {
    Write-Host "No result files found." -ForegroundColor Red
    exit 2
}

# choose a file
$preferred = "/data/results/rigged.glb"
$target = if ($files -contains $preferred) { $preferred } else { $files[0] }
Write-Host ("[4] Download {0}" -f $target)

$enc = [uri]::EscapeDataString($target)
$newDir = Join-Path $PWD "results"
New-Item -ItemType Directory -Path $newDir -Force | Out-Null
$out = Join-Path $newDir (Split-Path -Leaf $target)
Invoke-WebRequest -Uri "$BaseUrl/download?path=$enc" -Headers @{ Authorization = "Bearer $Token" } -OutFile $out
Write-Host ("Saved to {0}" -f $out)
try { ii $out | Out-Null } catch {}
Write-Host "=== Done ==="
third_party/Puppeteer/.gitmodules
ADDED
@@ -0,0 +1,15 @@
[submodule "animation/third_partys/ptlflow"]
	path = animation/third_partys/ptlflow
	url = https://github.com/ChaoyueSong/ptlflow
[submodule "animation/third_partys/co_tracker"]
	path = animation/third_partys/co_tracker
	url = https://github.com/ChaoyueSong/co_tracker
[submodule "animation/third_partys/Video_Depth_Anything"]
	path = animation/third_partys/Video_Depth_Anything
	url = https://github.com/ChaoyueSong/Video_Depth_Anything
[submodule "skinning/third_partys/PartField"]
	path = skinning/third_partys/PartField
	url = https://github.com/ChaoyueSong/PartField
[submodule "skeleton/third_partys/Michelangelo"]
	path = skeleton/third_partys/Michelangelo
	url = https://github.com/ChaoyueSong/Michelangelo/
third_party/Puppeteer/LICENSE
ADDED
@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
third_party/Puppeteer/README.md
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<div align="center">
|
| 2 |
+
|
| 3 |
+
<h1>Puppeteer: Rig and Animate Your 3D Models</h1>
|
| 4 |
+
|
| 5 |
+
<p>
|
| 6 |
+
<a href="https://chaoyuesong.github.io"><strong>Chaoyue Song</strong></a><sup>1,2</sup>,
|
| 7 |
+
<a href="https://lixiulive.com/"><strong>Xiu Li</strong></a><sup>2</sup>,
|
| 8 |
+
<a href="https://scholar.google.com/citations?user=afDvaa8AAAAJ&hl"><strong>Fan Yang</strong></a><sup>1</sup>,
|
| 9 |
+
<a href="https://zcxu-eric.github.io/"><strong>Zhongcong Xu</strong></a><sup>2</sup>,
|
| 10 |
+
<a href="https://plusmultiply.github.io/"><strong>Jiacheng Wei</strong></a><sup>1</sup>,
|
| 11 |
+
<br>
|
| 12 |
+
<a href="https://sites.google.com/site/fayaoliu"><strong>Fayao Liu</strong></a><sup>3</sup>,
|
| 13 |
+
<a href="https://scholar.google.com.sg/citations?user=Q8iay0gAAAAJ"><strong>Jiashi Feng</strong></a><sup>2</sup>,
|
| 14 |
+
<a href="https://guosheng.github.io/"><strong>Guosheng Lin</strong></a><sup>1*</sup>,
|
| 15 |
+
<a href="https://jfzhang95.github.io/"><strong>Jianfeng Zhang</strong></a><sup>2*</sup>
|
| 16 |
+
<br>
|
| 17 |
+
*Corresponding authors
|
| 18 |
+
<br>
|
| 19 |
+
<sup>1 </sup>Nanyang Technological University
|
| 20 |
+
<sup>2 </sup>Bytedance Seed
|
| 21 |
+
<sup>3 </sup>A*STAR
|
| 22 |
+
</p>
|
| 23 |
+
|
| 24 |
+
<h3>arXiv 2025</h3>
|
| 25 |
+
|
| 26 |
+
<div align="center">
|
| 27 |
+
<img width="80%" src="assets/puppeteer_teaser.gif">
|
| 28 |
+
</div>
|
| 29 |
+
|
| 30 |
+
<p>
|
| 31 |
+
<a href="https://chaoyuesong.github.io/Puppeteer/"><strong>Project</strong></a> |
|
| 32 |
+
<a href="https://arxiv.org/abs/2508.10898"><strong>Paper</strong></a> |
|
| 33 |
+
<a href="https://www.youtube.com/watch?v=DnKx803JHyI"><strong>Video</strong></a> |
|
| 34 |
+
<a href="https://huggingface.co/datasets/chaoyue7/Articulation-XL2.0"><strong>Data: Articulation-XL2.0</strong></a>
|
| 35 |
+
</p>
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
</div>
|
| 39 |
+
|
| 40 |
+
<br/>
|
| 41 |
+
|
| 42 |
+
Puppeteer is proposed for **automatic rigging and animation of 3D objects**. Given a 3D object, Puppeteer first automatically generates skeletal structures and skinning weights, then animates the rigged model with video guidance through a differentiable optimization pipeline. This comprehensive approach aims to enable fully automated transformation of static 3D models into dynamically animated assets, eliminating the need for manual rigging expertise and significantly streamlining 3D content creation workflows.
|
| 43 |
+
|
| 44 |
+
<br/>
|
| 45 |
+
|
| 46 |
+
## 🔥 News
|
| 47 |
+
- Sep 09, 2025: We uploaded the [video](https://www.youtube.com/watch?v=DnKx803JHyI) for Puppeteer.
|
| 48 |
+
- Sep 04, 2025: Release the inference codes and [model checkpoints](https://huggingface.co/Seed3D/Puppeteer).
|
| 49 |
+
- Aug 15, 2025: Release [paper](https://arxiv.org/abs/2508.10898) of Puppeteer!
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
## 🔧 Installtation
|
| 53 |
+
We use Python 3.10 with PyTorch 2.1.1 and CUDA 11.8. The environment and required packages can be installed as follows:
|
| 54 |
+
|
| 55 |
+
```
|
| 56 |
+
git clone https://github.com/ByteDance-Seed/Puppeteer.git --recursive && cd Puppeteer
|
| 57 |
+
conda create -n puppeteer python==3.10.13 -y
|
| 58 |
+
conda activate puppeteer
|
| 59 |
+
pip install torch==2.1.1 torchvision==0.16.1 torchaudio==2.1.1 --index-url https://download.pytorch.org/whl/cu118
|
| 60 |
+
pip install -r requirements.txt
|
| 61 |
+
pip install flash-attn==2.6.3 --no-build-isolation
|
| 62 |
+
pip install torch-scatter -f https://data.pyg.org/whl/torch-2.1.1+cu118.html
|
| 63 |
+
pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py310_cu118_pyt211/download.html
|
| 64 |
+
```
|
| 65 |
+
|
| 66 |
+
## 🚀 Demo

We provide a complete pipeline for rigging and animating 3D models. Before running the pipeline, visit each folder (skeleton, skinning, animation) to download the necessary model checkpoints. Example data is available in the [examples](https://github.com/ByteDance-Seed/Puppeteer/tree/main/examples) folder.

### Rigging

Given 3D meshes, we first predict the skeleton and skinning weights:

```
bash demo_rigging.sh
```

The final rig files will be saved in `results/final_rigging`. To evaluate the [skeleton](https://github.com/ByteDance-Seed/Puppeteer/tree/main/skeleton) and [skinning](https://github.com/ByteDance-Seed/Puppeteer/tree/main/skinning) components separately, refer to their respective folders.

### Video-guided 3D animation

To animate the rigged model using video guidance, run:

```
bash demo_animation.sh
```

The rendered 3D animation sequences from different views will be saved in `results/animation`. Refer to the [animation folder](https://github.com/ByteDance-Seed/Puppeteer/tree/main/animation) for comprehensive details on data processing and structure.

## 😊 Acknowledgment

The code builds upon [MagicArticulate](https://github.com/Seed3D/MagicArticulate), [MeshAnything](https://github.com/buaacyw/MeshAnything), [Functional Diffusion](https://1zb.github.io/functional-diffusion/), [RigNet](https://github.com/zhan-xu/RigNet), [Michelangelo](https://github.com/NeuralCarver/Michelangelo/), [PartField](https://github.com/nv-tlabs/PartField), [AnyMoLe](https://github.com/kwanyun/AnyMoLe) and [Lab4D](https://github.com/lab4d-org/lab4d). We gratefully acknowledge the authors for making their work publicly available.

## 📚 Citation

```
@article{song2025puppeteer,
  title={Puppeteer: Rig and Animate Your 3D Models},
  author={Chaoyue Song and Xiu Li and Fan Yang and Zhongcong Xu and Jiacheng Wei and Fayao Liu and Jiashi Feng and Guosheng Lin and Jianfeng Zhang},
  journal={arXiv preprint arXiv:2508.10898},
  year={2025}
}
```
third_party/Puppeteer/animation/README.md
ADDED
|
@@ -0,0 +1,76 @@
# 3D Animation with Video Guidance
This repository provides a complete pipeline for generating 3D object animations with video guidance. The system includes data processing and optimization algorithms for rigging-based animation.

## Overview
The pipeline takes a rigged 3D model and a reference video, then optimizes the object's motion to match the video guidance while maintaining realistic skeletal constraints.

## Prerequisites

### Model Downloads
Download the required pre-trained models:

- [Video-Depth-Anything](https://huggingface.co/depth-anything/Video-Depth-Anything-Large) - For depth estimation
- [CoTracker3](https://huggingface.co/facebook/cotracker3) - For point tracking

```
python download.py
```

### Input Data Structure

Organize your input data as follows:
```
inputs/
└── {seq_name}/
    ├── objs/
    │   ├── mesh.obj      # 3D mesh geometry
    │   ├── rig.txt       # Rigging definition
    │   ├── material.mtl  # Material properties (optional)
    │   └── texture.png   # Texture maps (optional)
    ├── first_frames/     # Rendered initial frames
    ├── imgs/             # Extracted video frames
    ├── flow/             # Optical flow data
    ├── flow_vis/         # Visualized optical flow
    ├── depth/            # Estimated depth data
    ├── track/            # Tracked joints/vertices
    └── input.mp4         # Source video
```

## Data Processing

Given a 3D model with rigging under `inputs/{seq_name}/objs` (`mesh.obj`, `rig.txt`, optional `.mtl` and texture `.png`), we first render the object from a specified viewpoint. This image is used as the input (first frame) to the video generation model (e.g., [Jimeng AI](https://jimeng.jianying.com/ai-tool/home?type=video)).

```
python utils/render_first_frame.py --input_path inputs --seq_name {seq_name}
```
Replace `{seq_name}` with your sequence name. The first-frame images are saved to `inputs/{seq_name}/first_frames`. This generates reference images from 4 different viewpoints (you can add more). Choose the viewpoint that best shows the object's joints and key parts for optimal animation results. Save the generated video to `inputs/{seq_name}/input.mp4`.

Then we extract the frames from the video by running:

```
cd inputs/{seq_name}; mkdir imgs
ffmpeg -i input.mp4 -vf fps=10 imgs/frame_%04d.png
cd ../../
```

Estimate optical flows by running:

```
python utils/save_flow.py --input_path inputs --seq_name {seq_name}
```
The flow `.flo` files are saved to `inputs/{seq_name}/flow`, and the flow visualizations are saved to `inputs/{seq_name}/flow_vis`. Depth and tracking information are saved during optimization.
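
The `.flo` extension here is the common Middlebury optical-flow layout. Assuming `save_flow.py` writes that standard format (an assumption on our part), a minimal reader sketch for inspecting the saved flow fields looks like this:

```
import numpy as np

def read_flo(path):
    # Middlebury .flo layout: float32 magic number, int32 width, int32 height,
    # followed by H*W*2 float32 values interleaved as per-pixel (dx, dy).
    with open(path, "rb") as f:
        magic = np.fromfile(f, np.float32, count=1)[0]
        assert magic == 202021.25, "not a valid .flo file"
        w = int(np.fromfile(f, np.int32, count=1)[0])
        h = int(np.fromfile(f, np.int32, count=1)[0])
        data = np.fromfile(f, np.float32, count=2 * w * h)
    return data.reshape(h, w, 2)

# flow = read_flo("inputs/fish/flow/frame_0001.flo")  # illustrative filename only
```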

## Optimization

To optimize the animation, you can run

```
bash demo.sh
```

The results are saved to `results/{seq_name}/{save_name}`. Modify `--main_renderer` and `--additional_renderers` to change rendering viewpoints. If animations exhibit jitter or instability, increase the root/joint smoothing weights for better temporal consistency.
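
For reference, a hypothetical invocation that changes the viewpoints and strengthens the smoothing terms could look as follows (the save name and weight values are placeholders; the flags are those defined in `optimization.py`):

```
python optimization.py --input_path inputs --save_path results --iter 200 --img_size 960 \
    --seq_name 'fish' --save_name 'fish_right_view' \
    --main_renderer right --additional_renderers "front,back,left" \
    --coherence_weight 15 --root_smooth_weight 2.0
```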

## TODO

- [ ] Add multi-view supervisions.
third_party/Puppeteer/animation/demo.sh
ADDED
|
@@ -0,0 +1,7 @@

python optimization.py --save_path results --iter 200 --input_path inputs --img_size 960 \
    --seq_name 'fish' --save_name 'fish' --coherence_weight 5

# python optimization.py --save_path results --iter 200 --input_path inputs --img_size 960 \
#     --seq_name 'crocodile' --save_name 'crocodile_demo' --coherence_weight 15

third_party/Puppeteer/animation/download.py
ADDED
|
@@ -0,0 +1,13 @@

from huggingface_hub import hf_hub_download

file_path = hf_hub_download(
    repo_id="facebook/cotracker3",
    filename="scaled_offline.pth",
    local_dir="third_partys/co_tracker/ckpt"
)

file_path = hf_hub_download(
    repo_id="depth-anything/Video-Depth-Anything-Large",
    filename="video_depth_anything_vitl.pth",
    local_dir="third_partys/Video_Depth_Anything/ckpt"
)

third_party/Puppeteer/animation/model.py
ADDED
|
@@ -0,0 +1,199 @@
| 1 |
+
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import torch
|
| 16 |
+
from typing import List, Optional, Tuple, Union
|
| 17 |
+
from collections import deque
|
| 18 |
+
from pytorch3d.structures import Meshes, join_meshes_as_scene
|
| 19 |
+
from pytorch3d.renderer import TexturesVertex, TexturesUV
|
| 20 |
+
|
| 21 |
+
from utils.quat_utils import quat_to_transform_matrix, quat_multiply, quat_rotate_vector
|
| 22 |
+
|
| 23 |
+
class RiggingModel:
|
| 24 |
+
"""
|
| 25 |
+
A 3D rigged model supporting skeletal animation.
|
| 26 |
+
|
| 27 |
+
Handles mesh geometry, skeletal hierarchy, skinning weights, and
|
| 28 |
+
linear blend skinning (LBS) deformation.
|
| 29 |
+
"""
|
| 30 |
+
def __init__(self, device = "cuda:0"):
|
| 31 |
+
self.device = device
|
| 32 |
+
# Mesh data
|
| 33 |
+
self.vertices: List[torch.Tensor] = []
|
| 34 |
+
self.faces: List[torch.Tensor] = []
|
| 35 |
+
self.textures: List[Union[TexturesVertex, TexturesUV]] = []
|
| 36 |
+
|
| 37 |
+
# Skeletal data
|
| 38 |
+
self.bones: Optional[torch.Tensor] = None # (N, 2) [parent, child] pairs
|
| 39 |
+
self.parent_indices: Optional[torch.Tensor] = None # (J,) parent index for each joint
|
| 40 |
+
self.root_index: Optional[int] = None # Root joint index
|
| 41 |
+
self.joints_rest: Optional[torch.Tensor] = None # (J, 3) rest pose positions
|
| 42 |
+
self.skin_weights: List[torch.Tensor] = [] # List of (V_i, J) skinning weights
|
| 43 |
+
|
| 44 |
+
# Fixed local positions
|
| 45 |
+
self.rest_local_positions: Optional[torch.Tensor] = None # (J, 3)
|
| 46 |
+
|
| 47 |
+
# Computed data
|
| 48 |
+
self.bind_matrices_inv: Optional[torch.Tensor] = None # (J, 4, 4) inverse bind matrices
|
| 49 |
+
self.deformed_vertices: Optional[List[torch.Tensor]] = None # List of (T, V_i, 3)
|
| 50 |
+
self.joint_positions: Optional[torch.Tensor] = None # (T, J, 3) current joint positions
|
| 51 |
+
|
| 52 |
+
# Validation flags
|
| 53 |
+
self._bind_matrices_initialized = False
|
| 54 |
+
|
| 55 |
+
def initialize_bind_matrices(self, rest_local_pos):
|
| 56 |
+
"""Initialize bind matrices and store rest local positions."""
|
| 57 |
+
self.rest_local_positions = rest_local_pos.to(self.device)
|
| 58 |
+
|
| 59 |
+
J = rest_local_pos.shape[0]
|
| 60 |
+
rest_global_quats, rest_global_pos = self.forward_kinematics(
|
| 61 |
+
torch.tensor([[[1.0, 0.0, 0.0, 0.0]] * J], device=self.device), # unit quaternion
|
| 62 |
+
self.parent_indices,
|
| 63 |
+
self.root_index
|
| 64 |
+
)
|
| 65 |
+
|
| 66 |
+
bind_matrices = quat_to_transform_matrix(rest_global_quats, rest_global_pos) # (1,J,4,4)
|
| 67 |
+
self.bind_matrices_inv = torch.inverse(bind_matrices.squeeze(0)) # (J,4,4)
|
| 68 |
+
|
| 69 |
+
self._bind_matrices_initialized = True
|
| 70 |
+
|
| 71 |
+
def animate(self, local_quaternions, root_quaternion = None, root_position = None):
|
| 72 |
+
"""
|
| 73 |
+
Animate the model using local joint transformations.
|
| 74 |
+
|
| 75 |
+
Args:
|
| 76 |
+
local_quaternions: (T, J, 4) local rotations per frame
|
| 77 |
+
root_quaternion: (T, 4) global root rotation
|
| 78 |
+
root_position: (T, 3) global root translation
|
| 79 |
+
"""
|
| 80 |
+
if not self._bind_matrices_initialized:
|
| 81 |
+
raise RuntimeError("Bind matrices not initialized. Call initialize_bind_matrices() first.")
|
| 82 |
+
|
| 83 |
+
# Forward kinematics
|
| 84 |
+
global_quats, global_pos = self.forward_kinematics(
|
| 85 |
+
local_quaternions,
|
| 86 |
+
self.parent_indices,
|
| 87 |
+
self.root_index
|
| 88 |
+
)
|
| 89 |
+
self.joint_positions = global_pos
|
| 90 |
+
|
| 91 |
+
joint_transforms = quat_to_transform_matrix(global_quats, global_pos) # (T, J, 4, 4)
|
| 92 |
+
|
| 93 |
+
# Apply global root transformation if provided
|
| 94 |
+
if root_quaternion is not None and root_position is not None:
|
| 95 |
+
root_transform = quat_to_transform_matrix(root_quaternion, root_position)
|
| 96 |
+
joint_transforms = root_transform[:, None] @ joint_transforms
|
| 97 |
+
self.joint_positions = joint_transforms[..., :3, 3]
|
| 98 |
+
|
| 99 |
+
# Linear blend skinning
|
| 100 |
+
self.deformed_vertices = []
|
| 101 |
+
for i, vertices in enumerate(self.vertices):
|
| 102 |
+
deformed = self._linear_blend_skinning(
|
| 103 |
+
vertices,
|
| 104 |
+
joint_transforms,
|
| 105 |
+
self.skin_weights[i],
|
| 106 |
+
self.bind_matrices_inv
|
| 107 |
+
)
|
| 108 |
+
self.deformed_vertices.append(deformed)
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def get_mesh(self, frame_idx=None):
|
| 112 |
+
meshes = []
|
| 113 |
+
for i in range(len(self.vertices)):
|
| 114 |
+
mesh = Meshes(
|
| 115 |
+
verts=[self.vertices[i]] if frame_idx is None or self.deformed_vertices is None else [self.deformed_vertices[i][frame_idx]],
|
| 116 |
+
faces=[self.faces[i]],
|
| 117 |
+
textures=self.textures[i]
|
| 118 |
+
)
|
| 119 |
+
meshes.append(mesh)
|
| 120 |
+
return join_meshes_as_scene(meshes)
|
| 121 |
+
|
| 122 |
+
def _linear_blend_skinning(self, vertices, joint_transforms, skin_weights, bind_matrices_inv):
|
| 123 |
+
"""
|
| 124 |
+
Apply linear blend skinning to vertices.
|
| 125 |
+
|
| 126 |
+
Args:
|
| 127 |
+
vertices: (V, 3) vertex positions
|
| 128 |
+
joint_transforms: (T, J, 4, 4) joint transformation matrices
|
| 129 |
+
skin_weights: (V, J) per-vertex joint weights
|
| 130 |
+
bind_matrices_inv: (J, 4, 4) inverse bind matrices
|
| 131 |
+
|
| 132 |
+
Returns:
|
| 133 |
+
(T, V, 3) deformed vertices
|
| 134 |
+
"""
|
| 135 |
+
# Compute final transformation matrices
|
| 136 |
+
transforms = torch.matmul(joint_transforms, bind_matrices_inv) # (T, J, 4, 4)
|
| 137 |
+
|
| 138 |
+
# Weight and blend transformations
|
| 139 |
+
weighted_transforms = torch.einsum('vj,tjab->tvab', skin_weights, transforms) # (T, V, 4, 4)
|
| 140 |
+
|
| 141 |
+
# Apply to vertices
|
| 142 |
+
vertices_hom = torch.cat([vertices, torch.ones(vertices.shape[0], 1, device=vertices.device)], dim=-1)
|
| 143 |
+
deformed = torch.matmul(weighted_transforms, vertices_hom.unsqueeze(-1)).squeeze(-1)
|
| 144 |
+
|
| 145 |
+
return deformed[..., :3]
|
| 146 |
+
|
| 147 |
+
def forward_kinematics(self, local_quaternions, parent_indices, root_index = 0):
|
| 148 |
+
"""
|
| 149 |
+
Compute global joint transformations from local ones.
|
| 150 |
+
|
| 151 |
+
Args:
|
| 152 |
+
local_quaternions: (B, J, 4) local rotations
|
| 153 |
+
parent_indices: (J,) parent index for each joint
|
| 154 |
+
root_index: Root joint index
|
| 155 |
+
|
| 156 |
+
Returns:
|
| 157 |
+
Tuple of (global_quaternions, global_positions)
|
| 158 |
+
"""
|
| 159 |
+
B, J = local_quaternions.shape[:2]
|
| 160 |
+
local_positions = self.rest_local_positions.unsqueeze(0).expand(B, -1, -1)
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
# Initialize storage
|
| 164 |
+
global_quats = [None] * J
|
| 165 |
+
global_positions = [None] * J
|
| 166 |
+
|
| 167 |
+
# Build children mapping
|
| 168 |
+
children = [[] for _ in range(J)]
|
| 169 |
+
for child_idx in range(J):
|
| 170 |
+
parent_idx = parent_indices[child_idx]
|
| 171 |
+
if parent_idx >= 0:
|
| 172 |
+
children[parent_idx].append(child_idx)
|
| 173 |
+
|
| 174 |
+
# Breadth-first traversal from root
|
| 175 |
+
queue = deque([root_index])
|
| 176 |
+
visited = {root_index}
|
| 177 |
+
|
| 178 |
+
# Process root
|
| 179 |
+
global_quats[root_index] = local_quaternions[:, root_index]
|
| 180 |
+
global_positions[root_index] = local_positions[:, root_index]
|
| 181 |
+
|
| 182 |
+
while queue:
|
| 183 |
+
current = queue.popleft()
|
| 184 |
+
current_quat = global_quats[current]
|
| 185 |
+
current_pos = global_positions[current]
|
| 186 |
+
|
| 187 |
+
for child in children[current]:
|
| 188 |
+
if child not in visited:
|
| 189 |
+
visited.add(child)
|
| 190 |
+
queue.append(child)
|
| 191 |
+
|
| 192 |
+
# Transform child to global space
|
| 193 |
+
child_quat = quat_multiply(current_quat, local_quaternions[:, child])
|
| 194 |
+
child_pos = quat_rotate_vector(current_quat, local_positions[:, child]) + current_pos
|
| 195 |
+
|
| 196 |
+
global_quats[child] = child_quat
|
| 197 |
+
global_positions[child] = child_pos
|
| 198 |
+
|
| 199 |
+
return torch.stack(global_quats, dim=1), torch.stack(global_positions, dim=1)
|
third_party/Puppeteer/animation/optimization.py
ADDED
|
@@ -0,0 +1,626 @@
| 1 |
+
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
import os
|
| 17 |
+
import argparse
|
| 18 |
+
import json
|
| 19 |
+
import numpy as np
|
| 20 |
+
import logging
|
| 21 |
+
import glob
|
| 22 |
+
import torch
|
| 23 |
+
import torch.nn.functional as F
|
| 24 |
+
from PIL import Image
|
| 25 |
+
from tqdm import tqdm
|
| 26 |
+
|
| 27 |
+
from renderer import MeshRenderer3D
|
| 28 |
+
from model import RiggingModel
|
| 29 |
+
from utils.quat_utils import (
|
| 30 |
+
compute_rest_local_positions, quat_inverse, quat_log, quat_multiply
|
| 31 |
+
)
|
| 32 |
+
from utils.loss_utils import (
|
| 33 |
+
DepthModule, compute_reprojection_loss, geodesic_loss, root_motion_reg,
|
| 34 |
+
calculate_flow_loss, compute_depth_loss_normalized, joint_motion_coherence
|
| 35 |
+
)
|
| 36 |
+
from utils.data_loader import load_model_from_obj_and_rig, prepare_depth
|
| 37 |
+
from utils.save_utils import (
|
| 38 |
+
save_args, visualize_joints_on_mesh, save_final_video,
|
| 39 |
+
save_and_smooth_results, visualize_points_on_mesh, save_track_points
|
| 40 |
+
)
|
| 41 |
+
from utils.misc import warmup_then_decay
|
| 42 |
+
from third_partys.co_tracker.save_track import save_track
|
| 43 |
+
|
| 44 |
+
class AnimationOptimizer:
|
| 45 |
+
"""Main class for animation optimization with video guidance."""
|
| 46 |
+
|
| 47 |
+
def __init__(self, args, device = 'cuda:0'):
|
| 48 |
+
self.args = args
|
| 49 |
+
self.device = device
|
| 50 |
+
self.logger = self._setup_logger()
|
| 51 |
+
|
| 52 |
+
# Training parameters
|
| 53 |
+
self.reinit_patience_threshold = 20
|
| 54 |
+
self.loss_divergence_factor = 2.0
|
| 55 |
+
self.gradient_clip_norm = 1.0
|
| 56 |
+
|
| 57 |
+
# Loss weights
|
| 58 |
+
self.target_ratios = {
|
| 59 |
+
'rgb': args.rgb_wt,
|
| 60 |
+
'flow': args.flow_wt,
|
| 61 |
+
'proj_joint': args.proj_joint_wt,
|
| 62 |
+
'proj_vert': args.proj_vert_wt,
|
| 63 |
+
'depth': args.depth_wt,
|
| 64 |
+
'mask': args.mask_wt
|
| 65 |
+
}
|
| 66 |
+
self.loss_weights = {
|
| 67 |
+
'rgb': 1.0,
|
| 68 |
+
'flow': 1.0,
|
| 69 |
+
'proj_joint': 1.0,
|
| 70 |
+
'proj_vert': 1.0,
|
| 71 |
+
'depth': 1.0,
|
| 72 |
+
'mask': 1.0
|
| 73 |
+
}
|
| 74 |
+
|
| 75 |
+
def _setup_logger(self):
|
| 76 |
+
"""Set up logging configuration."""
|
| 77 |
+
logger = logging.getLogger("animation_optimizer")
|
| 78 |
+
logger.setLevel(logging.INFO)
|
| 79 |
+
|
| 80 |
+
if not logger.handlers:
|
| 81 |
+
formatter = logging.Formatter(
|
| 82 |
+
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
|
| 83 |
+
)
|
| 84 |
+
console_handler = logging.StreamHandler()
|
| 85 |
+
console_handler.setFormatter(formatter)
|
| 86 |
+
logger.addHandler(console_handler)
|
| 87 |
+
|
| 88 |
+
return logger
|
| 89 |
+
def _add_file_handler(self, log_path):
|
| 90 |
+
"""Add file handler to logger."""
|
| 91 |
+
file_handler = logging.FileHandler(log_path)
|
| 92 |
+
formatter = logging.Formatter("%(asctime)s %(message)s")
|
| 93 |
+
file_handler.setFormatter(formatter)
|
| 94 |
+
self.logger.addHandler(file_handler)
|
| 95 |
+
|
| 96 |
+
def _initialize_parameters(self, batch_size, num_joints):
|
| 97 |
+
"""Initialize optimization parameters."""
|
| 98 |
+
|
| 99 |
+
# Fixed first frame quaternions (identity)
|
| 100 |
+
fixed_quat_0 = torch.zeros((1, num_joints, 4), device=self.device)
|
| 101 |
+
fixed_quat_0[..., 0] = 1.0
|
| 102 |
+
|
| 103 |
+
# Initialize learnable quaternions for frames 1 to B-1
|
| 104 |
+
learn_quats_init = torch.zeros((batch_size - 1, num_joints, 4), device=self.device)
|
| 105 |
+
learn_quats_init[..., 0] = 1.0
|
| 106 |
+
quats_to_optimize = learn_quats_init.clone().detach().requires_grad_(True)
|
| 107 |
+
|
| 108 |
+
# Initialize global transformations
|
| 109 |
+
fixed_global_quat_0 = torch.zeros((1, 4), device=self.device)
|
| 110 |
+
fixed_global_quat_0[:, 0] = 1.0
|
| 111 |
+
fixed_global_trans_0 = torch.zeros((1, 3), device=self.device)
|
| 112 |
+
|
| 113 |
+
# Initialize learnable global transformations
|
| 114 |
+
global_quats_init = torch.zeros((batch_size - 1, 4), device=self.device)
|
| 115 |
+
global_quats_init[:, 0] = 1.0
|
| 116 |
+
global_trans_init = torch.zeros((batch_size - 1, 3), device=self.device)
|
| 117 |
+
|
| 118 |
+
global_quats = global_quats_init.clone().detach().requires_grad_(True)
|
| 119 |
+
global_trans = global_trans_init.clone().detach().requires_grad_(True)
|
| 120 |
+
|
| 121 |
+
return quats_to_optimize, global_quats, global_trans, fixed_quat_0, fixed_global_quat_0, fixed_global_trans_0
|
| 122 |
+
|
| 123 |
+
def _setup_optimizer_and_scheduler(self, quats_to_optimize, global_quats, global_trans, n_iters):
|
| 124 |
+
"""Set up optimizer and learning rate scheduler."""
|
| 125 |
+
|
| 126 |
+
base_lr = self.args.warm_lr
|
| 127 |
+
max_lr = self.args.lr
|
| 128 |
+
warmup_steps = 20
|
| 129 |
+
|
| 130 |
+
min_lr = self.args.min_lr
|
| 131 |
+
quat_lr = base_lr # *2
|
| 132 |
+
|
| 133 |
+
optimizer = torch.optim.AdamW([
|
| 134 |
+
{'params': quats_to_optimize, 'lr': quat_lr},
|
| 135 |
+
{'params': global_quats, 'lr': quat_lr},
|
| 136 |
+
{'params': global_trans, 'lr': base_lr}
|
| 137 |
+
])
|
| 138 |
+
|
| 139 |
+
scheduler = warmup_then_decay(
|
| 140 |
+
optimizer=optimizer,
|
| 141 |
+
total_steps=n_iters,
|
| 142 |
+
warmup_steps=warmup_steps,
|
| 143 |
+
max_lr=max_lr,
|
| 144 |
+
min_lr=min_lr,
|
| 145 |
+
base_lr=base_lr
|
| 146 |
+
)
|
| 147 |
+
|
| 148 |
+
return optimizer, scheduler
|
| 149 |
+
|
| 150 |
+
def _compute_smoothness_losses(self, quats_normed, all_global_quats_normed, all_global_trans, model):
|
| 151 |
+
"""Compute various smoothness losses."""
|
| 152 |
+
|
| 153 |
+
# Rotation smoothness loss using geodesic distance
|
| 154 |
+
theta = geodesic_loss(quats_normed[1:], quats_normed[:-1])
|
| 155 |
+
rot_smoothness_loss = (theta ** 2).mean()
|
| 156 |
+
|
| 157 |
+
# Second-order rotation smoothness (acceleration)
|
| 158 |
+
omega = quat_log(quat_multiply(quat_inverse(quats_normed[:-1]), quats_normed[1:]))
|
| 159 |
+
rot_acc = omega[1:] - omega[:-1]
|
| 160 |
+
rot_acc_smoothness_loss = rot_acc.pow(2).mean()
|
| 161 |
+
|
| 162 |
+
# Joint motion coherence loss (parent-child relative motion smoothness)
|
| 163 |
+
joint_coherence_loss = joint_motion_coherence(quats_normed, model.parent_indices)
|
| 164 |
+
|
| 165 |
+
# Root motion regularization
|
| 166 |
+
root_pos_smooth_loss, root_quat_smooth_loss = root_motion_reg(
|
| 167 |
+
all_global_quats_normed, all_global_trans
|
| 168 |
+
)
|
| 169 |
+
|
| 170 |
+
return rot_smoothness_loss, rot_acc_smoothness_loss, joint_coherence_loss, root_pos_smooth_loss + root_quat_smooth_loss
|
| 171 |
+
|
| 172 |
+
def pre_calibrate_loss_weights(self, loss_components, target_ratios=None):
|
| 173 |
+
""" calibrate loss weights """
|
| 174 |
+
loss_for_ratio = {name: loss.detach().clone() for name, loss in loss_components.items()}
|
| 175 |
+
|
| 176 |
+
rgb_loss = loss_for_ratio['rgb'].item()
|
| 177 |
+
|
| 178 |
+
for name, loss_val in loss_for_ratio.items():
|
| 179 |
+
if name == 'rgb':
|
| 180 |
+
continue
|
| 181 |
+
|
| 182 |
+
if loss_val > 1e-8:
|
| 183 |
+
scale_factor = rgb_loss / loss_val.item()
|
| 184 |
+
target_ratio = target_ratios.get(name, 1.0)
|
| 185 |
+
new_weight = self.loss_weights.get(name, 1.0) * scale_factor * target_ratio
|
| 186 |
+
|
| 187 |
+
self.loss_weights[name] = new_weight
|
| 188 |
+
|
| 189 |
+
def _compute_losses(
|
| 190 |
+
self,
|
| 191 |
+
model,
|
| 192 |
+
renderer,
|
| 193 |
+
images_batch,
|
| 194 |
+
tracked_joints_2d,
|
| 195 |
+
joint_vis_mask,
|
| 196 |
+
track_verts_2d,
|
| 197 |
+
vert_vis_mask,
|
| 198 |
+
sampled_vertex_indices,
|
| 199 |
+
track_indices,
|
| 200 |
+
flow_dirs,
|
| 201 |
+
depth_gt_raw,
|
| 202 |
+
mask,
|
| 203 |
+
out_dir,
|
| 204 |
+
iteration
|
| 205 |
+
):
|
| 206 |
+
"""Compute all losses for the optimization."""
|
| 207 |
+
|
| 208 |
+
batch_size = images_batch.shape[0]
|
| 209 |
+
meshes = [model.get_mesh(t) for t in range(batch_size)]
|
| 210 |
+
pred_images_all = renderer.render_batch(meshes)
|
| 211 |
+
|
| 212 |
+
# 2D projection losses
|
| 213 |
+
pred_joints_3d = model.joint_positions
|
| 214 |
+
proj_joint_loss = compute_reprojection_loss(
|
| 215 |
+
renderer, joint_vis_mask, pred_joints_3d,
|
| 216 |
+
tracked_joints_2d, self.args.img_size
|
| 217 |
+
)
|
| 218 |
+
|
| 219 |
+
pred_points_3d = model.deformed_vertices[0]
|
| 220 |
+
proj_vert_loss = compute_reprojection_loss(
|
| 221 |
+
renderer, vert_vis_mask,
|
| 222 |
+
pred_points_3d[:, sampled_vertex_indices],
|
| 223 |
+
track_verts_2d[:, track_indices],
|
| 224 |
+
self.args.img_size
|
| 225 |
+
)
|
| 226 |
+
|
| 227 |
+
# RGB loss
|
| 228 |
+
pred_rgb = pred_images_all[..., :3]
|
| 229 |
+
real_rgb = images_batch[..., :3]
|
| 230 |
+
diff_rgb_masked = (pred_rgb - real_rgb) * mask.unsqueeze(-1)
|
| 231 |
+
|
| 232 |
+
mse_rgb_num = (diff_rgb_masked ** 2).sum()
|
| 233 |
+
mse_rgb_den = mask.sum() * 3
|
| 234 |
+
rgb_loss = mse_rgb_num / mse_rgb_den.clamp_min(1e-8)
|
| 235 |
+
|
| 236 |
+
# Mask loss
|
| 237 |
+
silhouette_soft = renderer.render_silhouette_batch(meshes).squeeze()
|
| 238 |
+
mask_loss = F.binary_cross_entropy(silhouette_soft, mask)
|
| 239 |
+
|
| 240 |
+
# Depth losses
|
| 241 |
+
fragments = renderer.get_rasterization_fragments(meshes)
|
| 242 |
+
zbuf_depths = fragments.zbuf[..., 0]
|
| 243 |
+
depth_loss = compute_depth_loss_normalized(depth_gt_raw, zbuf_depths, mask)
|
| 244 |
+
|
| 245 |
+
# Flow losses
|
| 246 |
+
flow_loss = calculate_flow_loss(flow_dirs, self.device, mask, renderer, model)
|
| 247 |
+
|
| 248 |
+
loss_components = {
|
| 249 |
+
'rgb': rgb_loss,
|
| 250 |
+
'proj_joint': proj_joint_loss,
|
| 251 |
+
'proj_vert': proj_vert_loss,
|
| 252 |
+
'depth': depth_loss,
|
| 253 |
+
'flow': flow_loss,
|
| 254 |
+
'mask': mask_loss
|
| 255 |
+
}
|
| 256 |
+
|
| 257 |
+
return loss_components
|
| 258 |
+
|
| 259 |
+
def optimization(
|
| 260 |
+
self,
|
| 261 |
+
images_batch,
|
| 262 |
+
model,
|
| 263 |
+
renderer,
|
| 264 |
+
tracked_joints_2d,
|
| 265 |
+
joint_vis_mask,
|
| 266 |
+
track_verts_2d,
|
| 267 |
+
vert_vis_mask,
|
| 268 |
+
sampled_vertex_indices,
|
| 269 |
+
track_indices,
|
| 270 |
+
flow_dirs,
|
| 271 |
+
n_iters,
|
| 272 |
+
out_dir):
|
| 273 |
+
"""
|
| 274 |
+
Optimize animation parameters with fixed first frame.
|
| 275 |
+
"""
|
| 276 |
+
torch.autograd.set_detect_anomaly(True)
|
| 277 |
+
|
| 278 |
+
batch_size, _, _, _ = images_batch.shape
|
| 279 |
+
num_joints = model.joints_rest.shape[0]
|
| 280 |
+
|
| 281 |
+
# Setup output directory and logging
|
| 282 |
+
os.makedirs(out_dir, exist_ok=True)
|
| 283 |
+
log_path = os.path.join(out_dir, "optimization.log")
|
| 284 |
+
self._add_file_handler(log_path)
|
| 285 |
+
|
| 286 |
+
# Initialize parameters
|
| 287 |
+
(quats_to_optimize, global_quats, global_trans,
|
| 288 |
+
fixed_quat_0, fixed_global_quat_0, fixed_global_trans_0) = self._initialize_parameters(batch_size, num_joints)
|
| 289 |
+
|
| 290 |
+
# Setup rest positions and bind matrices
|
| 291 |
+
rest_local_pos = compute_rest_local_positions(model.joints_rest, model.parent_indices)
|
| 292 |
+
model.initialize_bind_matrices(rest_local_pos)
|
| 293 |
+
|
| 294 |
+
# Setup optimizer and scheduler
|
| 295 |
+
optimizer, scheduler = self._setup_optimizer_and_scheduler(
|
| 296 |
+
quats_to_optimize, global_quats, global_trans, n_iters
|
| 297 |
+
)
|
| 298 |
+
|
| 299 |
+
# Initialize depth module and flow weights
|
| 300 |
+
depth_module = DepthModule(
|
| 301 |
+
encoder='vitl',
|
| 302 |
+
device=self.device,
|
| 303 |
+
input_size=images_batch.shape[1],
|
| 304 |
+
fp32=False
|
| 305 |
+
)
|
| 306 |
+
|
| 307 |
+
# Prepare masks
|
| 308 |
+
real_rgb = images_batch[..., :3]
|
| 309 |
+
threshold = 0.95
|
| 310 |
+
with torch.no_grad():
|
| 311 |
+
background_mask = (real_rgb > threshold).all(dim=-1)
|
| 312 |
+
mask = (~background_mask).float()
|
| 313 |
+
|
| 314 |
+
depth_gt_raw = prepare_depth(
|
| 315 |
+
flow_dirs.replace('flow', 'depth'), real_rgb, self.device, depth_module
|
| 316 |
+
)
|
| 317 |
+
|
| 318 |
+
# Optimization tracking
|
| 319 |
+
best_loss = float('inf')
|
| 320 |
+
patience = 0
|
| 321 |
+
best_params = None
|
| 322 |
+
|
| 323 |
+
pbar = tqdm(total=n_iters, desc="Optimizing animation")
|
| 324 |
+
|
| 325 |
+
for iteration in range(n_iters):
|
| 326 |
+
# Combine fixed and learnable parameters
|
| 327 |
+
quats_all = torch.cat([fixed_quat_0, quats_to_optimize], dim=0)
|
| 328 |
+
|
| 329 |
+
# Normalize quaternions
|
| 330 |
+
reshaped = quats_all.reshape(-1, 4)
|
| 331 |
+
norm = torch.norm(reshaped, dim=1, keepdim=True).clamp_min(1e-8)
|
| 332 |
+
quats_normed = (reshaped / norm).reshape(batch_size, num_joints, 4)
|
| 333 |
+
|
| 334 |
+
# Global transformations
|
| 335 |
+
all_global_quats = torch.cat([fixed_global_quat_0, global_quats], dim=0)
|
| 336 |
+
all_global_trans = torch.cat([fixed_global_trans_0, global_trans], dim=0)
|
| 337 |
+
all_global_quats_normed = all_global_quats / torch.norm(
|
| 338 |
+
all_global_quats, dim=-1, keepdim=True
|
| 339 |
+
).clamp_min(1e-8)
|
| 340 |
+
|
| 341 |
+
# Compute smoothness losses
|
| 342 |
+
(rot_smoothness_loss, rot_acc_smoothness_loss, joint_coherence_loss,
|
| 343 |
+
root_smooth_loss) = self._compute_smoothness_losses(
|
| 344 |
+
quats_normed, all_global_quats_normed, all_global_trans, model
|
| 345 |
+
)
|
| 346 |
+
|
| 347 |
+
# animate model
|
| 348 |
+
model.animate(quats_normed, all_global_quats_normed, all_global_trans)
|
| 349 |
+
|
| 350 |
+
# Verify first frame hasn't changed
|
| 351 |
+
verts0 = model.vertices[0]
|
| 352 |
+
de0 = model.deformed_vertices[0][0]
|
| 353 |
+
assert torch.allclose(de0, verts0, atol=1e-2), "First frame vertices have changed!"
|
| 354 |
+
|
| 355 |
+
# Compute all losses
|
| 356 |
+
loss_components = self._compute_losses(
|
| 357 |
+
model, renderer, images_batch, tracked_joints_2d, joint_vis_mask,
|
| 358 |
+
track_verts_2d, vert_vis_mask, sampled_vertex_indices, track_indices,
|
| 359 |
+
flow_dirs, depth_gt_raw, mask, out_dir, iteration
|
| 360 |
+
)
|
| 361 |
+
|
| 362 |
+
total_smoothness_loss = rot_smoothness_loss + rot_acc_smoothness_loss * 10
|
| 363 |
+
|
| 364 |
+
if iteration == 0:
|
| 365 |
+
self.pre_calibrate_loss_weights(loss_components, self.target_ratios)
|
| 366 |
+
|
| 367 |
+
total_loss = (
|
| 368 |
+
loss_components['rgb'] +
|
| 369 |
+
self.loss_weights['mask'] * loss_components['mask'] +
|
| 370 |
+
self.loss_weights['flow'] * loss_components['flow'] +
|
| 371 |
+
self.loss_weights['proj_joint'] * loss_components['proj_joint'] +
|
| 372 |
+
self.loss_weights['proj_vert'] * loss_components['proj_vert'] +
|
| 373 |
+
self.loss_weights['depth'] * loss_components['depth'] +
|
| 374 |
+
self.args.smooth_weight * total_smoothness_loss +
|
| 375 |
+
self.args.coherence_weight * joint_coherence_loss +
|
| 376 |
+
self.args.root_smooth_weight * root_smooth_loss
|
| 377 |
+
)
|
| 378 |
+
|
| 379 |
+
# Optimization step
|
| 380 |
+
optimizer.zero_grad()
|
| 381 |
+
total_loss.backward()
|
| 382 |
+
torch.nn.utils.clip_grad_norm_(
|
| 383 |
+
[quats_to_optimize, global_quats, global_trans],
|
| 384 |
+
max_norm=self.gradient_clip_norm
|
| 385 |
+
)
|
| 386 |
+
optimizer.step()
|
| 387 |
+
scheduler.step()
|
| 388 |
+
|
| 389 |
+
# Update progress bar and logging
|
| 390 |
+
loss_desc = (
|
| 391 |
+
f"Loss: {total_loss.item():.4f}, "
|
| 392 |
+
f"RGB: {loss_components['rgb'].item():.4f}, "
|
| 393 |
+
f"Mask: {self.loss_weights['mask'] * loss_components['mask'].item():.4f}, "
|
| 394 |
+
f"Flow: {self.loss_weights['flow'] * loss_components['flow'].item():.4f}, "
|
| 395 |
+
f"Proj_joint: {self.loss_weights['proj_joint'] * loss_components['proj_joint'].item():.4f}, "
|
| 396 |
+
f"Proj_vert: {self.loss_weights['proj_vert'] * loss_components['proj_vert'].item():.4f}, "
|
| 397 |
+
f"Depth: {self.loss_weights['depth'] * loss_components['depth'].item():.4f}, "
|
| 398 |
+
f"Smooth: {self.args.smooth_weight * total_smoothness_loss.item():.4f}, "
|
| 399 |
+
f"Joint smooth: {self.args.coherence_weight * joint_coherence_loss.item():.4f}, "
|
| 400 |
+
f"Root smooth: {self.args.root_smooth_weight * root_smooth_loss.item():.4f}"
|
| 401 |
+
)
|
| 402 |
+
pbar.set_description(loss_desc)
|
| 403 |
+
|
| 404 |
+
if iteration % 5 == 0:
|
| 405 |
+
self.logger.info(f"Iter {iteration}: {loss_desc}")
|
| 406 |
+
|
| 407 |
+
# Adaptive reinitialization
|
| 408 |
+
current_loss = total_loss.item()
|
| 409 |
+
if current_loss < best_loss:
|
| 410 |
+
best_loss = current_loss
|
| 411 |
+
best_params = {
|
| 412 |
+
'quats': quats_to_optimize.clone().detach(),
|
| 413 |
+
'global_quats': global_quats.clone().detach(),
|
| 414 |
+
'global_trans': global_trans.clone().detach()
|
| 415 |
+
}
|
| 416 |
+
patience = 0
|
| 417 |
+
elif (current_loss > best_loss * self.loss_divergence_factor or
|
| 418 |
+
patience > self.reinit_patience_threshold * 2):
|
| 419 |
+
# Reinitialize with best parameters
|
| 420 |
+
quats_to_optimize = best_params['quats'].clone().requires_grad_(True)
|
| 421 |
+
global_quats = best_params['global_quats'].clone().requires_grad_(True)
|
| 422 |
+
global_trans = best_params['global_trans'].clone().requires_grad_(True)
|
| 423 |
+
|
| 424 |
+
optimizer, scheduler = self._setup_optimizer_and_scheduler(
|
| 425 |
+
quats_to_optimize, global_quats, global_trans, n_iters
|
| 426 |
+
)
|
| 427 |
+
patience = 0
|
| 428 |
+
self.logger.info(f'Adaptive reset at iteration {iteration} with best loss: {best_loss:.6f}')
|
| 429 |
+
else:
|
| 430 |
+
patience += 1
|
| 431 |
+
|
| 432 |
+
pbar.update(1)
|
| 433 |
+
|
| 434 |
+
pbar.close()
|
| 435 |
+
|
| 436 |
+
# Prepare final results
|
| 437 |
+
quats_final = torch.cat([fixed_quat_0, best_params['quats']], dim=0)
|
| 438 |
+
|
| 439 |
+
# Final normalization
|
| 440 |
+
reshaped = quats_final.reshape(-1, 4)
|
| 441 |
+
norm = torch.norm(reshaped, dim=1, keepdim=True).clamp_min(1e-8)
|
| 442 |
+
quats_final = (reshaped / norm).reshape(batch_size, num_joints, 4)
|
| 443 |
+
|
| 444 |
+
global_quats_final = torch.cat([fixed_global_quat_0, best_params['global_quats']], dim=0)
|
| 445 |
+
global_trans_final = torch.cat([fixed_global_trans_0, best_params['global_trans']], dim=0)
|
| 446 |
+
global_quats_final = global_quats_final / torch.norm(
|
| 447 |
+
global_quats_final, dim=-1, keepdim=True
|
| 448 |
+
).clamp_min(1e-8)
|
| 449 |
+
|
| 450 |
+
return quats_final, global_quats_final, global_trans_final
|
| 451 |
+
|
| 452 |
+
def load_and_prepare_data(args):
|
| 453 |
+
"""Load and prepare all necessary data for optimization."""
|
| 454 |
+
|
| 455 |
+
# Define paths
|
| 456 |
+
base_path = f'{args.input_path}/{args.seq_name}'
|
| 457 |
+
mesh_path = f'{base_path}/objs/mesh.obj'
|
| 458 |
+
rig_path = f'{base_path}/objs/rig.txt'
|
| 459 |
+
img_path = f'{base_path}/imgs'
|
| 460 |
+
flow_dirs = f'{base_path}/flow'
|
| 461 |
+
|
| 462 |
+
# Load model
|
| 463 |
+
model = load_model_from_obj_and_rig(mesh_path, rig_path, device=args.device)
|
| 464 |
+
|
| 465 |
+
# Load images
|
| 466 |
+
img_files = sorted(glob.glob(os.path.join(img_path, "*.png")))
|
| 467 |
+
images = []
|
| 468 |
+
for f in img_files:
|
| 469 |
+
img = Image.open(f).convert("RGBA")
|
| 470 |
+
arr = np.array(img, dtype=np.float32) / 255.0
|
| 471 |
+
t = torch.from_numpy(arr).to(args.device)
|
| 472 |
+
images.append(t)
|
| 473 |
+
|
| 474 |
+
images_batch = torch.stack(images, dim=0)
|
| 475 |
+
|
| 476 |
+
return model, images_batch, flow_dirs, img_path
|
| 477 |
+
|
| 478 |
+
def setup_renderers(args):
|
| 479 |
+
"""Setup multiple renderers for different camera views."""
|
| 480 |
+
|
| 481 |
+
available_views = [
|
| 482 |
+
"front", "back", "left", "right",
|
| 483 |
+
"front_left", "front_right", "back_left", "back_right"
|
| 484 |
+
]
|
| 485 |
+
|
| 486 |
+
if args.main_renderer not in available_views:
|
| 487 |
+
raise ValueError(f"Main renderer '{args.main_renderer}' not found in available cameras: {available_views}")
|
| 488 |
+
|
| 489 |
+
main_cam_config = json.load(open(f"utils/cameras/{args.main_renderer}.json"))
|
| 490 |
+
main_renderer = MeshRenderer3D(args.device, image_size=args.img_size, cam_params=main_cam_config)
|
| 491 |
+
|
| 492 |
+
additional_views = [view.strip() for view in args.additional_renderers.split(',') if view.strip()]
|
| 493 |
+
if len(additional_views) > 3:
|
| 494 |
+
print(f"Warning: Only first 3 additional renderers will be used. Got: {additional_views}")
|
| 495 |
+
additional_views = additional_views[:3]
|
| 496 |
+
|
| 497 |
+
additional_renderers = {}
|
| 498 |
+
for view_name in additional_views:
|
| 499 |
+
if view_name in available_views and view_name != args.main_renderer:
|
| 500 |
+
cam_config = json.load(open(f"utils/cameras/{view_name}.json"))
|
| 501 |
+
renderer = MeshRenderer3D(args.device, image_size=args.img_size, cam_params=cam_config)
|
| 502 |
+
additional_renderers[f"{view_name}_renderer"] = renderer
|
| 503 |
+
elif view_name == args.main_renderer:
|
| 504 |
+
print(f"Warning: '{view_name}' is already the main renderer, skipping...")
|
| 505 |
+
elif view_name not in available_views:
|
| 506 |
+
print(f"Warning: Camera view '{view_name}' not found, skipping...")
|
| 507 |
+
|
| 508 |
+
return main_renderer, additional_renderers
|
| 509 |
+
|
| 510 |
+
def get_parser():
|
| 511 |
+
"""Create argument parser with all configuration options."""
|
| 512 |
+
|
| 513 |
+
parser = argparse.ArgumentParser(description="3D Rigging Optimization")
|
| 514 |
+
|
| 515 |
+
# Training parameters
|
| 516 |
+
training_group = parser.add_argument_group('Training')
|
| 517 |
+
training_group.add_argument("--iter", type=int, default=500, help="Number of training iterations")
|
| 518 |
+
training_group.add_argument("--img_size", type=int, default=512, help="Image resolution")
|
| 519 |
+
training_group.add_argument("--device", type=str, default="cuda:0", help="Device to use")
|
| 520 |
+
training_group.add_argument("--img_fps", type=int, default=15, help="Image frame rate")
|
| 521 |
+
training_group.add_argument('--main_renderer', type=str, default='front', help='Main renderer camera view (default: front)')
|
| 522 |
+
training_group.add_argument('--additional_renderers', type=str, default="back, right, left", help='Additional renderer views (max 3), comma-separated (e.g., "back,left,right"). ')
|
| 523 |
+
|
| 524 |
+
# Learning rates
|
| 525 |
+
lr_group = parser.add_argument_group('Learning Rates')
|
| 526 |
+
lr_group.add_argument("--lr", type=float, default=2e-3, help="Base learning rate")
|
| 527 |
+
lr_group.add_argument("--min_lr", type=float, default=1e-5, help="Minimum learning rate")
|
| 528 |
+
lr_group.add_argument("--warm_lr", type=float, default=1e-5, help="Warmup learning rate")
|
| 529 |
+
|
| 530 |
+
# Loss weights
|
| 531 |
+
loss_group = parser.add_argument_group('Loss Weights')
|
| 532 |
+
loss_group.add_argument("--smooth_weight", type=float, default=0.2)
|
| 533 |
+
loss_group.add_argument("--root_smooth_weight", type=float, default=1.0)
|
| 534 |
+
loss_group.add_argument("--coherence_weight", type=float, default=10)
|
| 535 |
+
loss_group.add_argument("--rgb_wt", type=float, default=1.0, help="RGB loss target ratio (relative importance)")
|
| 536 |
+
loss_group.add_argument("--mask_wt", type=float, default=1.0, help="Mask loss target ratio")
|
| 537 |
+
loss_group.add_argument("--proj_joint_wt", type=float, default=1.5, help="Joint projection loss target ratio")
|
| 538 |
+
loss_group.add_argument("--proj_vert_wt", type=float, default=3.0, help="Point projection loss target ratio")
|
| 539 |
+
loss_group.add_argument("--depth_wt", type=float, default=0.8, help="Depth loss target ratio")
|
| 540 |
+
loss_group.add_argument("--flow_wt", type=float, default=0.8, help="Flow loss target ratio")
|
| 541 |
+
|
| 542 |
+
# Data and output
|
| 543 |
+
data_group = parser.add_argument_group('Data and Output')
|
| 544 |
+
data_group.add_argument("--input_path", type=str, default="inputs")
|
| 545 |
+
data_group.add_argument("--save_path", type=str, default="results")
|
| 546 |
+
data_group.add_argument("--save_name", type=str, default="results")
|
| 547 |
+
data_group.add_argument("--seq_name", type=str, default=None)
|
| 548 |
+
|
| 549 |
+
# Flags
|
| 550 |
+
flag_group = parser.add_argument_group('Flags')
|
| 551 |
+
flag_group.add_argument('--gauss_filter', action='store_true', default=False)
|
| 552 |
+
return parser
|
| 553 |
+
|
| 554 |
+
def main():
|
| 555 |
+
parser = get_parser()
|
| 556 |
+
args = parser.parse_args()
|
| 557 |
+
|
| 558 |
+
# Setup output directory
|
| 559 |
+
out_dir = f'{args.save_path}/{args.seq_name}/{args.save_name}'
|
| 560 |
+
save_args(args, out_dir)
|
| 561 |
+
|
| 562 |
+
# Initialize optimizer
|
| 563 |
+
ani_optimizer = AnimationOptimizer(args, device=args.device)
|
| 564 |
+
|
| 565 |
+
# Setup renderers
|
| 566 |
+
renderer, additional_renderers = setup_renderers(args)
|
| 567 |
+
|
| 568 |
+
# Load and prepare data
|
| 569 |
+
model, images_batch, flow_dirs, img_path = load_and_prepare_data(args)
|
| 570 |
+
|
| 571 |
+
# Setup tracking
|
| 572 |
+
joint_vis_mask = visualize_joints_on_mesh(model, renderer, args.seq_name, out_dir=out_dir)
|
| 573 |
+
joint_vis_mask = torch.from_numpy(joint_vis_mask).float().to(args.device)
|
| 574 |
+
|
| 575 |
+
joint_project_2d = renderer.project_points(model.joints_rest)
|
| 576 |
+
|
| 577 |
+
# Setup track paths
|
| 578 |
+
track_2d_path = img_path.replace('imgs', 'track_2d_joints')
|
| 579 |
+
os.makedirs(track_2d_path, exist_ok=True)
|
| 580 |
+
|
| 581 |
+
# Load or generate tracks
|
| 582 |
+
if not os.listdir(track_2d_path):
|
| 583 |
+
print("Generating joint tracks")
|
| 584 |
+
tracked_joints_2d = save_track(args.seq_name, joint_project_2d, img_path, track_2d_path, out_dir)
|
| 585 |
+
else:
|
| 586 |
+
print("Loading existing joint tracks")
|
| 587 |
+
tracked_joints_2d = np.load(f'{track_2d_path}/pred_tracks.npy')
|
| 588 |
+
|
| 589 |
+
# Setup point tracking
|
| 590 |
+
vert_vis_mask = visualize_points_on_mesh(model, renderer, args.seq_name, out_dir=out_dir)
|
| 591 |
+
vert_vis_mask = torch.from_numpy(vert_vis_mask).float().to(args.device)
|
| 592 |
+
|
| 593 |
+
track_verts_2d, track_indices, sampled_vertex_indices = save_track_points(
|
| 594 |
+
vert_vis_mask, renderer, model, img_path, out_dir, args
|
| 595 |
+
)
|
| 596 |
+
vert_vis_mask = vert_vis_mask[sampled_vertex_indices]
|
| 597 |
+
|
| 598 |
+
# Run optimization
|
| 599 |
+
print(f"Starting optimization")
|
| 600 |
+
final_quats, root_quats, root_pos = ani_optimizer.optimization(
|
| 601 |
+
images_batch=images_batch,
|
| 602 |
+
model=model,
|
| 603 |
+
renderer=renderer,
|
| 604 |
+
tracked_joints_2d=tracked_joints_2d,
|
| 605 |
+
joint_vis_mask=joint_vis_mask,
|
| 606 |
+
track_verts_2d=track_verts_2d,
|
| 607 |
+
vert_vis_mask=vert_vis_mask,
|
| 608 |
+
sampled_vertex_indices=sampled_vertex_indices,
|
| 609 |
+
track_indices=track_indices,
|
| 610 |
+
flow_dirs=flow_dirs,
|
| 611 |
+
n_iters=args.iter,
|
| 612 |
+
out_dir=out_dir
|
| 613 |
+
)
|
| 614 |
+
|
| 615 |
+
# Save results
|
| 616 |
+
save_and_smooth_results(
|
| 617 |
+
args, model, renderer, final_quats, root_quats, root_pos,
|
| 618 |
+
out_dir, additional_renderers, fps=10
|
| 619 |
+
)
|
| 620 |
+
|
| 621 |
+
print("Optimization completed successfully")
|
| 622 |
+
save_final_video(args)
|
| 623 |
+
|
| 624 |
+
|
| 625 |
+
if __name__ == "__main__":
|
| 626 |
+
main()
|
third_party/Puppeteer/animation/renderer.py
ADDED
|
@@ -0,0 +1,348 @@
| 1 |
+
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import numpy as np
|
| 16 |
+
import torch
|
| 17 |
+
import cv2
|
| 18 |
+
|
| 19 |
+
from pytorch3d.structures import join_meshes_as_scene, join_meshes_as_batch, Meshes
|
| 20 |
+
from pytorch3d.renderer import (
|
| 21 |
+
FoVPerspectiveCameras, look_at_view_transform,
|
| 22 |
+
RasterizationSettings, MeshRenderer, MeshRasterizer,
|
| 23 |
+
SoftPhongShader, PointLights, BlendParams, SoftSilhouetteShader
|
| 24 |
+
)
|
| 25 |
+
from utils.loss_utils import compute_visibility_mask_igl
|
| 26 |
+
|
| 27 |
+
def create_camera_from_blender_params(cam_params, device):
|
| 28 |
+
"""
|
| 29 |
+
Convert Blender camera parameters to PyTorch3D camera
|
| 30 |
+
|
| 31 |
+
Args:
|
| 32 |
+
cam_params (dict): Camera parameters from Blender JSON
|
| 33 |
+
device: Device to create camera on
|
| 34 |
+
|
| 35 |
+
Returns:
|
| 36 |
+
FoVPerspectiveCameras: Converted camera
|
| 37 |
+
"""
|
| 38 |
+
# Extract matrix world and convert to rotation and translation
|
| 39 |
+
matrix_world = torch.tensor(cam_params['matrix_world'], dtype=torch.float32)
|
| 40 |
+
|
| 41 |
+
# Extract field of view (use x_fov, assuming symmetric FOV)
|
| 42 |
+
fov = cam_params['x_fov'] * 180 / np.pi # Convert radians to degrees
|
| 43 |
+
|
| 44 |
+
rotation_matrix = torch.tensor([
|
| 45 |
+
[1, 0, 0, 0],
|
| 46 |
+
[0, 0, 1, 0],
|
| 47 |
+
[0, -1, 0, 0],
|
| 48 |
+
[0, 0, 0, 1]
|
| 49 |
+
], dtype=torch.float32)
|
| 50 |
+
|
| 51 |
+
# Apply transformations
|
| 52 |
+
adjusted_matrix = rotation_matrix @ matrix_world
|
| 53 |
+
world2cam_matrix_tensor = torch.linalg.inv(adjusted_matrix)
|
| 54 |
+
|
| 55 |
+
aligned_matrix = torch.tensor([
|
| 56 |
+
        [-1.0, 0.0, 0.0, 0.0],
        [0.0, 1.0, 0.0, 0.0],
        [0.0, 0.0, -1.0, 0.0],
        [0.0, 0.0, 0.0, 1.0]
    ], dtype=torch.float32, device=device)
    world2cam_matrix = aligned_matrix @ world2cam_matrix_tensor.to(device)
    cam2world_matrix = torch.linalg.inv(world2cam_matrix)

    # Extract rotation and translation
    R = cam2world_matrix[:3, :3]
    T = torch.tensor([
        world2cam_matrix[0, 3],
        world2cam_matrix[1, 3],
        world2cam_matrix[2, 3]
    ], device=device, dtype=torch.float32)

    return FoVPerspectiveCameras(
        device=device,
        fov=fov,
        R=R[None],
        T=T[None],
        znear=0.1,
        zfar=100.0
    )


class MeshRenderer3D:
    """
    PyTorch3D mesh renderer with support for various rendering modes.

    Features:
    - Standard mesh rendering with Phong shading
    - Silhouette rendering
    - Multi-frame batch rendering
    - Point projection with visibility computation
    """
    def __init__(self, device, image_size=1024, cam_params=None, light_params=None, raster_params=None):
        self.device = device
        # Initialize camera
        self.camera = self._setup_camera(cam_params)

        # Initialize light
        self.light = self._setup_light(light_params)

        # Initialize rasterization settings
        self.raster_settings = self._setup_raster_settings(raster_params, image_size)
        self.camera.image_size = self.raster_settings.image_size

        # Initialize renderers
        self._setup_renderers()

    def _setup_camera(self, cam_params):
        """Setup camera based on parameters."""
        if cam_params is None:
            # Default camera
            R, T = look_at_view_transform(3.0, 30, 20, at=[[0.0, 1.0, 0.0]])
            return FoVPerspectiveCameras(device=self.device, R=R, T=T)

        # Check if Blender parameters
        if "matrix_world" in cam_params and "x_fov" in cam_params:
            return create_camera_from_blender_params(cam_params, self.device)
        else:
            raise ValueError("Need to provide blender parameters.")

    def _setup_light(self, light_params):
        """Setup light source."""
        if light_params is None:
            return PointLights(device=self.device, location=[[0.0, 0.0, 3.0]])

        location = [[
            light_params.get('light_x', 0.0),
            light_params.get('light_y', 0.0),
            light_params.get('light_z', 3.0)
        ]]
        return PointLights(device=self.device, location=location)

    def _setup_raster_settings(self, raster_params, default_size):
        """Setup rasterization settings."""
        if raster_params is None:
            raster_params = {
                "image_size": [default_size, default_size],
                "blur_radius": 0.0,
                "faces_per_pixel": 1,
                "bin_size": 0,
                "cull_backfaces": False
            }

        return RasterizationSettings(**raster_params)

    def _setup_renderers(self) -> None:
        """Initialize main and silhouette renderers."""
        rasterizer = MeshRasterizer(
            cameras=self.camera,
            raster_settings=self.raster_settings
        )

        # Main renderer with Phong shading
        self.renderer = MeshRenderer(
            rasterizer=rasterizer,
            shader=SoftPhongShader(
                device=self.device,
                cameras=self.camera,
                lights=self.light
            )
        )

        # Silhouette renderer
        blend_params = BlendParams(
            sigma=1e-4,
            gamma=1e-4,
            background_color=(0.0, 0.0, 0.0)
        )

        self.silhouette_renderer = MeshRenderer(
            rasterizer=rasterizer,
            shader=SoftSilhouetteShader(blend_params=blend_params)
        )

    def render(self, meshes):
        """
        Render meshes with Phong shading.

        Args:
            meshes: Single mesh or list of meshes

        Returns:
            Rendered images tensor of shape (1, H, W, C)
        """
        scene_mesh = self._prepare_scene_mesh(meshes)
        return self.renderer(scene_mesh)

    def render_batch(self, mesh_list):
        """
        Render multiple frames as a batch.

        Args:
            mesh_list: List of mesh lists (one per frame)

        Returns:
            Batch of rendered images of shape (B, H, W, C)
        """
        assert isinstance(mesh_list, list)

        batch_meshes = []
        for frame_meshes in mesh_list:
            scene_mesh = self._prepare_scene_mesh(frame_meshes)
            batch_meshes.append(scene_mesh)

        batch_mesh = join_meshes_as_batch(batch_meshes)
        return self.renderer(batch_mesh)

    def get_rasterization_fragments(self, mesh_list):
        """
        Get rasterization fragments for batch of meshes.

        Args:
            mesh_list: List of mesh lists (one per frame)

        Returns:
            Rasterization fragments
        """
        assert isinstance(mesh_list, list)

        batch_meshes = []
        for frame_meshes in mesh_list:
            scene_mesh = self._prepare_scene_mesh(frame_meshes)
            batch_meshes.append(scene_mesh)

        batch_mesh = join_meshes_as_batch(batch_meshes)
        return self.renderer.rasterizer(batch_mesh)

    def render_silhouette_batch(self, mesh_list):
        """
        Render silhouette masks for multiple frames.

        Args:
            mesh_list: List of mesh lists (one per frame)

        Returns:
            Batch of silhouette masks of shape (B, H, W, 1)
        """
        assert isinstance(mesh_list, list)

        batch_meshes = []
        for frame_meshes in mesh_list:
            scene_mesh = self._prepare_scene_mesh(frame_meshes)
            batch_meshes.append(scene_mesh)

        batch_mesh = join_meshes_as_batch(batch_meshes)
        silhouette = self.silhouette_renderer(batch_mesh)
        return silhouette[..., 3:]  # Return alpha channel

    def tensor_to_image(self, tensor):
        """
        Convert rendered tensor to numpy image array.

        Args:
            tensor: Rendered tensor of shape (B, H, W, C)

        Returns:
            Numpy array of shape (H, W, 3) with values in [0, 255]
        """
        return (tensor[0, ..., :3].cpu().numpy() * 255).astype(np.uint8)

    def project_points(self, points_3d):
        """
        Project 3D joints/vertices to 2D image plane.

        Args:
            points_3d: shape (N, 3) or (B, N, 3) tensor of 3D points

        Returns:
            points_2d: shape (N, 2) or (B, N, 2) tensor of 2D projected points
        """
        if not torch.is_tensor(points_3d):
            points_3d = torch.tensor(points_3d, device=self.device, dtype=torch.float32)

        if len(points_3d.shape) == 2:
            points_3d = points_3d.unsqueeze(0)  # (1, N, 3)

        # project points
        projected = self.camera.transform_points_screen(points_3d, image_size=self.raster_settings.image_size)

        if projected.shape[0] == 1:
            projected_points = projected.squeeze(0)[:, :2]
        else:
            projected_points = projected[:, :, :2]
        return projected_points

    def render_with_points(self, meshes, points_3d, point_radius=3, for_vertices=False):
        """
        Render the mesh and visualize the joints/vertices on the image.

        Args:
            meshes: mesh or list of meshes to be rendered
            points_3d: shape (N, 3) tensor of 3D joints/vertices
            point_radius: radius of the drawn points
            for_vertices: if True, compute visibility for vertices, else for joints

        Returns:
            Image with joints/vertices drawn, visibility mask
        """
        rendered_image = self.render(meshes)

        # project 3D points to 2D
        points_2d = self.project_points(points_3d)

        image_np = rendered_image[0, ..., :3].cpu().numpy()
        image_with_points = image_np.copy()
        height, width = image_np.shape[:2]

        ray_origins = self.camera.get_camera_center()  # (B, 3)
        ray_origins = np.tile(ray_origins.detach().cpu().numpy(), (points_3d.shape[0], 1))

        verts = meshes.verts_packed().detach().cpu().numpy()
        faces = meshes.faces_packed().detach().cpu().numpy()

        ray_dirs = points_3d.detach().cpu().numpy() - ray_origins  # calculate ray directions
        distances = np.linalg.norm(ray_dirs, axis=1)  # distances from camera to points
        ray_dirs = (ray_dirs.T / distances).T  # normalize to unit vectors

        vis_mask = compute_visibility_mask_igl(ray_origins, ray_dirs, distances, verts, faces, distance_tolerance=1e-6, for_vertices=for_vertices)

        # draw points
        visible_color = (1, 0, 0)    # visible points are red
        invisible_color = (0, 0, 1)  # invisible points are blue
        for i, point in enumerate(points_2d):
            x, y = int(point[0].item()), int(point[1].item())

            if 0 <= x < width and 0 <= y < height:
                point_color = visible_color if vis_mask[i] else invisible_color
                cv2.circle(image_with_points, (x, y), point_radius, point_color, -1)

        result = torch.from_numpy(image_with_points).to(self.device)
        result = result.unsqueeze(0)

        if rendered_image.shape[-1] == 4:
            alpha = rendered_image[..., 3:]
            result = torch.cat([result, alpha], dim=-1)

        return result, vis_mask

    def _prepare_scene_mesh(self, meshes):
        """Convert meshes to a single scene mesh."""
        if isinstance(meshes, Meshes):
            return meshes
        elif isinstance(meshes, list):
            return join_meshes_as_scene(meshes)
        else:
            raise ValueError("meshes must be Meshes object or list of Meshes")
third_party/Puppeteer/animation/utils/cameras/back.json
ADDED
@@ -0,0 +1 @@
{"matrix_world": [[-1.0, -8.742277657347586e-08, -8.742277657347586e-08, 0.0], [-8.742277657347586e-08, 4.371138118131057e-08, 1.0, 2.0], [-8.74227694680485e-08, 1.0, -4.371138828673793e-08, 0.0], [0.0, 0.0, 0.0, 1.0]], "format_version": 6, "max_depth": 5.0, "bbox": [[-0.14632226526737213, -0.15228690207004547, -0.5013949275016785], [0.18149489164352417, 0.24675098061561584, 0.4873228073120117]], "origin": [0.0, 2.0, 0.0], "x_fov": 0.6911112070083618, "y_fov": 0.6911112070083618, "x": [-1.0, -8.742277657347586e-08, -8.74227694680485e-08], "y": [8.742277657347586e-08, -4.371138118131057e-08, -1.0], "z": [8.742277657347586e-08, -1.0, 4.371138828673793e-08]}
third_party/Puppeteer/animation/utils/cameras/back_left.json
ADDED
@@ -0,0 +1,64 @@
{
    "matrix_world": [
        [
            -0.7071067235853873,
            -1.5015359289272112e-08,
            -0.7071068387877032,
            -1.4142136775754064
        ],
        [
            -0.7071068387877031,
            -1.1886282763606815e-08,
            0.7071067235853874,
            1.4142134471707748
        ],
        [
            -1.9022333375140477e-08,
            1.0,
            -2.2125928034189e-09,
            -4.4251856068378e-09
        ],
        [
            0.0,
            0.0,
            0.0,
            1.0
        ]
    ],
    "format_version": 6,
    "max_depth": 5.0,
    "bbox": [
        [
            -0.14632226526737213,
            -0.15228690207004547,
            -0.5013949275016785
        ],
        [
            0.18149489164352417,
            0.24675098061561584,
            0.4873228073120117
        ]
    ],
    "origin": [
        -1.0,
        1.0,
        0.0
    ],
    "x_fov": 0.6911112070083618,
    "y_fov": 0.6911112070083618,
    "x": [
        -0.7071067235853873,
        -1.5015359289272112e-08,
        -0.7071068387877032
    ],
    "y": [
        -0.7071068387877031,
        -1.1886282763606815e-08,
        0.7071067235853874
    ],
    "z": [
        -1.9022333375140477e-08,
        1.0,
        -2.2125928034189e-09
    ]
}
third_party/Puppeteer/animation/utils/cameras/back_right.json
ADDED
@@ -0,0 +1,64 @@
{
    "matrix_world": [
        [
            -0.7071067854026265,
            -7.240741267677819e-08,
            0.7071067769704649,
            1.4142135539409297
        ],
        [
            0.7071067769704653,
            2.4325415404202744e-08,
            0.7071067854026294,
            1.4142135708052588
        ],
        [
            -6.840043892397674e-08,
            0.9999999999999971,
            3.399910591950217e-08,
            6.799821183900434e-08
        ],
        [
            0.0,
            0.0,
            0.0,
            1.0
        ]
    ],
    "format_version": 6,
    "max_depth": 5.0,
    "bbox": [
        [
            -0.14632226526737213,
            -0.15228690207004547,
            -0.5013949275016785
        ],
        [
            0.18149489164352417,
            0.24675098061561584,
            0.4873228073120117
        ]
    ],
    "origin": [
        1.0,
        1.0,
        0.0
    ],
    "x_fov": 0.6911112070083618,
    "y_fov": 0.6911112070083618,
    "x": [
        -0.7071067854026265,
        -7.240741267677819e-08,
        0.7071067769704649
    ],
    "y": [
        0.7071067769704653,
        2.4325415404202744e-08,
        0.7071067854026294
    ],
    "z": [
        -6.840043892397674e-08,
        0.9999999999999971,
        3.399910591950217e-08
    ]
}
third_party/Puppeteer/animation/utils/cameras/front.json
ADDED
@@ -0,0 +1 @@
{"matrix_world": [[1.0, 0.0, 0.0, 0.0], [0.0, -4.371138828673793e-08, -1.0, -2.0], [0.0, 1.0, -4.371138828673793e-08, 0.0], [0.0, 0.0, 0.0, 1.0]], "format_version": 6, "max_depth": 5.0, "bbox": [[-0.14632226526737213, -0.15228690207004547, -0.5013949275016785], [0.18149489164352417, 0.24675098061561584, 0.4873228073120117]], "origin": [0.0, -2.0, 0.0], "x_fov": 0.6911112070083618, "y_fov": 0.6911112070083618, "x": [1.0, 0.0, 0.0], "y": [-0.0, 4.371138828673793e-08, -1.0], "z": [-0.0, 1.0, 4.371138828673793e-08]}
third_party/Puppeteer/animation/utils/cameras/front_left.json
ADDED
@@ -0,0 +1,64 @@
{
    "matrix_world": [
        [
            0.7071068078790848,
            2.869602372390645e-08,
            -0.7071067544940086,
            -1.4142135089880172
        ],
        [
            -0.7071067544940088,
            -6.21956508517485e-09,
            -0.7071068078790852,
            -1.4142136157581704
        ],
        [
            -2.468905024866075e-08,
            0.9999999999999996,
            1.589325537842967e-08,
            3.178651075685934e-08
        ],
        [
            0.0,
            0.0,
            0.0,
            1.0
        ]
    ],
    "format_version": 6,
    "max_depth": 5.0,
    "bbox": [
        [
            -0.14632226526737213,
            -0.15228690207004547,
            -0.5013949275016785
        ],
        [
            0.18149489164352417,
            0.24675098061561584,
            0.4873228073120117
        ]
    ],
    "origin": [
        -1.0,
        -1.0,
        0.0
    ],
    "x_fov": 0.6911112070083618,
    "y_fov": 0.6911112070083618,
    "x": [
        0.7071068078790848,
        2.869602372390645e-08,
        -0.7071067544940086
    ],
    "y": [
        -0.7071067544940088,
        -6.21956508517485e-09,
        -0.7071068078790852
    ],
    "z": [
        -2.468905024866075e-08,
        0.9999999999999996,
        1.589325537842967e-08
    ]
}
third_party/Puppeteer/animation/utils/cameras/front_right.json
ADDED
@@ -0,0 +1,64 @@
{
    "matrix_world": [
        [
            0.7071068078790848,
            -2.869602372390645e-08,
            0.7071067544940086,
            1.4142135089880172
        ],
        [
            0.7071067544940088,
            -6.21956508517485e-09,
            -0.7071068078790852,
            -1.4142136157581704
        ],
        [
            2.468905024866075e-08,
            0.9999999999999996,
            1.589325537842967e-08,
            3.178651075685934e-08
        ],
        [
            0.0,
            0.0,
            0.0,
            1.0
        ]
    ],
    "format_version": 6,
    "max_depth": 5.0,
    "bbox": [
        [
            -0.14632226526737213,
            -0.15228690207004547,
            -0.5013949275016785
        ],
        [
            0.18149489164352417,
            0.24675098061561584,
            0.4873228073120117
        ]
    ],
    "origin": [
        1.0,
        -1.0,
        0.0
    ],
    "x_fov": 0.6911112070083618,
    "y_fov": 0.6911112070083618,
    "x": [
        0.7071068078790848,
        -2.869602372390645e-08,
        0.7071067544940086
    ],
    "y": [
        0.7071067544940088,
        -6.21956508517485e-09,
        -0.7071068078790852
    ],
    "z": [
        2.468905024866075e-08,
        0.9999999999999996,
        1.589325537842967e-08
    ]
}
third_party/Puppeteer/animation/utils/cameras/left.json
ADDED
@@ -0,0 +1 @@
{"matrix_world": [[7.549790126404332e-08, 7.549790126404332e-08, -1.0, -2.0], [-1.0, 5.6999328827277325e-15, -7.549790126404332e-08, 0.0], [0.0, 1.0, 7.549790126404332e-08, 0.0], [0.0, 0.0, 0.0, 1.0]], "format_version": 6, "max_depth": 5.0, "bbox": [[-0.14632226526737213, -0.15228690207004547, -0.5013949275016785], [0.18149489164352417, 0.24675098061561584, 0.4873228073120117]], "origin": [-2.0, 0.0, 0.0], "x_fov": 0.6911112070083618, "y_fov": 0.6911112070083618, "x": [7.549790126404332e-08, -1.0, 0.0], "y": [-7.549790126404332e-08, -5.6999328827277325e-15, -1.0], "z": [1.0, 7.549790126404332e-08, -7.549790126404332e-08]}
third_party/Puppeteer/animation/utils/cameras/right.json
ADDED
@@ -0,0 +1 @@
{"matrix_world": [[7.549790126404332e-08, -7.549790126404332e-08, 1.0, 2.0], [1.0, 5.6999328827277325e-15, -7.549790126404332e-08, 0.0], [0.0, 1.0, 7.549790126404332e-08, 0.0], [0.0, 0.0, 0.0, 1.0]], "format_version": 6, "max_depth": 5.0, "bbox": [[-0.14632226526737213, -0.15228690207004547, -0.5013949275016785], [0.18149489164352417, 0.24675098061561584, 0.4873228073120117]], "origin": [2.0, 0.0, 0.0], "x_fov": 0.6911112070083618, "y_fov": 0.6911112070083618, "x": [7.549790126404332e-08, 1.0, 0.0], "y": [7.549790126404332e-08, -5.6999328827277325e-15, -1.0], "z": [-1.0, 7.549790126404332e-08, -7.549790126404332e-08]}
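A brief note on these presets: each JSON stores a Blender camera-to-world `matrix_world` plus a field of view in radians. The sketch below (not part of the commit) simply inspects one file; the printed values are what the `front.json` above contains.

```python
# Illustrative sketch (not part of the commit): inspect a camera preset.
import json
import numpy as np

with open("utils/cameras/front.json") as f:   # path relative to the animation directory
    cam = json.load(f)

M = np.array(cam["matrix_world"])             # 4x4 Blender camera-to-world matrix
R, t = M[:3, :3], M[:3, 3]
print(np.allclose(R @ R.T, np.eye(3), atol=1e-5))  # rotation block is orthonormal
print(t)                                      # [0, -2, 0]: camera sits 2 units in front
print(np.degrees(cam["x_fov"]))               # ~39.6 degree horizontal FOV
```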
third_party/Puppeteer/animation/utils/data_loader.py
ADDED
@@ -0,0 +1,170 @@
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import numpy as np
import torch
import random
from pytorch3d.io import load_objs_as_meshes, load_obj
from pytorch3d.renderer import TexturesAtlas
from pytorch3d.structures import Meshes
from model import RiggingModel

def prepare_depth(depth_path, input_frames, device, depth_model):
    os.makedirs(depth_path, exist_ok=True)
    depth_path = f"{depth_path}/depth_gt_raw.pt"
    if os.path.exists(depth_path):
        print("load GT depth...")
        depth_gt_raw = torch.load(depth_path, map_location=device)
    else:
        print("run VideoDepthAnything and save.")
        with torch.no_grad():
            depth_gt_raw = depth_model.get_depth_maps(input_frames)
        torch.save(depth_gt_raw.cpu(), depth_path)
        depth_gt_raw = depth_gt_raw.to(device)
    return depth_gt_raw

def normalize_vertices(verts):
    """Normalize vertices to a unit cube."""
    vmin, vmax = verts.min(dim=0).values, verts.max(dim=0).values
    center = (vmax + vmin) / 2.0
    scale = (vmax - vmin).max()
    verts_norm = (verts - center) / scale
    return verts_norm, center, scale

def build_atlas_texture(obj_path, atlas_size, device):
    """Load OBJ + materials and bake all textures into a single atlas."""
    verts, faces, aux = load_obj(
        obj_path,
        device=device,
        load_textures=True,
        create_texture_atlas=True,
        texture_atlas_size=atlas_size,
        texture_wrap="repeat",
    )
    atlas = aux.texture_atlas  # (F, R, R, 3)
    verts_norm, _, _ = normalize_vertices(verts)
    mesh_atlas = Meshes(
        verts=[verts_norm],
        faces=[faces.verts_idx],
        textures=TexturesAtlas(atlas=[atlas]),
    )
    return mesh_atlas

def read_rig_file(file_path):
    """
    Read rig from txt file, our format is the same as RigNet:
        joints joint_name x y z
        root root_joint_name
        skin vertex_idx joint_name weight joint_name weight ...
        hier parent_joint_name child_joint_name
    """
    joints = []
    bones = []
    joint_names = []

    joint_mapping = {}
    joint_index = 0

    skinning_data = {}  # Dictionary to store vertex index -> [(joint_idx, weight), ...]

    with open(file_path, 'r') as file:
        lines = file.readlines()

    for line in lines:
        parts = line.split()
        if line.startswith('joints'):
            name = parts[1]
            position = [float(parts[2]), float(parts[3]), float(parts[4])]
            joints.append(position)
            joint_names.append(name)
            joint_mapping[name] = joint_index
            joint_index += 1
        elif line.startswith('hier'):
            parent_joint = joint_mapping[parts[1]]
            child_joint = joint_mapping[parts[2]]
            bones.append([parent_joint, child_joint])
        elif line.startswith('root'):
            root = joint_mapping[parts[1]]
        elif line.startswith('skin'):
            vertex_idx = int(parts[1])

            if vertex_idx not in skinning_data:
                skinning_data[vertex_idx] = []

            for i in range(2, len(parts), 2):
                if i+1 < len(parts):
                    joint_name = parts[i]
                    weight = float(parts[i+1])

                    if joint_name in joint_mapping:
                        joint_idx = joint_mapping[joint_name]
                        skinning_data[vertex_idx].append((joint_idx, weight))

    return np.array(joints), np.array(bones), root, joint_names, skinning_data

def load_model_from_obj_and_rig(
    mesh_path: str,
    rig_path: str,
    device: str | torch.device = "cuda",
    use_skin_color: bool = True,
    atlas_size: int = 8,
):
    """Load a 3D model from OBJ and rig files."""

    # 1) read raw mesh
    raw_mesh = load_objs_as_meshes([mesh_path], device=device)
    verts_raw = raw_mesh.verts_packed()   # (V,3)
    faces_idx = raw_mesh.faces_packed()   # (F,3)

    # 2) read rig data
    joints_np, bones_np, root_idx, joint_names, skinning_data = read_rig_file(rig_path)
    J = joints_np.shape[0]

    # parent indices, default -1
    parent_idx = [-1] * J
    for p, c in bones_np:
        parent_idx[c] = p

    verts_norm, center, scale = normalize_vertices(verts_raw)
    joints_t = torch.as_tensor(joints_np, dtype=torch.float32, device=device)
    joints_norm = (joints_t - center) / scale

    # skin weights tensor (V,J)
    V = verts_raw.shape[0]
    skin_weights = torch.zeros(V, J, dtype=torch.float32, device=device)
    for v_idx, lst in skinning_data.items():
        for j_idx, w in lst:
            skin_weights[v_idx, j_idx] = w

    # 3) texture strategy
    mesh_norm = build_atlas_texture(mesh_path, atlas_size, device)
    tex = mesh_norm.textures

    # 4) pack into Model class
    model = RiggingModel(device=device)
    model.vertices = [mesh_norm.verts_packed()]
    model.faces = [faces_idx]
    model.textures = [tex]

    # rig meta
    model.bones = bones_np  # (B,2)
    model.parent_indices = parent_idx
    model.root_index = root_idx
    model.skin_weights = [skin_weights]

    model.bind_matrices_inv = torch.eye(4, device=device).unsqueeze(0).expand(J, -1, -1).contiguous()
    model.joints_rest = joints_norm

    return model
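To make the RigNet-style text format described in `read_rig_file` concrete, here is a tiny synthetic example (not part of the commit); all joint names, coordinates, and weights are made up, and the file name is hypothetical. It assumes the script runs from the `animation` directory so `utils.data_loader` is importable.

```python
# Illustrative sketch (not part of the commit): a minimal rig file and its parse result.
from utils.data_loader import read_rig_file

rig_text = """\
joints root 0.0 0.5 0.0
joints spine 0.0 0.8 0.0
joints head 0.0 1.1 0.0
root root
skin 0 root 0.7 spine 0.3
skin 1 spine 0.9 head 0.1
hier root spine
hier spine head
"""

with open("example_rig.txt", "w") as f:   # hypothetical file name
    f.write(rig_text)

joints, bones, root_idx, joint_names, skinning = read_rig_file("example_rig.txt")
# joints.shape == (3, 3); bones.tolist() == [[0, 1], [1, 2]]; root_idx == 0
# skinning == {0: [(0, 0.7), (1, 0.3)], 1: [(1, 0.9), (2, 0.1)]}
```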
third_party/Puppeteer/animation/utils/loss_utils.py
ADDED
@@ -0,0 +1,420 @@
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from third_partys.Video_Depth_Anything.video_depth_anything.video_depth import VideoDepthAnything
import torch
import torch.nn as nn
import numpy as np
import igl
import cv2
import time
import torch.nn.functional as F
from utils.quat_utils import quat_inverse, quat_log, quat_multiply, normalize_quaternion
from pytorch3d.structures import join_meshes_as_scene, join_meshes_as_batch
import os
from pathlib import Path

class DepthModule:
    def __init__(self, encoder='vitl', device='cuda', input_size=518, fp32=False):
        """
        Initialize the depth loss module with Video Depth Anything

        Args:
            encoder: 'vitl' or 'vits'
            device: device to run the model on
            input_size: input size for the model
            fp32: whether to use float32 for inference
        """
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.input_size = input_size
        self.fp32 = fp32

        # Initialize model configuration
        model_configs = {
            'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]},
            'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]},
        }

        # Load Video Depth Anything model
        self.video_depth_model = VideoDepthAnything(**model_configs[encoder])
        self.video_depth_model.load_state_dict(
            torch.load(f'./third_partys/Video_Depth_Anything/ckpt/video_depth_anything_{encoder}.pth', map_location='cpu'),
            strict=True
        )
        self.video_depth_model = self.video_depth_model.to(self.device).eval()
        for param in self.video_depth_model.parameters():
            param.requires_grad = False

    def get_depth_maps(self, frames, target_fps=30):
        """
        Get depth maps for video frames
        """
        depths, _ = self.video_depth_model.infer_video_depth(
            frames,
            target_fps,
            input_size=self.input_size,
            device=self.device,
            fp32=self.fp32
        )
        return depths

def save_depth_as_images(depth_np, output_dir='./depth_images'):
    """save depth images"""
    os.makedirs(output_dir, exist_ok=True)

    for i, depth_map in enumerate(depth_np):
        depth_map = depth_map.detach().cpu().numpy()
        valid_mask = (depth_map > 0)
        if not valid_mask.any():
            continue

        valid_min = depth_map[valid_mask].min()
        valid_max = depth_map[valid_mask].max()

        normalized = np.zeros_like(depth_map)
        normalized[valid_mask] = 255.0 * (depth_map[valid_mask] - valid_min) / (valid_max - valid_min)

        depth_img = normalized.astype(np.uint8)

        cv2.imwrite(os.path.join(output_dir, f'depth_{i:04d}.png'), depth_img)

    print(f"Save {len(depth_np)} depth images to {output_dir}")

def compute_visibility_mask_igl(ray_origins, ray_dirs, distances, verts, faces, distance_tolerance=1e-6, for_vertices=False):
    """
    Compute visibility mask using IGL ray-mesh intersection.
    """
    num_rays = ray_origins.shape[0]
    visibility_mask = np.ones(num_rays, dtype=bool)

    for i in range(num_rays):
        ray_origin = ray_origins[i].reshape(1, 3)
        ray_dir = ray_dirs[i].reshape(1, 3)
        intersections = igl.ray_mesh_intersect(ray_origin, ray_dir, verts, faces)
        if intersections:
            # Count intersections that occur before the target point
            count = sum(1 for h in intersections if h[4] < distances[i] - distance_tolerance)
            # count=0 → ray completely missed the mesh; count=1 → ray stops exactly at the face containing the joint
            # count>1 → ray was blocked by other faces along the way
            if for_vertices:
                if count != 1:
                    visibility_mask[i] = False
            else:  # for joints
                if count > 2:
                    visibility_mask[i] = False

    return visibility_mask

def compute_reprojection_loss(renderer, vis_mask, predicted_joints, tracked_joints_2d, image_size):
    """
    Compute reprojection loss between predicted 3D points and tracked 2D points.
    """
    if predicted_joints.dim() != 3:
        raise ValueError(f"predicted_joints must be 3D tensor, got shape {predicted_joints.shape}")

    B, J, _ = predicted_joints.shape
    device = predicted_joints.device

    # Project 3D joints to 2D screen coordinates
    projected = renderer.camera.transform_points_screen(
        predicted_joints,
        image_size=[image_size, image_size]
    )
    projected_2d = projected[..., :2]  # (B, J, 2)

    # Convert and validate tracked joints
    if not isinstance(tracked_joints_2d, torch.Tensor):
        tracked_joints_2d = torch.from_numpy(tracked_joints_2d).float()
    tracked_joints_2d = tracked_joints_2d.to(device)

    if tracked_joints_2d.dim() == 2:
        tracked_joints_2d = tracked_joints_2d.unsqueeze(0).expand(B, -1, -1)

    vis_mask = vis_mask.to(device).float()

    num_visible = vis_mask.sum()
    if num_visible == 0:
        # No visible joints - return zero loss
        return torch.tensor(0.0, device=device, requires_grad=True)

    squared_diff = (projected_2d - tracked_joints_2d).pow(2).sum(dim=-1)  # (B, J)

    vis_mask_expanded = vis_mask.unsqueeze(0)  # (1, J)
    masked_loss = squared_diff * vis_mask_expanded  # (B, J)
    per_frame_loss = masked_loss.sum(dim=1) / num_visible  # (B,)
    final_loss = per_frame_loss.mean()  # scalar

    return final_loss

def geodesic_loss(q1, q2, eps=1e-6):
    """
    Compute geodesic distance loss between batches of quaternions for rot smooth loss.
    """
    q1_norm = normalize_quaternion(q1, eps=eps)
    q2_norm = normalize_quaternion(q2, eps=eps)

    dot_product = (q1_norm * q2_norm).sum(dim=-1, keepdim=True)
    q2_corrected = torch.where(dot_product < 0, -q2_norm, q2_norm)
    inner_product = (q1_norm * q2_corrected).sum(dim=-1)

    # Clamp to valid range for arccos to avoid numerical issues
    inner_product_clamped = torch.clamp(inner_product, min=-1.0 + eps, max=1.0 - eps)
    theta = 2.0 * torch.arccos(torch.abs(inner_product_clamped))

    return theta

def root_motion_reg(root_quats, root_pos):
    return ((root_pos[1:] - root_pos[:-1])**2).mean(), (geodesic_loss(root_quats[1:], root_quats[:-1])**2).mean()

def joint_motion_coherence(quats_normed, parent_idx):
    """
    Compute joint motion coherence loss to enforce smooth relative motion between parent-child joints.
    """
    coherence_loss = 0

    for j, parent in enumerate(parent_idx):
        if parent != -1:  # Skip root joint
            parent_rot = quats_normed[:, parent]  # (T, 4)
            child_rot = quats_normed[:, j]  # (T, 4)

            # Compute relative rotation of child w.r.t. parent's local frame
            # local_rot = parent_rot^(-1) * child_rot
            local_rot = quat_multiply(quat_inverse(parent_rot), child_rot)
            local_rot_velocity = local_rot[1:] - local_rot[:-1]  # (T-1, 4)

            coherence_loss += local_rot_velocity.pow(2).mean()

    return coherence_loss

def read_flo_file(file_path):
    """
    Read optical flow from .flo format file.
    """
    with open(file_path, 'rb') as f:
        magic = np.fromfile(f, np.float32, count=1)
        if len(magic) == 0 or magic[0] != 202021.25:
            raise ValueError(f'Invalid .flo file format: magic number {magic}')

        w = np.fromfile(f, np.int32, count=1)[0]
        h = np.fromfile(f, np.int32, count=1)[0]
        data = np.fromfile(f, np.float32, count=2*w*h)
        flow = data.reshape(h, w, 2)
    return flow

def load_optical_flows(flow_dir, num_frames):
    """
    Load sequence of optical flow files.
    """
    flow_dir = Path(flow_dir)
    flows = []

    for i in range(num_frames - 1):
        flow_path = flow_dir / f'flow_{i:04d}.flo'
        if flow_path.exists():
            flow = read_flo_file(flow_path)
            flows.append(flow)
        else:
            raise ValueError("No flow files found")

    return np.stack(flows, axis=0)

def rasterize_vertex_flow(flow_vertices, meshes, faces, image_size, renderer, eps=1e-8):
    """
    Rasterize per-vertex flow to dense flow field using barycentric interpolation.
    """
    B, V, _ = flow_vertices.shape
    device = flow_vertices.device

    if isinstance(image_size, int):
        H = W = image_size
    else:
        H, W = image_size

    batch_meshes = join_meshes_as_batch([join_meshes_as_scene(m) for m in meshes]).to(device)
    fragments = renderer.renderer.rasterizer(batch_meshes)

    pix_to_face = fragments.pix_to_face  # (B, H, W, K)
    bary_coords = fragments.bary_coords  # (B, H, W, K, 3)

    flow_scene_list = []
    for mesh_idx in range(B):
        mesh = meshes[mesh_idx]
        V_mesh = mesh.verts_packed().shape[0]

        if V_mesh > flow_vertices.shape[1]:
            raise ValueError(f"Mesh {mesh_idx} has {V_mesh} vertices but flow has {flow_vertices.shape[1]}")

        flow_scene_list.append(flow_vertices[mesh_idx, :V_mesh])

    flow_vertices_scene = torch.cat(flow_scene_list, dim=0).to(device)
    faces_scene = batch_meshes.faces_packed()

    flow_pred = torch.zeros(B, H, W, 2, device=device)
    valid = pix_to_face[..., 0] >= 0

    for b in range(B):
        b_valid = valid[b]  # (H,W)
        if torch.count_nonzero(b_valid) == 0:
            print(f"No valid pixels found for batch {b}")
            continue

        valid_indices = torch.nonzero(b_valid, as_tuple=True)
        h_indices, w_indices = valid_indices

        face_idxs = pix_to_face[b, h_indices, w_indices, 0]  # (N,)
        bary = bary_coords[b, h_indices, w_indices, 0]  # (N,3)

        max_face_idx = faces_scene.shape[0] - 1
        if face_idxs.max() > max_face_idx:
            raise RuntimeError(f"Face index {face_idxs.max()} exceeds max {max_face_idx}")

        face_verts = faces_scene[face_idxs]  # (N, 3)
        f0, f1, f2 = face_verts.unbind(-1)  # Each (N,)

        max_vert_idx = flow_vertices_scene.shape[0] - 1
        if max(f0.max(), f1.max(), f2.max()) > max_vert_idx:
            raise RuntimeError(f"Vertex index exceeds flow_vertices_scene size {max_vert_idx}")

        v0_flow = flow_vertices_scene[f0]  # (N, 2)
        v1_flow = flow_vertices_scene[f1]  # (N, 2)
        v2_flow = flow_vertices_scene[f2]  # (N, 2)

        # Interpolate using barycentric coordinates
        b0, b1, b2 = bary.unbind(-1)  # Each (N,)

        # Ensure barycentric coordinates sum to 1 (numerical stability)
        bary_sum = b0 + b1 + b2
        b0 = b0 / (bary_sum + eps)
        b1 = b1 / (bary_sum + eps)
        b2 = b2 / (bary_sum + eps)

        flow_interpolated = (
            b0.unsqueeze(-1) * v0_flow +
            b1.unsqueeze(-1) * v1_flow +
            b2.unsqueeze(-1) * v2_flow
        )  # (N, 2)

        # Update flow prediction
        flow_pred[b, h_indices, w_indices] = flow_interpolated

    return flow_pred

def calculate_flow_loss(flow_dir, device, mask, renderer, model):
    """
    Calculate optical flow loss with improved error handling and flexibility.
    """
    if device is None:
        device = mask.device

    T = mask.shape[0]
    H, W = mask.shape[1:3]

    if mask.shape[0] == T:
        flow_mask = mask[1:]  # Use frames 1 to T-1
    else:
        flow_mask = mask

    flows_np = load_optical_flows(flow_dir, T)
    flow_gt = torch.from_numpy(flows_np).float().to(device)  # [T-1, H, W, 2]

    vertices = model.deformed_vertices[0]  # (T,V,3)
    # Project vertices to get 2D flow
    proj_t = renderer.project_points(vertices[:-1])  # (T-1,V,2) in pixels
    proj_tp = renderer.project_points(vertices[1:])
    vertex_flow = proj_tp - proj_t  # (T-1,V,2) Δx,Δy

    meshes = [model.get_mesh(t) for t in range(T)]
    flow_pred = rasterize_vertex_flow(vertex_flow, meshes, model.faces[0], (H, W), renderer)  # (B,H,W,2)

    eps = 1e-3
    diff = (flow_pred - flow_gt) * flow_mask.unsqueeze(-1)  # (T-1, H, W, 2)
    loss = torch.sqrt(diff.pow(2).sum(dim=-1) + eps**2)  # Charbonnier loss
    loss = loss.sum() / (flow_mask.sum() + 1e-6)

    return loss

def normalize_depth_from_reference(depth_maps, reference_idx=0, invalid_value=-1.0, invert=False, eps=1e-8):
    """
    Normalize depth maps based on a reference frame with improved robustness.
    """
    if depth_maps.dim() != 3:
        raise ValueError(f"Expected depth_maps with 3 dimensions, got {depth_maps.dim()}")

    T, H, W = depth_maps.shape
    device = depth_maps.device

    reference_depth = depth_maps[reference_idx]
    valid_mask = (
        (reference_depth != invalid_value) &
        (reference_depth > 1e-8) &  # Avoid very small positive values
        torch.isfinite(reference_depth)  # Exclude inf/nan
    )

    valid_values = reference_depth[valid_mask]
    min_depth = torch.quantile(valid_values, 0.01)  # 1st percentile
    max_depth = torch.quantile(valid_values, 0.99)  # 99th percentile

    depth_range = max_depth - min_depth
    if depth_range < eps:
        logger.warning(f"Very small depth range ({depth_range:.6f}), using fallback normalization")
        min_depth = valid_values.min()
        max_depth = valid_values.max()
        depth_range = max(max_depth - min_depth, eps)

    scale = 1.0 / (max_depth - min_depth)
    offset = -min_depth * scale

    all_valid_mask = (
        (depth_maps != invalid_value) &
        (depth_maps > eps) &
        torch.isfinite(depth_maps)
    )

    normalized_depths = torch.full_like(depth_maps, invalid_value)

    if all_valid_mask.any():
        normalized_values = depth_maps[all_valid_mask] * scale + offset

        if invert:
            normalized_values = 1.0 - normalized_values

        normalized_depths[all_valid_mask] = normalized_values

    return normalized_depths, scale.item(), offset.item()

def compute_depth_loss_normalized(mono_depths, zbuf_depths, mask):
    """
    Compute normalized depth loss.
    """
    device = zbuf_depths.device
    # Normalize both depth types
    zbuf_norm, z_scale, z_offset = normalize_depth_from_reference(zbuf_depths)
    mono_norm, m_scale, m_offset = normalize_depth_from_reference(mono_depths, invert=True)

    valid_zbuf = (zbuf_norm >= 0) & (zbuf_norm <= 1)
    valid_mono = (mono_norm >= 0) & (mono_norm <= 1)
    if mask.dtype != torch.bool:
        mask = mask > 0.5
    combined_mask = mask & valid_zbuf & valid_mono

    num_valid = combined_mask.sum().item()
    if num_valid == 0:
        print("No valid pixels for depth loss computation")
        return torch.tensor(0.0, device=device, requires_grad=True)

    depth_diff = (zbuf_norm - mono_norm) * combined_mask.float()
    loss = (depth_diff**2).sum() / num_valid

    return loss
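As a quick sanity check on the rotation-smoothness term above, the sketch below (not part of the commit) evaluates `geodesic_loss` on two toy (w, x, y, z) quaternions; a 90-degree rotation about z against the identity should give an angle of about pi/2. It assumes the script runs from the `animation` directory so `utils.loss_utils` is importable.

```python
# Illustrative sketch (not part of the commit): geodesic angle between two quaternions.
import math
import torch
from utils.loss_utils import geodesic_loss

q_identity = torch.tensor([[1.0, 0.0, 0.0, 0.0]])
half = math.pi / 4
q_z90 = torch.tensor([[math.cos(half), 0.0, 0.0, math.sin(half)]])  # 90 deg about z

theta = geodesic_loss(q_identity, q_z90)
print(theta)  # ~1.5708, i.e. pi/2
```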
third_party/Puppeteer/animation/utils/misc.py
ADDED
@@ -0,0 +1,34 @@
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from torch.optim.lr_scheduler import LambdaLR

def warmup_then_decay(optimizer, total_steps, warmup_steps, max_lr=1e-3, min_lr=1e-5, base_lr=1e-5):
    """
    Create a learning rate scheduler with warmup followed by decay.
    """
    def lr_lambda(current_step):
        if current_step < warmup_steps:
            # warmup: min_lr -> max_lr
            progress = float(current_step) / float(max(1, warmup_steps))
            # LR(t) = min_lr + (max_lr - min_lr)*progress
            return (min_lr + (max_lr - min_lr)*progress) / base_lr
        else:
            # decay: warmup_steps -> total_steps
            progress = float(current_step - warmup_steps) / float(max(1, total_steps - warmup_steps))
            # LR(t) = max_lr + (min_lr - max_lr)*progress
            return (max_lr + (min_lr - max_lr)*progress) / base_lr

    scheduler = LambdaLR(optimizer, lr_lambda)
    return scheduler
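Because `LambdaLR` multiplies the optimizer's initial learning rate by the lambda, the optimizer should be created with `lr` equal to `base_lr` for the schedule to hit the intended values. A short usage sketch (not part of the commit):

```python
# Illustrative sketch (not part of the commit): warmup_then_decay with an Adam optimizer.
import torch
from utils.misc import warmup_then_decay

params = [torch.nn.Parameter(torch.zeros(3))]
optimizer = torch.optim.Adam(params, lr=1e-5)  # initial lr must equal base_lr
scheduler = warmup_then_decay(optimizer, total_steps=1000, warmup_steps=100,
                              max_lr=1e-3, min_lr=1e-5, base_lr=1e-5)

for step in range(1000):
    optimizer.step()
    scheduler.step()
    # effective lr ramps 1e-5 -> 1e-3 over the first 100 steps, then decays back to 1e-5
```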
third_party/Puppeteer/animation/utils/quat_utils.py
ADDED
|
@@ -0,0 +1,179 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import torch
|
| 16 |
+
from typing import List, Tuple, Optional
|
| 17 |
+
|
| 18 |
+
EPS = 1e-8
|
| 19 |
+
|
| 20 |
+
def normalize_quaternion(quat: torch.Tensor, eps: float = EPS) -> torch.Tensor:
|
| 21 |
+
"""
|
| 22 |
+
Normalize quaternions to unit length.
|
| 23 |
+
|
| 24 |
+
Args:
|
| 25 |
+
quat: Quaternion tensor of shape (..., 4) with (w, x, y, z) format
|
| 26 |
+
eps: Small value for numerical stability
|
| 27 |
+
|
| 28 |
+
Returns:
|
| 29 |
+
Normalized quaternions of same shape
|
| 30 |
+
"""
|
| 31 |
+
norm = torch.norm(quat, dim=-1, keepdim=True)
|
| 32 |
+
return quat / torch.clamp(norm, min=eps)
|
| 33 |
+
|
| 34 |
+
def quat_multiply(q1: torch.Tensor, q2: torch.Tensor) -> torch.Tensor:
|
| 35 |
+
"""
|
| 36 |
+
Multiply two quaternions using Hamilton product.
|
| 37 |
+
"""
|
| 38 |
+
w1, x1, y1, z1 = torch.unbind(q1, dim=-1)
|
| 39 |
+
w2, x2, y2, z2 = torch.unbind(q2, dim=-1)
|
| 40 |
+
|
| 41 |
+
w = w1 * w2 - x1 * x2 - y1 * y2 - z1 * z2
|
| 42 |
+
x = w1 * x2 + x1 * w2 + y1 * z2 - z1 * y2
|
| 43 |
+
y = w1 * y2 - x1 * z2 + y1 * w2 + z1 * x2
|
| 44 |
+
z = w1 * z2 + x1 * y2 - y1 * x2 + z1 * w2
|
| 45 |
+
|
| 46 |
+
return torch.stack((w, x, y, z), dim=-1)
|
| 47 |
+
|
| 48 |
+
def quat_conjugate(quat: torch.Tensor) -> torch.Tensor:
|
| 49 |
+
"""
|
| 50 |
+
Compute quaternion conjugate.
|
| 51 |
+
"""
|
| 52 |
+
w, xyz = quat[..., :1], quat[..., 1:]
|
| 53 |
+
return torch.cat([w, -xyz], dim=-1)
|
| 54 |
+
|
| 55 |
+
def quat_inverse(quat: torch.Tensor, eps: float = EPS) -> torch.Tensor:
|
| 56 |
+
"""
|
| 57 |
+
Compute quaternion inverse.
|
| 58 |
+
"""
|
| 59 |
+
conjugate = quat_conjugate(quat)
|
| 60 |
+
norm_squared = torch.sum(quat * quat, dim=-1, keepdim=True)
|
| 61 |
+
return conjugate / torch.clamp(norm_squared, min=eps)
|
| 62 |
+
|
| 63 |
+
def quat_log(quat: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
|
| 64 |
+
"""
|
| 65 |
+
Compute quaternion logarithm, mapping to rotation vectors (axis-angle).
|
| 66 |
+
"""
|
| 67 |
+
# quat_norm = normalize_quaternion(quat, eps)
|
| 68 |
+
q_norm = torch.sqrt(torch.sum(quat * quat, dim=-1, keepdim=True))
|
| 69 |
+
quat_norm = quat / torch.clamp(q_norm, min=eps)
|
| 70 |
+
|
| 71 |
+
w = quat_norm[..., 0:1] # Scalar part
|
| 72 |
+
xyz = quat_norm[..., 1:] # Vector part
|
| 73 |
+
|
| 74 |
+
xyz_norm = torch.norm(xyz, dim=-1, keepdim=True)
|
| 75 |
+
w_clamped = torch.clamp(w, min=-1.0 + eps, max=1.0 - eps)
|
| 76 |
+
|
| 77 |
+
# half-angle
|
| 78 |
+
half_angle = torch.acos(torch.abs(w_clamped))
|
| 79 |
+
|
| 80 |
+
safe_xyz_norm = torch.clamp(xyz_norm, min=eps)
|
| 81 |
+
|
| 82 |
+
# Scale factor
|
| 83 |
+
scale = torch.where(
|
| 84 |
+
xyz_norm < eps,
|
| 85 |
+
torch.ones_like(xyz_norm),
|
| 86 |
+
half_angle / safe_xyz_norm
|
| 87 |
+
)
|
| 88 |
+
|
| 89 |
+
# Handle quaternion sign ambiguity (q and -q represent same rotation)
|
| 90 |
+
sign = torch.where(w >= 0, torch.ones_like(w), -torch.ones_like(w))
|
| 91 |
+
|
| 92 |
+
rotation_vector = sign * scale * xyz
|
| 93 |
+
|
| 94 |
+
return rotation_vector
|
| 95 |
+
|
| 96 |
+
def quat_rotate_vector(quat: torch.Tensor, vec: torch.Tensor) -> torch.Tensor:
|
| 97 |
+
"""
|
| 98 |
+
Rotate a 3D vector by a quaternion.
|
| 99 |
+
"""
|
| 100 |
+
q_vec = quat[..., 1:] # vector part
|
| 101 |
+
q_w = quat[..., 0:1] # scalar part
|
| 102 |
+
|
| 103 |
+
cross1 = torch.cross(q_vec, vec, dim=-1)
|
| 104 |
+
cross2 = torch.cross(q_vec, cross1, dim=-1)
|
| 105 |
+
|
| 106 |
+
# Apply the rotation formula
|
| 107 |
+
rotated_vec = vec + 2.0 * q_w * cross1 + 2.0 * cross2
|
| 108 |
+
|
| 109 |
+
return rotated_vec
|
| 110 |
+
|
| 111 |
+
def quat_to_rotation_matrix(quat: torch.Tensor, eps: float = EPS) -> torch.Tensor:
|
| 112 |
+
"""
|
| 113 |
+
Convert quaternions to rotation matrices.
|
| 114 |
+
"""
|
| 115 |
+
quat_norm = normalize_quaternion(quat, eps)
|
| 116 |
+
w, x, y, z = torch.unbind(quat_norm, dim=-1)
|
| 117 |
+
|
| 118 |
+
xx, yy, zz = x * x, y * y, z * z
|
| 119 |
+
xy, xz, yz = x * y, x * z, y * z
|
| 120 |
+
wx, wy, wz = w * x, w * y, w * z
|
| 121 |
+
|
| 122 |
+
r00 = 1.0 - 2.0 * (yy + zz)
|
| 123 |
+
r01 = 2.0 * (xy - wz)
|
| 124 |
+
r02 = 2.0 * (xz + wy)
|
| 125 |
+
|
| 126 |
+
r10 = 2.0 * (xy + wz)
|
| 127 |
+
r11 = 1.0 - 2.0 * (xx + zz)
|
| 128 |
+
r12 = 2.0 * (yz - wx)
|
| 129 |
+
|
| 130 |
+
r20 = 2.0 * (xz - wy)
|
| 131 |
+
r21 = 2.0 * (yz + wx)
|
| 132 |
+
r22 = 1.0 - 2.0 * (xx + yy)
|
| 133 |
+
|
| 134 |
+
rotation_matrix = torch.stack([
|
| 135 |
+
r00, r01, r02,
|
| 136 |
+
r10, r11, r12,
|
| 137 |
+
r20, r21, r22
|
| 138 |
+
], dim=-1)
|
| 139 |
+
|
| 140 |
+
return rotation_matrix.reshape(quat.shape[:-1] + (3, 3))
|
| 141 |
+
|
| 142 |
+
def quat_to_transform_matrix(quat: torch.Tensor, pos: torch.Tensor) -> torch.Tensor:
|
| 143 |
+
"""
|
| 144 |
+
Convert quaternion and position to 4x4 transformation matrix.
|
| 145 |
+
"""
|
| 146 |
+
# rotation part
|
| 147 |
+
rotation = quat_to_rotation_matrix(quat)
|
| 148 |
+
batch_shape = rotation.shape[:-2]
|
| 149 |
+
|
| 150 |
+
# homogeneous transformation matrix
|
| 151 |
+
transform = torch.zeros(batch_shape + (4, 4), dtype=rotation.dtype, device=rotation.device)
|
| 152 |
+
transform[..., :3, :3] = rotation
|
| 153 |
+
transform[..., :3, 3] = pos
|
| 154 |
+
transform[..., 3, 3] = 1.0
|
| 155 |
+
|
| 156 |
+
return transform
|
| 157 |
+
|
| 158 |
+
def compute_rest_local_positions(
|
| 159 |
+
joint_positions: torch.Tensor,
|
| 160 |
+
parent_indices: List[int]
|
| 161 |
+
) -> torch.Tensor:
|
| 162 |
+
"""
|
| 163 |
+
Compute local positions relative to parent joints from global joint positions.
|
| 164 |
+
"""
|
| 165 |
+
|
| 166 |
+
num_joints = joint_positions.shape[0]
|
| 167 |
+
local_positions = torch.zeros_like(joint_positions)
|
| 168 |
+
|
| 169 |
+
for j in range(num_joints):
|
| 170 |
+
parent_idx = parent_indices[j]
|
| 171 |
+
|
| 172 |
+
if parent_idx >= 0 and parent_idx != j and parent_idx < num_joints:
|
| 173 |
+
# Child joint: local offset = global_pos - parent_global_pos
|
| 174 |
+
local_positions[j] = joint_positions[j] - joint_positions[parent_idx]
|
| 175 |
+
else:
|
| 176 |
+
# Root joint: use global position as local position
|
| 177 |
+
local_positions[j] = joint_positions[j]
|
| 178 |
+
|
| 179 |
+
return local_positions
|
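A quick usage sketch for the helper above (a hypothetical 3-joint chain; the positions and parent indices are made up purely for illustration):

```python
import torch

# Hypothetical chain: joint 0 is the root, joint 1 is its child, joint 2 is the child of joint 1.
joint_positions = torch.tensor([[0.0, 0.0, 0.0],
                                [0.0, 1.0, 0.0],
                                [0.0, 2.0, 0.0]])
parent_indices = [-1, 0, 1]

local = compute_rest_local_positions(joint_positions, parent_indices)
# local[0] is the root's global position; local[1] and local[2] are both the offset (0, 1, 0).
```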
third_party/Puppeteer/animation/utils/render_first_frame.py
ADDED
|
@@ -0,0 +1,93 @@
|
| 1 |
+
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import os
|
| 16 |
+
import json
|
| 17 |
+
import argparse
|
| 18 |
+
from pathlib import Path
|
| 19 |
+
|
| 20 |
+
import torch
|
| 21 |
+
from pytorch3d.io import load_objs_as_meshes
|
| 22 |
+
from pytorch3d.renderer import TexturesVertex
|
| 23 |
+
from pytorch3d.structures import Meshes
|
| 24 |
+
from PIL import Image
|
| 25 |
+
|
| 26 |
+
from renderer import MeshRenderer3D
|
| 27 |
+
from utils.save_utils import render_single_mesh
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def render_mesh_all_cameras(mesh_path, cameras_dir, output_dir="renders", image_size=512, device="cuda:0"):
|
| 31 |
+
"""
|
| 32 |
+
Render mesh from all camera viewpoints in the cameras directory.
|
| 33 |
+
|
| 34 |
+
Args:
|
| 35 |
+
mesh_path: Path to OBJ mesh file
|
| 36 |
+
cameras_dir: Directory containing camera JSON config files
|
| 37 |
+
output_dir: Output directory for rendered images
|
| 38 |
+
image_size: Output image size
|
| 39 |
+
device: Device to use
|
| 40 |
+
"""
|
| 41 |
+
cameras_dir = Path(cameras_dir)
|
| 42 |
+
output_dir = Path(output_dir)
|
| 43 |
+
|
| 44 |
+
# Find all JSON camera config files
|
| 45 |
+
json_files = list(cameras_dir.glob("*.json"))
|
| 46 |
+
if not json_files:
|
| 47 |
+
print(f"No JSON camera files found in {cameras_dir}")
|
| 48 |
+
return
|
| 49 |
+
|
| 50 |
+
print(f"Found {len(json_files)} camera configurations")
|
| 51 |
+
|
| 52 |
+
# Render from each camera viewpoint
|
| 53 |
+
for json_file in json_files:
|
| 54 |
+
# Load camera config
|
| 55 |
+
with open(json_file, 'r') as f:
|
| 56 |
+
cam_params = json.load(f)
|
| 57 |
+
|
| 58 |
+
# Setup renderer for this camera
|
| 59 |
+
renderer = MeshRenderer3D(device=device, image_size=image_size, cam_params=cam_params)
|
| 60 |
+
|
| 61 |
+
camera_name = json_file.stem
|
| 62 |
+
output_path = output_dir / f"render_{camera_name}.png"
|
| 63 |
+
|
| 64 |
+
render_single_mesh(renderer, mesh_path, str(output_path))
|
| 65 |
+
|
| 66 |
+
print(f"All renders saved to: {output_dir}")
|
| 67 |
+
|
| 68 |
+
def main():
|
| 69 |
+
parser = argparse.ArgumentParser(description="Render a mesh to an image")
|
| 70 |
+
parser.add_argument('--input_path', type=str, help="base input path")
|
| 71 |
+
parser.add_argument('--seq_name', type=str, help="sequence name")
|
| 72 |
+
parser.add_argument("--cameras_dir", default="utils/cameras", help="Camera config JSON file")
|
| 73 |
+
parser.add_argument("-s", "--size", type=int, default=512, help="Image size")
|
| 74 |
+
parser.add_argument("-d", "--device", default="cuda:0", help="Device to use")
|
| 75 |
+
|
| 76 |
+
args = parser.parse_args()
|
| 77 |
+
|
| 78 |
+
mesh_path = f'{args.input_path}/{args.seq_name}/objs/mesh.obj'
|
| 79 |
+
if not os.path.exists(mesh_path):
|
| 80 |
+
print(f"Error: Mesh file not found: {mesh_path}")
|
| 81 |
+
output_dir = f'{args.input_path}/{args.seq_name}/first_frames/'
|
| 82 |
+
os.makedirs(output_dir, exist_ok=True)
|
| 83 |
+
|
| 84 |
+
render_mesh_all_cameras(
|
| 85 |
+
mesh_path=mesh_path,
|
| 86 |
+
cameras_dir=args.cameras_dir,
|
| 87 |
+
output_dir=output_dir,
|
| 88 |
+
image_size=args.size,
|
| 89 |
+
device=args.device
|
| 90 |
+
)
|
| 91 |
+
|
| 92 |
+
if __name__ == "__main__":
|
| 93 |
+
main()
|
third_party/Puppeteer/animation/utils/save_flow.py
ADDED
|
@@ -0,0 +1,297 @@
|
| 1 |
+
# Copyright (c) 2021 Henrique Morimitsu
|
| 2 |
+
# Copyright (c) 2025 ByteDance Ltd. and/or its affiliates.
|
| 3 |
+
# SPDX-License-Identifier: Apache License 2.0
|
| 4 |
+
#
|
| 5 |
+
# This file has been modified by ByteDance Ltd. and/or its affiliates. on 2025.09.04
|
| 6 |
+
#
|
| 7 |
+
# Original file was released under Apache License 2.0, with the full license text
|
| 8 |
+
# available at https://github.com/hmorimitsu/ptlflow/blob/main/LICENSE.
|
| 9 |
+
#
|
| 10 |
+
# This modified file is released under the same license.
|
| 11 |
+
|
| 12 |
+
#!/usr/bin/env python3
|
| 13 |
+
# -*- coding: utf-8 -*-
|
| 14 |
+
"""
|
| 15 |
+
This module processes PNG frame sequences to generate optical flow using PTLFlow,
|
| 16 |
+
with support for visualization and video generation.
|
| 17 |
+
"""
|
| 18 |
+
|
| 19 |
+
import argparse
|
| 20 |
+
import os
|
| 21 |
+
import subprocess
|
| 22 |
+
import shutil
|
| 23 |
+
import logging
|
| 24 |
+
from pathlib import Path
|
| 25 |
+
from typing import List, Tuple, Optional, Union
|
| 26 |
+
|
| 27 |
+
import cv2 as cv
|
| 28 |
+
import torch
|
| 29 |
+
import numpy as np
|
| 30 |
+
from tqdm import tqdm
|
| 31 |
+
|
| 32 |
+
from third_partys.ptlflow.ptlflow.utils import flow_utils
|
| 33 |
+
from third_partys.ptlflow.ptlflow.utils.io_adapter import IOAdapter
|
| 34 |
+
import third_partys.ptlflow.ptlflow as ptlflow
|
| 35 |
+
|
| 36 |
+
class OpticalFlowProcessor:
|
| 37 |
+
"""Handles optical flow computation and visualization."""
|
| 38 |
+
|
| 39 |
+
def __init__(
|
| 40 |
+
self,
|
| 41 |
+
model_name: str = 'dpflow',
|
| 42 |
+
checkpoint: str = 'sintel',
|
| 43 |
+
device: Optional[str] = None,
|
| 44 |
+
resize_to: Optional[Tuple[int, int]] = None
|
| 45 |
+
):
|
| 46 |
+
"""
|
| 47 |
+
Initialize optical flow processor.
|
| 48 |
+
|
| 49 |
+
Args:
|
| 50 |
+
model_name: Name of the flow model to use
|
| 51 |
+
checkpoint: Checkpoint/dataset name for the model
|
| 52 |
+
device: Device to run on (auto-detect if None)
|
| 53 |
+
resize_to: Optional (width, height) to resize frames
|
| 54 |
+
"""
|
| 55 |
+
self.model_name = model_name
|
| 56 |
+
self.checkpoint = checkpoint
|
| 57 |
+
self.device = device or ('cuda' if torch.cuda.is_available() else 'cpu')
|
| 58 |
+
self.resize_to = resize_to
|
| 59 |
+
|
| 60 |
+
# Initialize model
|
| 61 |
+
self.model = ptlflow.get_model(model_name, ckpt_path=checkpoint).to(self.device).eval()
|
| 62 |
+
print(f"Loaded {model_name} model on {self.device}")
|
| 63 |
+
|
| 64 |
+
self.io_adapter = None
|
| 65 |
+
|
| 66 |
+
def load_frame_sequence(self, frames_dir: Union[str, Path]) -> Tuple[List[np.ndarray], List[Path]]:
|
| 67 |
+
"""
|
| 68 |
+
Load PNG frame sequence from directory.
|
| 69 |
+
"""
|
| 70 |
+
frames_dir = Path(frames_dir)
|
| 71 |
+
|
| 72 |
+
if not frames_dir.exists():
|
| 73 |
+
raise FileNotFoundError(f"Frames directory not found: {frames_dir}")
|
| 74 |
+
|
| 75 |
+
# Find PNG files and sort naturally
|
| 76 |
+
png_files = list(frames_dir.glob('*.png'))
|
| 77 |
+
if len(png_files) < 2:
|
| 78 |
+
raise ValueError(f"Need at least 2 PNG frames, found {len(png_files)} in {frames_dir}")
|
| 79 |
+
|
| 80 |
+
# Natural sorting for proper frame order
|
| 81 |
+
png_files.sort(key=lambda x: self._natural_sort_key(x.name))
|
| 82 |
+
|
| 83 |
+
frames = []
|
| 84 |
+
for png_path in tqdm(png_files, desc="Loading frames"):
|
| 85 |
+
# Load image in color
|
| 86 |
+
img_bgr = cv.imread(str(png_path), cv.IMREAD_COLOR)
|
| 87 |
+
|
| 88 |
+
if self.resize_to:
|
| 89 |
+
img_bgr = cv.resize(img_bgr, self.resize_to, interpolation=cv.INTER_LINEAR)
|
| 90 |
+
|
| 91 |
+
img_rgb = cv.cvtColor(img_bgr, cv.COLOR_BGR2RGB)
|
| 92 |
+
frames.append(img_rgb)
|
| 93 |
+
|
| 94 |
+
return frames, png_files
|
| 95 |
+
|
| 96 |
+
def _natural_sort_key(self, filename: str) -> List[Union[int, str]]:
|
| 97 |
+
"""Natural sorting key for filenames with numbers."""
|
| 98 |
+
import re
|
| 99 |
+
return [int(text) if text.isdigit() else text.lower()
|
| 100 |
+
for text in re.split('([0-9]+)', filename)]
|
| 101 |
+
|
| 102 |
+
def compute_optical_flow_sequence(
|
| 103 |
+
self,
|
| 104 |
+
frames: List[np.ndarray],
|
| 105 |
+
flow_vis_dir: Union[str, Path],
|
| 106 |
+
flow_save_dir: Optional[Union[str, Path]] = None,
|
| 107 |
+
save_visualizations: bool = True
|
| 108 |
+
) -> List[torch.Tensor]:
|
| 109 |
+
"""
|
| 110 |
+
Compute optical flow for entire frame sequence.
|
| 111 |
+
"""
|
| 112 |
+
if len(frames) < 2:
|
| 113 |
+
raise ValueError("Need at least 2 frames for optical flow")
|
| 114 |
+
|
| 115 |
+
flow_vis_dir = Path(flow_vis_dir)
|
| 116 |
+
flow_save_dir = Path(flow_save_dir) if flow_save_dir else flow_vis_dir
|
| 117 |
+
|
| 118 |
+
H, W = frames[0].shape[:2]
|
| 119 |
+
|
| 120 |
+
# Initialize IO adapter
|
| 121 |
+
if self.io_adapter is None:
|
| 122 |
+
self.io_adapter = IOAdapter(self.model, (H, W))
|
| 123 |
+
|
| 124 |
+
flows = []
|
| 125 |
+
for i in tqdm(range(len(frames) - 1), desc="Computing optical flow"):
|
| 126 |
+
# Prepare frame pair
|
| 127 |
+
frame_pair = [frames[i], frames[i + 1]]
|
| 128 |
+
raw_inputs = self.io_adapter.prepare_inputs(frame_pair)
|
| 129 |
+
|
| 130 |
+
imgs = raw_inputs['images'][0] # (2, 3, H, W)
|
| 131 |
+
|
| 132 |
+
pair_tensor = torch.stack((imgs[0:1], imgs[1:2]), dim=1).squeeze(0) # (2, 3, H, W)
|
| 133 |
+
pair_tensor = pair_tensor.to(self.device, non_blocking=True).contiguous()
|
| 134 |
+
|
| 135 |
+
with torch.no_grad():
|
| 136 |
+
flow_result = self.model({'images': pair_tensor.unsqueeze(0)})
|
| 137 |
+
flow = flow_result['flows'][0] # (1, 2, H, W)
|
| 138 |
+
|
| 139 |
+
flows.append(flow)
|
| 140 |
+
|
| 141 |
+
if save_visualizations:
|
| 142 |
+
self._save_flow_outputs(flow, i, flow_vis_dir, flow_save_dir)
|
| 143 |
+
|
| 144 |
+
return flows
|
| 145 |
+
|
| 146 |
+
def _save_flow_outputs(
|
| 147 |
+
self,
|
| 148 |
+
flow_tensor: torch.Tensor,
|
| 149 |
+
frame_idx: int,
|
| 150 |
+
viz_dir: Path,
|
| 151 |
+
flow_dir: Path
|
| 152 |
+
) -> None:
|
| 153 |
+
"""Save flow outputs in both .flo and visualization formats."""
|
| 154 |
+
# Save raw flow (.flo format)
|
| 155 |
+
flow_hw2 = flow_tensor[0] # (2, H, W)
|
| 156 |
+
flow_np = flow_hw2.permute(1, 2, 0).cpu().numpy() # (H, W, 2)
|
| 157 |
+
|
| 158 |
+
flow_path = flow_dir / f'flow_{frame_idx:04d}.flo'
|
| 159 |
+
flow_utils.flow_write(flow_path, flow_np)
|
| 160 |
+
|
| 161 |
+
# Save visualization
|
| 162 |
+
flow_rgb = flow_utils.flow_to_rgb(flow_tensor)[0] # Remove batch dimension
|
| 163 |
+
|
| 164 |
+
if flow_rgb.dim() == 4: # (Npred, 3, H, W)
|
| 165 |
+
flow_rgb = flow_rgb[0]
|
| 166 |
+
|
| 167 |
+
flow_rgb_np = (flow_rgb * 255).byte().permute(1, 2, 0).cpu().numpy() # (H, W, 3)
|
| 168 |
+
viz_bgr = cv.cvtColor(flow_rgb_np, cv.COLOR_RGB2BGR)
|
| 169 |
+
|
| 170 |
+
viz_path = viz_dir / f'flow_viz_{frame_idx:04d}.png'
|
| 171 |
+
cv.imwrite(str(viz_path), viz_bgr)
|
| 172 |
+
|
| 173 |
+
def create_flow_video(
|
| 174 |
+
image_dir: Union[str, Path],
|
| 175 |
+
output_filename: str = 'flow.mp4',
|
| 176 |
+
fps: int = 10,
|
| 177 |
+
pattern: str = 'flow_viz_*.png',
|
| 178 |
+
cleanup_temp: bool = True
|
| 179 |
+
) -> bool:
|
| 180 |
+
"""
|
| 181 |
+
Create MP4 video from flow visualization images.
|
| 182 |
+
"""
|
| 183 |
+
image_dir = Path(image_dir)
|
| 184 |
+
|
| 185 |
+
if not image_dir.exists():
|
| 186 |
+
print(f"Image directory not found: {image_dir}")
|
| 187 |
+
|
| 188 |
+
image_files = sorted(image_dir.glob(pattern))
|
| 189 |
+
if not image_files:
|
| 190 |
+
print(f"No images found matching pattern '{pattern}' in {image_dir}")
|
| 191 |
+
|
| 192 |
+
temp_dir = image_dir / 'temp_sequence'
|
| 193 |
+
temp_dir.mkdir(exist_ok=True)
|
| 194 |
+
|
| 195 |
+
try:
|
| 196 |
+
# Copy files with sequential naming
|
| 197 |
+
for i, img_file in enumerate(image_files):
|
| 198 |
+
temp_name = temp_dir / f'frame_{i:05d}.png'
|
| 199 |
+
shutil.copy2(img_file, temp_name)
|
| 200 |
+
|
| 201 |
+
# Create video using ffmpeg
|
| 202 |
+
output_path = image_dir / output_filename
|
| 203 |
+
|
| 204 |
+
cmd = [
|
| 205 |
+
'ffmpeg', '-y',
|
| 206 |
+
'-framerate', str(fps),
|
| 207 |
+
'-i', str(temp_dir / 'frame_%05d.png'),
|
| 208 |
+
'-c:v', 'libx264',
|
| 209 |
+
'-pix_fmt', 'yuv420p',
|
| 210 |
+
str(output_path)
|
| 211 |
+
]
|
| 212 |
+
|
| 213 |
+
subprocess.run(
|
| 214 |
+
cmd,
|
| 215 |
+
capture_output=True,
|
| 216 |
+
text=True,
|
| 217 |
+
check=True
|
| 218 |
+
)
|
| 219 |
+
return True
|
| 220 |
+
except Exception as e:
|
| 221 |
+
print(f"Video creation failed: {e}")
|
| 222 |
+
return False
|
| 223 |
+
finally:
|
| 224 |
+
if cleanup_temp and temp_dir.exists():
|
| 225 |
+
shutil.rmtree(temp_dir)
|
| 226 |
+
|
| 227 |
+
def main(
|
| 228 |
+
frames_dir: Union[str, Path],
|
| 229 |
+
flow_vis_dir: Union[str, Path] = 'flow_out',
|
| 230 |
+
flow_save_dir: Optional[Union[str, Path]] = None,
|
| 231 |
+
resize_to: Optional[Tuple[int, int]] = None,
|
| 232 |
+
model_name: str = 'dpflow',
|
| 233 |
+
checkpoint: str = 'sintel'
|
| 234 |
+
) -> bool:
|
| 235 |
+
|
| 236 |
+
# Initialize processor
|
| 237 |
+
processor = OpticalFlowProcessor(
|
| 238 |
+
model_name=model_name,
|
| 239 |
+
checkpoint=checkpoint,
|
| 240 |
+
resize_to=resize_to
|
| 241 |
+
)
|
| 242 |
+
|
| 243 |
+
# Load frames
|
| 244 |
+
frames, png_paths = processor.load_frame_sequence(frames_dir)
|
| 245 |
+
|
| 246 |
+
# Compute optical flow
|
| 247 |
+
flows = processor.compute_optical_flow_sequence(
|
| 248 |
+
frames=frames,
|
| 249 |
+
flow_vis_dir=flow_vis_dir,
|
| 250 |
+
flow_save_dir=flow_save_dir,
|
| 251 |
+
save_visualizations=True
|
| 252 |
+
)
|
| 253 |
+
|
| 254 |
+
# Create video
|
| 255 |
+
create_flow_video(flow_vis_dir)
return True
|
| 256 |
+
|
| 257 |
+
def get_parser():
|
| 258 |
+
parser = argparse.ArgumentParser(description="Optical flow inference on frame sequences")
|
| 259 |
+
|
| 260 |
+
parser.add_argument('--input_path', type=str, help="base input path")
|
| 261 |
+
parser.add_argument('--seq_name', type=str, help="sequence name")
|
| 262 |
+
parser.add_argument('--model_name', type=str, default='dpflow', help="Optical flow model to use")
|
| 263 |
+
parser.add_argument('--checkpoint', type=str, default='sintel', help="Model checkpoint/dataset name")
|
| 264 |
+
parser.add_argument('--resize_width', type=int, default=None, help="Resize frame width (must specify both width and height)")
|
| 265 |
+
parser.add_argument('--resize_height', type=int, default=None, help="Resize frame height (must specify both width and height)")
|
| 266 |
+
parser.add_argument('--fps', type=int, default=10, help="Frame rate for output video")
|
| 267 |
+
|
| 268 |
+
return parser
|
| 269 |
+
|
| 270 |
+
if __name__ == '__main__':
|
| 271 |
+
parser = get_parser()
|
| 272 |
+
args = parser.parse_args()
|
| 273 |
+
|
| 274 |
+
# Path
|
| 275 |
+
frames_dir = f'{args.input_path}/{args.seq_name}/imgs'
|
| 276 |
+
flow_vis_dir = frames_dir.replace("imgs", "flow_vis")
|
| 277 |
+
flow_save_dir = frames_dir.replace("imgs", "flow")
|
| 278 |
+
|
| 279 |
+
os.makedirs(flow_vis_dir, exist_ok=True)
|
| 280 |
+
os.makedirs(flow_save_dir, exist_ok=True)
|
| 281 |
+
|
| 282 |
+
# Prepare resize parameter
|
| 283 |
+
resize_to = None
|
| 284 |
+
if args.resize_width and args.resize_height:
|
| 285 |
+
resize_to = (args.resize_width, args.resize_height)
|
| 286 |
+
|
| 287 |
+
# Process optical flow
|
| 288 |
+
success = main(
|
| 289 |
+
frames_dir=frames_dir,
|
| 290 |
+
flow_vis_dir=flow_vis_dir,
|
| 291 |
+
flow_save_dir=flow_save_dir,
|
| 292 |
+
resize_to=resize_to,
|
| 293 |
+
model_name=args.model_name,
|
| 294 |
+
checkpoint=args.checkpoint
|
| 295 |
+
)
|
| 296 |
+
|
| 297 |
+
print("Optical flow processing completed successfully")
|
third_party/Puppeteer/animation/utils/save_utils.py
ADDED
|
@@ -0,0 +1,374 @@
|
| 1 |
+
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
from pytorch3d.io import load_obj
|
| 16 |
+
from pytorch3d.renderer import TexturesAtlas
|
| 17 |
+
from pytorch3d.structures import Meshes
|
| 18 |
+
|
| 19 |
+
import os
|
| 20 |
+
import torch
|
| 21 |
+
import json
|
| 22 |
+
import numpy as np
|
| 23 |
+
from tqdm import tqdm
|
| 24 |
+
from pathlib import Path
|
| 25 |
+
import subprocess
|
| 26 |
+
from PIL import Image
|
| 27 |
+
from scipy.ndimage import gaussian_filter1d
|
| 28 |
+
from third_partys.co_tracker.save_track import save_track
|
| 29 |
+
|
| 30 |
+
def render_single_mesh(renderer, mesh_path, out_path="render_result.png", atlas_size=8):
|
| 31 |
+
"""
|
| 32 |
+
Test render a single mesh and save the result.
|
| 33 |
+
"""
|
| 34 |
+
device = renderer.device
|
| 35 |
+
|
| 36 |
+
verts, faces, aux = load_obj(
|
| 37 |
+
mesh_path,
|
| 38 |
+
device=device,
|
| 39 |
+
load_textures=True,
|
| 40 |
+
create_texture_atlas=True,
|
| 41 |
+
texture_atlas_size=atlas_size,
|
| 42 |
+
texture_wrap="repeat"
|
| 43 |
+
)
|
| 44 |
+
atlas = aux.texture_atlas # (F, atlas_size, atlas_size, 3)
|
| 45 |
+
|
| 46 |
+
vmin, vmax = verts.min(0).values, verts.max(0).values
|
| 47 |
+
center = (vmax + vmin) / 2.
|
| 48 |
+
scale = (vmax - vmin).max()
|
| 49 |
+
verts = (verts - center) / scale
|
| 50 |
+
|
| 51 |
+
mesh_norm = Meshes(
|
| 52 |
+
verts=[verts],
|
| 53 |
+
faces=[faces.verts_idx],
|
| 54 |
+
textures=TexturesAtlas(atlas=[atlas])
|
| 55 |
+
)
|
| 56 |
+
with torch.no_grad():
|
| 57 |
+
rendered = renderer.render(mesh_norm) # shape=[1, H, W, 4]
|
| 58 |
+
|
| 59 |
+
rendered_img = renderer.tensor_to_image(rendered)
|
| 60 |
+
|
| 61 |
+
pil_img = Image.fromarray(rendered_img)
|
| 62 |
+
pil_img.save(out_path)
|
| 63 |
+
print(f"Saved render to {out_path}")
|
| 64 |
+
|
| 65 |
+
def apply_gaussian_smoothing(data, sigma = 1.0, preserve_first_frame = True, eps = 1e-8):
|
| 66 |
+
"""
|
| 67 |
+
Apply Gaussian smoothing along the time axis with quaternion normalization.
|
| 68 |
+
"""
|
| 69 |
+
smoothed = gaussian_filter1d(data, sigma=sigma, axis=0)
|
| 70 |
+
|
| 71 |
+
# Preserve first frame if requested
|
| 72 |
+
if preserve_first_frame and data.shape[0] > 0:
|
| 73 |
+
smoothed[0] = data[0]
|
| 74 |
+
|
| 75 |
+
if data.shape[-1] == 4:
|
| 76 |
+
norms = np.linalg.norm(smoothed, axis=-1, keepdims=True)
|
| 77 |
+
smoothed = smoothed / np.maximum(norms, eps)
|
| 78 |
+
|
| 79 |
+
return smoothed
|
| 80 |
+
|
| 81 |
+
def render_single_view_sequence(quats, root_quats, root_pos, renderer, model, output_dir, view_name, fps = 25):
|
| 82 |
+
"""
|
| 83 |
+
Render animation sequence from a single viewpoint.
|
| 84 |
+
"""
|
| 85 |
+
output_dir = Path(output_dir)
|
| 86 |
+
output_dir.mkdir(parents=True, exist_ok=True)
|
| 87 |
+
|
| 88 |
+
T = quats.shape[0]
|
| 89 |
+
|
| 90 |
+
model.animate(quats, root_quats, root_pos)
|
| 91 |
+
|
| 92 |
+
for i in tqdm(range(T), desc=f"Rendering {view_name}"):
|
| 93 |
+
mesh = model.get_mesh(i)
|
| 94 |
+
rendered = renderer.render(mesh)
|
| 95 |
+
|
| 96 |
+
img_array = renderer.tensor_to_image(rendered)
|
| 97 |
+
img = Image.fromarray(img_array)
|
| 98 |
+
|
| 99 |
+
frame_path = output_dir / f"{view_name}_frame_{i:04d}.png"
|
| 100 |
+
img.save(frame_path)
|
| 101 |
+
|
| 102 |
+
# Create video
|
| 103 |
+
video_path = output_dir / f"{view_name}_output_video.mp4"
|
| 104 |
+
cmd = f"ffmpeg -y -framerate {fps} -i {output_dir}/{view_name}_frame_%04d.png -c:v libx264 -pix_fmt yuv420p {video_path}"
|
| 105 |
+
subprocess.call(cmd, shell=True)
|
| 106 |
+
|
| 107 |
+
def save_and_smooth_results(args, model, renderer, final_quats, root_quats, root_pos, out_dir, additional_renderers = None, load_pt = False, sigma = 1.0, fps = 25):
|
| 108 |
+
"""
|
| 109 |
+
Save and smooth animation results with multi-view rendering.
|
| 110 |
+
"""
|
| 111 |
+
device = final_quats.device
|
| 112 |
+
T = final_quats.shape[0]
|
| 113 |
+
# Save Raw Results
|
| 114 |
+
if not load_pt:
|
| 115 |
+
raw_dir = os.path.join(out_dir, "raw")
|
| 116 |
+
os.makedirs(raw_dir, exist_ok=True)
|
| 117 |
+
|
| 118 |
+
torch.save(final_quats, os.path.join(raw_dir, "local_quats.pt"))
|
| 119 |
+
torch.save(root_quats, os.path.join(raw_dir, "root_quats.pt"))
|
| 120 |
+
torch.save(root_pos, os.path.join(raw_dir, "root_pos.pt"))
|
| 121 |
+
if hasattr(model, 'rest_local_positions'):
|
| 122 |
+
torch.save(model.rest_local_positions, os.path.join(raw_dir, "rest_local_positions.pt"))
|
| 123 |
+
|
| 124 |
+
print(f"Saved raw motion to {raw_dir}")
|
| 125 |
+
|
| 126 |
+
quats_np = final_quats.cpu().numpy()
|
| 127 |
+
root_quats_np = root_quats.cpu().numpy()
|
| 128 |
+
root_pos_np = root_pos.cpu().numpy()
|
| 129 |
+
|
| 130 |
+
# Apply Gaussian smoothing if enabled
|
| 131 |
+
if args.gauss_filter:
|
| 132 |
+
print(f"Applying Gaussian smoothing (sigma={sigma})")
|
| 133 |
+
|
| 134 |
+
smooth_quats_np = apply_gaussian_smoothing(
|
| 135 |
+
quats_np, sigma=sigma, preserve_first_frame=True
|
| 136 |
+
)
|
| 137 |
+
smooth_root_quats_np = apply_gaussian_smoothing(
|
| 138 |
+
root_quats_np, sigma=sigma, preserve_first_frame=True
|
| 139 |
+
)
|
| 140 |
+
smooth_root_pos_np = apply_gaussian_smoothing(
|
| 141 |
+
root_pos_np, sigma=sigma, preserve_first_frame=True
|
| 142 |
+
)
|
| 143 |
+
smooth_dir = os.path.join(out_dir, "smoothed")
|
| 144 |
+
os.makedirs(smooth_dir, exist_ok=True)
|
| 145 |
+
save_dir = smooth_dir
|
| 146 |
+
|
| 147 |
+
else:
|
| 148 |
+
smooth_quats_np = quats_np
|
| 149 |
+
smooth_root_quats_np = root_quats_np
|
| 150 |
+
smooth_root_pos_np = root_pos_np
|
| 151 |
+
save_dir = os.path.join(out_dir, "raw")  # raw_dir is only defined when load_pt is False
|
| 152 |
+
|
| 153 |
+
smooth_quats = torch.tensor(smooth_quats_np, dtype=torch.float32, device=device)
|
| 154 |
+
smooth_root_quats = torch.tensor(smooth_root_quats_np, dtype=torch.float32, device=device)
|
| 155 |
+
smooth_root_pos = torch.tensor(smooth_root_pos_np, dtype=torch.float32, device=device)
|
| 156 |
+
|
| 157 |
+
# Render Sequences
|
| 158 |
+
if not load_pt and args.gauss_filter:
|
| 159 |
+
smooth_dir_path = Path(smooth_dir)
|
| 160 |
+
torch.save(smooth_quats, smooth_dir_path / "local_quats.pt")
|
| 161 |
+
torch.save(smooth_root_quats, smooth_dir_path / "root_quats.pt")
|
| 162 |
+
torch.save(smooth_root_pos, smooth_dir_path / "root_pos.pt")
|
| 163 |
+
print(f"Saved smoothed motion to {smooth_dir}")
|
| 164 |
+
|
| 165 |
+
# Render main view
|
| 166 |
+
print(f"Rendering {args.main_renderer} view ({T} frames)")
|
| 167 |
+
render_single_view_sequence(
|
| 168 |
+
smooth_quats, smooth_root_quats, smooth_root_pos,
|
| 169 |
+
renderer, model, save_dir, args.main_renderer, fps
|
| 170 |
+
)
|
| 171 |
+
|
| 172 |
+
# Render additional views if provided
|
| 173 |
+
if additional_renderers:
|
| 174 |
+
for renderer_key, view_renderer in additional_renderers.items():
|
| 175 |
+
view_name = renderer_key.replace("_renderer", "")
|
| 176 |
+
render_single_view_sequence(
|
| 177 |
+
smooth_quats, smooth_root_quats, smooth_root_pos,
|
| 178 |
+
view_renderer, model, save_dir, view_name, fps
|
| 179 |
+
)
|
| 180 |
+
|
| 181 |
+
def save_args(args, output_dir, filename="config.json"):
|
| 182 |
+
args_dict = vars(args)
|
| 183 |
+
os.makedirs(output_dir, exist_ok=True)
|
| 184 |
+
|
| 185 |
+
config_path = os.path.join(output_dir, filename)
|
| 186 |
+
with open(config_path, 'w') as f:
|
| 187 |
+
json.dump(args_dict, f, indent=4)
|
| 188 |
+
|
| 189 |
+
def visualize_joints_on_mesh(model, renderer, seq_name, out_dir):
|
| 190 |
+
"""
|
| 191 |
+
Render mesh with joint visualizations and return visibility mask.
|
| 192 |
+
"""
|
| 193 |
+
joints_2d = renderer.project_points(model.joints_rest)
|
| 194 |
+
|
| 195 |
+
mesh = model.get_mesh()
|
| 196 |
+
image_with_joints, vis_mask = renderer.render_with_points(mesh, model.joints_rest)
|
| 197 |
+
image_np = image_with_joints[0].cpu().numpy()
|
| 198 |
+
if image_np.shape[2] == 4:
|
| 199 |
+
image_rgb = image_np[..., :3]
|
| 200 |
+
else:
|
| 201 |
+
image_rgb = image_np
|
| 202 |
+
if image_rgb.max() <= 1.0:
|
| 203 |
+
image_rgb = (image_rgb * 255).astype(np.uint8)
|
| 204 |
+
img = Image.fromarray(image_rgb)
|
| 205 |
+
output_path = f"{out_dir}/mesh_with_joints_{seq_name}_visible.png"
|
| 206 |
+
img.save(output_path)
|
| 207 |
+
return vis_mask
|
| 208 |
+
|
| 209 |
+
def visualize_points_on_mesh(model, renderer, seq_name, out_dir):
|
| 210 |
+
"""
|
| 211 |
+
Render mesh with point visualizations and return visibility mask.
|
| 212 |
+
"""
|
| 213 |
+
points_2d = renderer.project_points(model.vertices[0])
|
| 214 |
+
|
| 215 |
+
mesh = model.get_mesh()
|
| 216 |
+
image_with_points, vis_mask = renderer.render_with_points(mesh, model.vertices[0], for_vertices=True)
|
| 217 |
+
image_np = image_with_points[0].cpu().numpy()
|
| 218 |
+
if image_np.shape[2] == 4:
|
| 219 |
+
image_rgb = image_np[..., :3]
|
| 220 |
+
else:
|
| 221 |
+
image_rgb = image_np
|
| 222 |
+
if image_rgb.max() <= 1.0:
|
| 223 |
+
image_rgb = (image_rgb * 255).astype(np.uint8)
|
| 224 |
+
img = Image.fromarray(image_rgb)
|
| 225 |
+
output_path = f"{out_dir}/mesh_with_verts_{seq_name}_visible.png"
|
| 226 |
+
img.save(output_path)
|
| 227 |
+
return vis_mask
|
| 228 |
+
|
| 229 |
+
def save_track_points(point_vis_mask, renderer, model, img_path, out_dir, args):
|
| 230 |
+
"""
|
| 231 |
+
Save and track selected points on the mesh with intelligent sampling.
|
| 232 |
+
"""
|
| 233 |
+
|
| 234 |
+
vertex_project_2d = renderer.project_points(model.vertices[0])
|
| 235 |
+
visible_indices = torch.where(point_vis_mask)[0]
|
| 236 |
+
|
| 237 |
+
track_2d_point_path = img_path.replace('imgs', 'track_2d_verts')
|
| 238 |
+
os.makedirs(track_2d_point_path, exist_ok=True)
|
| 239 |
+
|
| 240 |
+
num_visible = len(visible_indices)
|
| 241 |
+
MAX_VISIBLE_POINTS = 15000
|
| 242 |
+
MAX_SAMPLE_POINTS = 4000
|
| 243 |
+
|
| 244 |
+
# Determine tracking strategy
|
| 245 |
+
tracking_mode = "full" if num_visible <= MAX_VISIBLE_POINTS else "sampled"
|
| 246 |
+
|
| 247 |
+
if not os.listdir(track_2d_point_path):
|
| 248 |
+
# Generate new tracking data
|
| 249 |
+
if tracking_mode == "full":
|
| 250 |
+
print(f"Saving tracks for all visible vertices (count: {num_visible})")
|
| 251 |
+
|
| 252 |
+
# Track all visible points
|
| 253 |
+
visible_vertex_project_2d = vertex_project_2d[visible_indices]
|
| 254 |
+
track_2d_point = save_track(
|
| 255 |
+
args.seq_name, visible_vertex_project_2d, img_path,
|
| 256 |
+
track_2d_point_path, out_dir, for_point=True
|
| 257 |
+
)
|
| 258 |
+
|
| 259 |
+
np.save(f'{track_2d_point_path}/visible_indices.npy',
|
| 260 |
+
visible_indices.cpu().numpy())
|
| 261 |
+
|
| 262 |
+
# Sample subset for final use
|
| 263 |
+
num_sample = min(MAX_SAMPLE_POINTS, num_visible)
|
| 264 |
+
sampled_local_indices = torch.randperm(num_visible)[:num_sample]
|
| 265 |
+
sampled_vertex_indices = visible_indices[sampled_local_indices]
|
| 266 |
+
np.save(f'{track_2d_point_path}/sampled_indices.npy',
|
| 267 |
+
sampled_vertex_indices.cpu().numpy())
|
| 268 |
+
|
| 269 |
+
else:
|
| 270 |
+
print(f"Too many visible vertices ({num_visible} > {MAX_VISIBLE_POINTS}), "
|
| 271 |
+
f"tracking only {MAX_SAMPLE_POINTS} sampled vertices")
|
| 272 |
+
|
| 273 |
+
# Sample points directly from visible set
|
| 274 |
+
num_sample = min(MAX_SAMPLE_POINTS, num_visible)
|
| 275 |
+
sampled_local_indices = torch.randperm(num_visible)[:num_sample]
|
| 276 |
+
sampled_vertex_indices = visible_indices[sampled_local_indices]
|
| 277 |
+
|
| 278 |
+
# Track only sampled points
|
| 279 |
+
sampled_vertex_project_2d = vertex_project_2d[sampled_vertex_indices]
|
| 280 |
+
track_2d_point = save_track(
|
| 281 |
+
args.seq_name, sampled_vertex_project_2d, img_path,
|
| 282 |
+
track_2d_point_path, out_dir, for_point=True
|
| 283 |
+
)
|
| 284 |
+
|
| 285 |
+
np.save(f'{track_2d_point_path}/visible_indices.npy',
|
| 286 |
+
visible_indices.cpu().numpy())
|
| 287 |
+
np.save(f'{track_2d_point_path}/sampled_indices.npy',
|
| 288 |
+
sampled_vertex_indices.cpu().numpy())
|
| 289 |
+
|
| 290 |
+
else:
|
| 291 |
+
# Load existing tracking data
|
| 292 |
+
print("Loading existing vertex tracks")
|
| 293 |
+
track_2d_point = np.load(f'{track_2d_point_path}/pred_tracks.npy')
|
| 294 |
+
|
| 295 |
+
visible_indices = np.load(f'{track_2d_point_path}/visible_indices.npy')
|
| 296 |
+
visible_indices = torch.from_numpy(visible_indices).long().to(args.device)
|
| 297 |
+
|
| 298 |
+
sampled_vertex_indices = np.load(f'{track_2d_point_path}/sampled_indices.npy')
|
| 299 |
+
sampled_vertex_indices = torch.from_numpy(sampled_vertex_indices).long().to(args.device)
|
| 300 |
+
|
| 301 |
+
track_2d_point = torch.from_numpy(track_2d_point).float().to(args.device)
|
| 302 |
+
|
| 303 |
+
# Create index mapping for tracking data
|
| 304 |
+
if tracking_mode == "full":
|
| 305 |
+
# Map from original vertex indices to positions in tracking data
|
| 306 |
+
vertex_to_track_idx = {idx.item(): i for i, idx in enumerate(visible_indices)}
|
| 307 |
+
|
| 308 |
+
track_indices = torch.tensor(
|
| 309 |
+
[vertex_to_track_idx[idx.item()] for idx in sampled_vertex_indices],
|
| 310 |
+
device=args.device, dtype=torch.long
|
| 311 |
+
)
|
| 312 |
+
else:
|
| 313 |
+
# Direct mapping for sampled-only tracking
|
| 314 |
+
track_indices = torch.arange(len(sampled_vertex_indices),
|
| 315 |
+
device=args.device, dtype=torch.long)
|
| 316 |
+
|
| 317 |
+
return track_2d_point, track_indices, sampled_vertex_indices
|
| 318 |
+
|
| 319 |
+
def save_final_video(args):
|
| 320 |
+
|
| 321 |
+
additional_views = [view.strip() for view in args.additional_renderers.split(',') if view.strip()]
|
| 322 |
+
if len(additional_views) > 3:
|
| 323 |
+
additional_views = additional_views[:3]
|
| 324 |
+
additional_views = [view for view in additional_views if view != args.main_renderer]
|
| 325 |
+
|
| 326 |
+
save_dir = 'raw' if not args.gauss_filter else 'smoothed'
|
| 327 |
+
import subprocess
|
| 328 |
+
cmd = (
|
| 329 |
+
f'ffmpeg '
|
| 330 |
+
f'-i {args.input_path}/{args.seq_name}/input.mp4 '
|
| 331 |
+
f'-i {args.save_path}/{args.seq_name}/{args.save_name}/{save_dir}/{args.main_renderer}_output_video.mp4 '
|
| 332 |
+
'-filter_complex "'
|
| 333 |
+
'[0:v][1:v]hstack=inputs=2[stacked]; '
|
| 334 |
+
'[stacked]drawtext=fontfile=/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf:text=\'gt\':x=(w/4-text_w/2):y=20:fontsize=24:fontcolor=white:box=1:boxcolor=black:boxborderw=10, '
|
| 335 |
+
f'drawtext=fontfile=/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf:text=\'ours\':x=(3*w/4-text_w/2):y=20:fontsize=24:fontcolor=white:box=1:boxcolor=black:boxborderw=10" '
|
| 336 |
+
f'-c:a copy {args.save_path}/{args.seq_name}/{args.save_name}/concat_output.mp4'
|
| 337 |
+
)
|
| 338 |
+
|
| 339 |
+
subprocess.call(cmd, shell=True)
|
| 340 |
+
cmd = (
|
| 341 |
+
f'ffmpeg '
|
| 342 |
+
f'-i {args.input_path}/{args.seq_name}/input.mp4 '
|
| 343 |
+
f'-i {args.save_path}/{args.seq_name}/{args.save_name}/{save_dir}/{args.main_renderer}_output_video.mp4 '
|
| 344 |
+
f'-i {args.save_path}/{args.seq_name}/{args.save_name}/{save_dir}/{additional_views[0]}_output_video.mp4 '
|
| 345 |
+
f'-i {args.save_path}/{args.seq_name}/{args.save_name}/{save_dir}/{additional_views[1]}_output_video.mp4 '
|
| 346 |
+
f'-i {args.save_path}/{args.seq_name}/{args.save_name}/{save_dir}/{additional_views[2]}_output_video.mp4 '
|
| 347 |
+
'-filter_complex "'
|
| 348 |
+
'[0:v][1:v][2:v][3:v][4:v]hstack=inputs=5[stacked]; '
|
| 349 |
+
'[stacked]drawtext=fontfile=/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf:text=\'gt\':x=(w/10-text_w/2):y=20:fontsize=24:fontcolor=white:box=1:boxcolor=black:boxborderw=10, '
|
| 350 |
+
f'drawtext=fontfile=/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf:text=\'{args.main_renderer}\':x=(3*w/10-text_w/2):y=20:fontsize=24:fontcolor=white:box=1:boxcolor=black:boxborderw=10, '
|
| 351 |
+
f'drawtext=fontfile=/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf:text=\'{additional_views[0]}\':x=(5*w/10-text_w/2):y=20:fontsize=24:fontcolor=white:box=1:boxcolor=black:boxborderw=10, '
|
| 352 |
+
f'drawtext=fontfile=/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf:text=\'{additional_views[1]}\':x=(7*w/10-text_w/2):y=20:fontsize=24:fontcolor=white:box=1:boxcolor=black:boxborderw=10, '
|
| 353 |
+
f'drawtext=fontfile=/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf:text=\'{additional_views[2]}\':x=(9*w/10-text_w/2):y=20:fontsize=24:fontcolor=white:box=1:boxcolor=black:boxborderw=10" '
|
| 354 |
+
f'-c:a copy {args.save_path}/{args.seq_name}/{args.save_name}/concat_output_4view.mp4'
|
| 355 |
+
)
|
| 356 |
+
subprocess.call(cmd, shell=True)
|
| 357 |
+
|
| 358 |
+
def load_motion_data(motion_dir, device="cuda:0"):
|
| 359 |
+
"""
|
| 360 |
+
Load saved motion data.
|
| 361 |
+
"""
|
| 362 |
+
local_quats = torch.load(os.path.join(motion_dir, "local_quats.pt"), map_location=device)
|
| 363 |
+
root_quats = torch.load(os.path.join(motion_dir, "root_quats.pt"), map_location=device)
|
| 364 |
+
root_pos = torch.load(os.path.join(motion_dir, "root_pos.pt"), map_location=device)
|
| 365 |
+
|
| 366 |
+
# Load rest positions if available (for reference)
|
| 367 |
+
rest_pos_path = os.path.join(motion_dir, "rest_local_positions.pt")
|
| 368 |
+
if os.path.exists(rest_pos_path):
|
| 369 |
+
rest_positions = torch.load(rest_pos_path, map_location=device)
|
| 370 |
+
else:
|
| 371 |
+
rest_positions = None
|
| 372 |
+
print("Warning: rest_local_positions.pt not found, model should have them initialized")
|
| 373 |
+
|
| 374 |
+
return local_quats, root_quats, root_pos, rest_positions
|
third_party/Puppeteer/checkpoints/rig.ckpt
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0019dfc4b32d63c1392aa264aed2253c1e0c2fb09216f8e2cc269bbfb8bb49b5
|
| 3 |
+
size 9
|
third_party/Puppeteer/demo_animation.sh
ADDED
|
@@ -0,0 +1,63 @@
|
| 1 |
+
#!/bin/bash
|
| 2 |
+
|
| 3 |
+
echo "Running animation..."
|
| 4 |
+
|
| 5 |
+
# copy rig and mesh for animation
|
| 6 |
+
for txt_file in results/final_rigging/*.txt; do
|
| 7 |
+
if [ -f "$txt_file" ]; then
|
| 8 |
+
seq_name=$(basename "$txt_file" .txt)
|
| 9 |
+
|
| 10 |
+
mkdir -p "examples/$seq_name/objs/"
|
| 11 |
+
|
| 12 |
+
cp "$txt_file" "examples/$seq_name/objs/rig.txt"
|
| 13 |
+
echo "Copied $txt_file -> examples/$seq_name/objs/rig.txt"
|
| 14 |
+
|
| 15 |
+
obj_file="examples/$seq_name.obj"
|
| 16 |
+
if [ -f "$obj_file" ]; then
|
| 17 |
+
cp "$obj_file" "examples/$seq_name/objs/mesh.obj"
|
| 18 |
+
echo "Copied $obj_file -> examples/$seq_name/objs/mesh.obj"
|
| 19 |
+
else
|
| 20 |
+
echo "Warning: $obj_file not found"
|
| 21 |
+
fi
|
| 22 |
+
|
| 23 |
+
# extract frames
|
| 24 |
+
video_file="examples/$seq_name/input.mp4"
|
| 25 |
+
if [ -f "$video_file" ]; then
|
| 26 |
+
echo "Found video file: $video_file"
|
| 27 |
+
cd "examples/$seq_name"
|
| 28 |
+
mkdir -p imgs
|
| 29 |
+
ffmpeg -i input.mp4 -vf fps=10 imgs/frame_%04d.png -y
|
| 30 |
+
echo "Extracted frames from $video_file to imgs/"
|
| 31 |
+
cd ../../
|
| 32 |
+
else
|
| 33 |
+
echo "No video file found: $video_file"
|
| 34 |
+
fi
|
| 35 |
+
fi
|
| 36 |
+
done
|
| 37 |
+
|
| 38 |
+
cd animation
|
| 39 |
+
|
| 40 |
+
# save flow
|
| 41 |
+
echo "Processing sequences with save_flow.py..."
|
| 42 |
+
for seq_dir in ../examples/*/; do
|
| 43 |
+
if [ -d "$seq_dir" ]; then
|
| 44 |
+
seq_name=$(basename "$seq_dir")
|
| 45 |
+
echo "Processing sequence: $seq_name"
|
| 46 |
+
python utils/save_flow.py --input_path ../examples --seq_name "$seq_name"
|
| 47 |
+
fi
|
| 48 |
+
done
|
| 49 |
+
|
| 50 |
+
# animation
|
| 51 |
+
echo "Running optimization for each sequence..."
|
| 52 |
+
mkdir -p ../results/animation
|
| 53 |
+
|
| 54 |
+
python optimization.py --save_path ../results/animation --iter 200 --input_path ../examples --img_size 960 \
|
| 55 |
+
--seq_name 'spiderman' --save_name 'spiderman_demo'
|
| 56 |
+
|
| 57 |
+
python optimization.py --save_path ../results/animation --iter 200 --input_path ../examples --img_size 960 \
|
| 58 |
+
--seq_name 'deer' --save_name 'deer_demo' --smooth_weight 1 --main_renderer front_left --additional_renderer "right,front_right,back_right"
|
| 59 |
+
|
| 60 |
+
echo "Animation completed."
|
| 61 |
+
|
| 62 |
+
cd ..
|
| 63 |
+
echo "Puppeteer pipeline completed successfully!"
|
third_party/Puppeteer/demo_rigging.sh
ADDED
|
@@ -0,0 +1,117 @@
|
| 1 |
+
#!/usr/bin/env bash
|
| 2 |
+
# Robust Puppeteer rigging pipeline
|
| 3 |
+
# - stop on errors
|
| 4 |
+
# - safe path checks
|
| 5 |
+
# - always copy artifacts into /data/results
|
| 6 |
+
|
| 7 |
+
set -euo pipefail
|
| 8 |
+
|
| 9 |
+
echo "[INFO] Starting Puppeteer rigging pipeline..."
|
| 10 |
+
|
| 11 |
+
# ---------- env / paths ----------
|
| 12 |
+
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
| 13 |
+
# assumes the repo layout is /app/Puppeteer/{skeleton,skinning,...}
|
| 14 |
+
ROOT_DIR="$(dirname "$SCRIPT_DIR")" # /app/Puppeteer
|
| 15 |
+
TMP_DIR="/tmp/puppeteer_run"
|
| 16 |
+
WORK_DIR="${TMP_DIR}" # unified working directory
|
| 17 |
+
IN_EXAMPLES="${ROOT_DIR}/examples"
|
| 18 |
+
OUT_ROOT="${WORK_DIR}/results"
|
| 19 |
+
OUT_SKEL="${OUT_ROOT}/skel_results"
|
| 20 |
+
OUT_SKEL_FOR_SKIN="${OUT_ROOT}/skeletons"
|
| 21 |
+
OUT_SKIN="${OUT_ROOT}/skin_results"
|
| 22 |
+
OUT_FINAL="${OUT_ROOT}/final_rigging"
|
| 23 |
+
RESULT_DIR="${RESULT_DIR:-/data/results}" # same environment variable as app.py
|
| 24 |
+
|
| 25 |
+
mkdir -p "$WORK_DIR" "$OUT_ROOT" "$OUT_SKEL_FOR_SKIN" "$OUT_FINAL" "$RESULT_DIR"
|
| 26 |
+
|
| 27 |
+
# extend the Python path (compatible with both third_party / third_partys)
|
| 28 |
+
export PYTHONPATH="/app:/app/Puppeteer:/app/Puppeteer/third_party:${PYTHONPATH:-}"
|
| 29 |
+
[ -d /app/third_partys ] || ln -s /app/Puppeteer/third_party /app/third_partys 2>/dev/null || true
|
| 30 |
+
[ -f /app/Puppeteer/third_party/__init__.py ] || touch /app/Puppeteer/third_party/__init__.py
|
| 31 |
+
|
| 32 |
+
# ---------- skeleton ----------
|
| 33 |
+
echo "[INFO] Running skeleton generation..."
|
| 34 |
+
cd "${ROOT_DIR}/skeleton"
|
| 35 |
+
|
| 36 |
+
python demo.py \
|
| 37 |
+
--input_dir "${IN_EXAMPLES}" \
|
| 38 |
+
--pretrained_weights skeleton_ckpts/puppeteer_skeleton_w_diverse_pose.pth \
|
| 39 |
+
--output_dir "${OUT_ROOT}" \
|
| 40 |
+
--save_name skel_results \
|
| 41 |
+
--input_pc_num 8192 \
|
| 42 |
+
--save_render \
|
| 43 |
+
--apply_marching_cubes \
|
| 44 |
+
--joint_token \
|
| 45 |
+
--seq_shuffle
|
| 46 |
+
|
| 47 |
+
echo "[INFO] Skeleton generation completed."
|
| 48 |
+
|
| 49 |
+
# copy skeleton results into the skinning input folder (verify they exist)
|
| 50 |
+
echo "[INFO] Preparing skeletons for skinning..."
|
| 51 |
+
if [ -d "${OUT_SKEL}" ]; then
|
| 52 |
+
mkdir -p "${OUT_SKEL_FOR_SKIN}"
|
| 53 |
+
shopt -s nullglob
|
| 54 |
+
for f in "${OUT_SKEL}"/*_pred.txt; do
|
| 55 |
+
cp -f "$f" "${OUT_SKEL_FOR_SKIN}/$(basename "${f/_pred.txt/.txt}")"
|
| 56 |
+
done
|
| 57 |
+
shopt -u nullglob
|
| 58 |
+
else
|
| 59 |
+
echo "[ERR] ${OUT_SKEL} not found (skeleton step failed?)"
|
| 60 |
+
exit 1
|
| 61 |
+
fi
|
| 62 |
+
echo "[INFO] Copied rig files to ${OUT_SKEL_FOR_SKIN}"
|
| 63 |
+
|
| 64 |
+
# ---------- skinning ----------
|
| 65 |
+
echo "[INFO] Running skinning..."
|
| 66 |
+
cd "${ROOT_DIR}/skinning"
|
| 67 |
+
|
| 68 |
+
# a Space is usually allocated only one GPU (CUDA_VISIBLE_DEVICES=0)
|
| 69 |
+
CUDA_VISIBLE_DEVICES="${CUDA_VISIBLE_DEVICES:-0}" \
|
| 70 |
+
torchrun --nproc_per_node=1 --master_port=10009 \
|
| 71 |
+
main.py \
|
| 72 |
+
--num_workers 1 \
|
| 73 |
+
--batch_size 1 \
|
| 74 |
+
--generate \
|
| 75 |
+
--save_skin_npy \
|
| 76 |
+
--pretrained_weights skinning_ckpts/puppeteer_skin_w_diverse_pose_depth1.pth \
|
| 77 |
+
--input_skel_folder "${OUT_SKEL_FOR_SKIN}" \
|
| 78 |
+
--mesh_folder "${IN_EXAMPLES}" \
|
| 79 |
+
--post_filter \
|
| 80 |
+
--depth 1 \
|
| 81 |
+
--save_folder "${OUT_SKIN}"
|
| 82 |
+
|
| 83 |
+
echo "[INFO] Skinning completed."
|
| 84 |
+
|
| 85 |
+
# ---------- collect artifacts ----------
|
| 86 |
+
echo "[INFO] Collecting final artifacts..."
|
| 87 |
+
mkdir -p "${OUT_FINAL}"
|
| 88 |
+
|
| 89 |
+
# preferred artifact: output/rigged.glb (copy it first if present)
|
| 90 |
+
if [ -f "${WORK_DIR}/output/rigged.glb" ]; then
|
| 91 |
+
cp -f "${WORK_DIR}/output/rigged.glb" "${OUT_FINAL}/rigged.glb"
|
| 92 |
+
fi
|
| 93 |
+
|
| 94 |
+
# also collect skinning results (.glb) if present
|
| 95 |
+
if [ -d "${OUT_SKIN}/generate" ]; then
|
| 96 |
+
shopt -s nullglob
|
| 97 |
+
cp -f "${OUT_SKIN}/generate/"*.glb "${OUT_FINAL}/" 2>/dev/null || true
|
| 98 |
+
shopt -u nullglob
|
| 99 |
+
fi
|
| 100 |
+
|
| 101 |
+
# ---------- export to /data/results ----------
|
| 102 |
+
echo "[INFO] Exporting to ${RESULT_DIR} ..."
|
| 103 |
+
mkdir -p "${RESULT_DIR}"
|
| 104 |
+
shopt -s nullglob
|
| 105 |
+
cp -f "${OUT_FINAL}/"*.glb "${RESULT_DIR}/" 2>/dev/null || true
|
| 106 |
+
cp -f "${OUT_FINAL}/"*.gltf "${RESULT_DIR}/" 2>/dev/null || true
|
| 107 |
+
shopt -u nullglob
|
| 108 |
+
|
| 109 |
+
# verify results: at least one artifact must exist for the run to count as a success
|
| 110 |
+
if compgen -G "${RESULT_DIR}/*.glb" > /dev/null || compgen -G "${RESULT_DIR}/*.gltf" > /dev/null ; then
|
| 111 |
+
echo "[OK] Artifacts saved to ${RESULT_DIR}"
|
| 112 |
+
else
|
| 113 |
+
echo "[ERR] No .glb/.gltf produced. Check skeleton/skinning logs."
|
| 114 |
+
exit 2
|
| 115 |
+
fi
|
| 116 |
+
|
| 117 |
+
echo "[INFO] Pipeline finished successfully."
|
third_party/Puppeteer/requirements.txt
ADDED
|
@@ -0,0 +1,29 @@
|
| 1 |
+
trimesh==4.2.3
|
| 2 |
+
accelerate==0.28.0
|
| 3 |
+
mesh2sdf==1.1.0
|
| 4 |
+
transformers==4.46.1
|
| 5 |
+
numpy==1.26.4
|
| 6 |
+
pyrender==0.1.45
|
| 7 |
+
tqdm
|
| 8 |
+
opencv-python==4.9.0.80
|
| 9 |
+
omegaconf==2.3.0
|
| 10 |
+
einops==0.7.0
|
| 11 |
+
timm
|
| 12 |
+
lightning==2.2
|
| 13 |
+
boto3
|
| 14 |
+
cython==0.29.36
|
| 15 |
+
tetgen==0.5.2
|
| 16 |
+
loguru
|
| 17 |
+
pytz
|
| 18 |
+
h5py
|
| 19 |
+
plyfile
|
| 20 |
+
pymeshlab
|
| 21 |
+
yacs
|
| 22 |
+
fvcore
|
| 23 |
+
easydict
|
| 24 |
+
libigl==2.5.1
|
| 25 |
+
scikit-learn
|
| 26 |
+
jsonargparse
|
| 27 |
+
ptlflow
|
| 28 |
+
imageio-ffmpeg==0.4.7
|
| 29 |
+
xformers==0.0.23
|
third_party/Puppeteer/skeleton/README.md
ADDED
|
@@ -0,0 +1,93 @@
|
| 1 |
+
# Auto-regressive Skeleton Generation
|
| 2 |
+
This folder provides the skeleton generation implementation and scripts to evaluate the paper’s metrics on three test sets. You can also run inference on your own 3D objects.
|
| 3 |
+
|
| 4 |
+
## Weights Download
|
| 5 |
+
First download [checkpoints of Michelangelo](https://huggingface.co/Maikou/Michelangelo/tree/main/checkpoints/aligned_shape_latents) and our [released weights](https://huggingface.co/Seed3D/Puppeteer) for skeleton generation:
|
| 6 |
+
|
| 7 |
+
```
|
| 8 |
+
python download.py
|
| 9 |
+
```
|
| 10 |
+
|
| 11 |
+
## Evaluation
|
| 12 |
+
|
| 13 |
+
To reproduce our evaluations, run the following command on `Articulation-XL2.0-test`, `ModelsResource-test` and `Diverse-pose-test`. `Articulation-XL2.0-test` and `Diverse-pose-test` are available [here](https://huggingface.co/datasets/chaoyue7/Articulation-XL2.0). For your convenience, we also save `ModelsResource-test` in our format (download it [here](https://drive.google.com/file/d/12U2ZuZWcKCQRI3IheBbG6I9-jfpG4KF5/view?usp=sharing)). The inference process requires 4.6 GB of VRAM and takes 1–2 seconds per inference.
|
| 14 |
+
|
| 15 |
+
```
|
| 16 |
+
bash eval.sh
|
| 17 |
+
```
|
| 18 |
+
You can change `save_name` for different evaluation runs and check the quantitative results afterwards in `evaluate_results.txt`. The pipeline saves the mesh and skeleton as `.obj` files; pass `--save_render` to additionally generate rendered previews of the mesh and skeleton.
|
| 19 |
+
|
| 20 |
+
These are the numbers (the metrics are in units of 10<sup>-2</sup>) that you should be able to reproduce using the released weights and the current version of the codebase.
|
| 21 |
+
<table>
|
| 22 |
+
<thead>
|
| 23 |
+
<tr>
|
| 24 |
+
<th rowspan="2">Test set</th>
|
| 25 |
+
<th colspan="3">Articulation-XL2.0-test</th>
|
| 26 |
+
<th colspan="3">ModelsResource-test</th>
|
| 27 |
+
<th colspan="3">Diverse-pose-test</th>
|
| 28 |
+
</tr>
|
| 29 |
+
<tr>
|
| 30 |
+
<th>CD-J2J</th>
|
| 31 |
+
<th>CD-J2B</th>
|
| 32 |
+
<th>CD-B2B</th>
|
| 33 |
+
<th>CD-J2J</th>
|
| 34 |
+
<th>CD-J2B</th>
|
| 35 |
+
<th>CD-B2B</th>
|
| 36 |
+
<th>CD-J2J</th>
|
| 37 |
+
<th>CD-J2B</th>
|
| 38 |
+
<th>CD-B2B</th>
|
| 39 |
+
</tr>
|
| 40 |
+
</thead>
|
| 41 |
+
<tbody>
|
| 42 |
+
<tr>
|
| 43 |
+
<td>train on Arti-XL2.0 w/o diverse-pose subset</td>
|
| 44 |
+
<td>3.062</td>
|
| 45 |
+
<td>2.342</td>
|
| 46 |
+
<td>1.963</td>
|
| 47 |
+
<td>3.843</td>
|
| 48 |
+
<td>2.876</td>
|
| 49 |
+
<td>2.465</td>
|
| 50 |
+
<td>3.276</td>
|
| 51 |
+
<td>2.597</td>
|
| 52 |
+
<td>2.074</td>
|
| 53 |
+
</tr>
|
| 54 |
+
<tr>
|
| 55 |
+
<td>train on Arti-XL2.0 w/ diverse-pose subset</td>
|
| 56 |
+
<td><b>3.047</b></td>
|
| 57 |
+
<td><b>2.337</b></td>
|
| 58 |
+
<td><b>1.952</b></td>
|
| 59 |
+
<td><b>3.785</b></td>
|
| 60 |
+
<td><b>2.847</b></td>
|
| 61 |
+
<td><b>2.430</b></td>
|
| 62 |
+
<td><b>2.483</b></td>
|
| 63 |
+
<td><b>1.922</b></td>
|
| 64 |
+
<td><b>1.600</b></td>
|
| 65 |
+
</tr>
|
| 66 |
+
</tbody>
|
| 67 |
+
</table>
|
| 68 |
+
|
| 69 |
+
Note: If your results differ from the reported numbers in the table above (e.g., 3.78 → ~3.90 for CD-J2J on ModelsResource), check your version of `transformers`, which may cause the following warnings:
|
| 70 |
+
```
|
| 71 |
+
Flash Attention 2.0 only supports torch.float16 and torch.bfloat16 dtypes, but the current dype in SkeletonOPTModel is torch.float32. You should run training or inference using Automatic Mixed-Precision via the with torch.autocast(device_type='torch_device'): decorator, or load the model with the torch_dtype argument. Example: model = AutoModel.from_pretrained("openai/whisper-tiny", attn_implementation="flash_attention_2", torch_dtype=torch.float16)
|
| 72 |
+
Flash Attention 2.0 only supports torch.float16 and torch.bfloat16 dtypes, but the current dype in SkeletonOPTDecoder is torch.float32. You should run training or inference using Automatic Mixed-Precision via the with torch.autocast(device_type='torch_device'): decorator, or load the model with the torch_dtype argument. Example: model = AutoModel.from_pretrained("openai/whisper-tiny", attn_implementation="flash_attention_2", torch_dtype=torch.float16)
|
| 73 |
+
```
|
| 74 |
+
These results were obtained using `CUDA 11.8`. We observed that switching to CUDA 12.1 or other versions, while keeping all package versions identical, resulted in slight numerical variations.
|
| 75 |
+
|
| 76 |
+
## Demo
|
| 77 |
+
We provide some examples (download [here](https://drive.google.com/file/d/1bjtA3JSqW-t0YoSd2vOZy3iKvuOMLIrm/view?usp=sharing)) to test our models by running the following command. You can also test our models on your own 3D objects; remember to change the `input_dir`.
|
| 78 |
+
```
|
| 79 |
+
bash demo.sh
|
| 80 |
+
```
|
| 81 |
+
|
| 82 |
+
Input mesh quality directly affects model performance, since the pre-trained shape encoder was trained on high-quality meshes. You can test reconstruction using the shape latents extracted from the shape encoder to check your data. The example below shows results from an input mesh with a coarse surface.
|
| 83 |
+
|
| 84 |
+
<p align="center">
|
| 85 |
+
<img width="80%" src="../assets/reconstruction.png"/>
|
| 86 |
+
</p>
|
| 87 |
+
|
| 88 |
+
## Visualization
|
| 89 |
+
We use MeshLab for skeleton visualization in the paper. The skeleton can be saved using `save_skeleton_obj` in `utils/save_utils.py`. Bones are represented as blue cones oriented from the parent joint to the child joint, joints as red spheres, and the root joint as a green sphere. Example results are shown below.
|
| 90 |
+
|
| 91 |
+
<p align="center">
|
| 92 |
+
<img width="80%" src="../assets/skeleton_results.png"/>
|
| 93 |
+
</p>
|
third_party/Puppeteer/skeleton/data_utils/README.md
ADDED
|
@@ -0,0 +1,43 @@
|
| 1 |
+
## Preprocessed data
|
| 2 |
+
We provide the preprocessed data saved in NPZ files, which contain the following information:
|
| 3 |
+
```
|
| 4 |
+
'vertices', 'faces', 'normals', 'joints', 'bones', 'root_index', 'uuid', 'pc_w_norm', 'joint_names', 'skinning_weights_value', 'skinning_weights_rows', 'skinning_weights_cols', 'skinning_weights_shape'
|
| 5 |
+
```
|
| 6 |
+
You can check `read_npz.py` for how to read the NPZ files and `save_npz.py` for how we save them.
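As a rough sketch of what reading one of these files can look like (key names are taken from the list above; the sparse reconstruction of the skinning weights and the file name are assumptions, so see `read_npz.py` for the authoritative version):

```python
import numpy as np
import scipy.sparse as sp

data = np.load("example.npz", allow_pickle=True)  # hypothetical file name
vertices = data["vertices"]   # mesh vertices
faces = data["faces"]         # mesh faces
joints = data["joints"]       # joint positions
bones = data["bones"]         # joint-index pairs describing the skeleton (assumed layout)

# Skinning weights are stored as flat sparse-matrix components (assumed COO layout).
skin = sp.coo_matrix(
    (data["skinning_weights_value"],
     (data["skinning_weights_rows"], data["skinning_weights_cols"])),
    shape=tuple(data["skinning_weights_shape"]),
).toarray()  # dense (num_vertices, num_joints) weight matrix
```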
|
| 7 |
+
|
| 8 |
+
Before saving them into NPZ files, we extract the mesh (.obj) and rig (.txt) from the 3D models downloaded from Objaverse-XL using Blender. The rig file follows the format in [RigNet](https://github.com/zhan-xu/RigNet), which includes the following entries:
|
| 9 |
+
```
|
| 10 |
+
joints [joint_name] [x] [y] [z]
|
| 11 |
+
root [root_joint_name]
|
| 12 |
+
skin [vertex_index] [joints_name1] [skinning_weight1] [joints_name2] [skinning_weight2] ...
|
| 13 |
+
hier [parent_joint_name] [child_joint_name]
|
| 14 |
+
```
|
| 15 |
+
For an example, please see `examples/0a59c5ffa4a1476bac6d540b79947f31.txt`.
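A minimal, hypothetical parser for this rig format (field meanings follow the entries listed above; this helper is not part of the repository):

```python
def load_rig_txt(path):
    """Parse a RigNet-style rig file into joints, root, skinning and hierarchy."""
    joints, skin, hier, root = {}, [], [], None
    with open(path) as f:
        for line in f:
            parts = line.split()
            if not parts:
                continue
            if parts[0] == "joints":
                joints[parts[1]] = [float(v) for v in parts[2:5]]
            elif parts[0] == "root":
                root = parts[1]
            elif parts[0] == "skin":
                pairs = list(zip(parts[2::2], map(float, parts[3::2])))
                skin.append((int(parts[1]), pairs))  # (vertex index, [(joint name, weight), ...])
            elif parts[0] == "hier":
                hier.append((parts[1], parts[2]))    # (parent joint, child joint)
    return joints, root, skin, hier
```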
|
| 16 |
+
|
| 17 |
+
If you want to convert an NPZ file back to OBJ and TXT files, we give an example; run:
|
| 18 |
+
```
|
| 19 |
+
python convert_npz_to_mesh_rig.py
|
| 20 |
+
```
|
| 21 |
+
|
| 22 |
+
## Visualization
|
| 23 |
+
We provide a method for visualizing 3D models with their skeletons using [Pyrender](https://github.com/mmatl/pyrender), modified from [Lab4D](https://github.com/lab4d-org/lab4d/tree/ppr/). This visualization also serves as the input to the VLM for skeleton quality rating. Make sure you have installed the following packages before running the visualization:
|
| 24 |
+
```
|
| 25 |
+
pip install trimesh opencv-python pyrender
|
| 26 |
+
```
|
| 27 |
+
|
| 28 |
+
We provide an example to demonstrate the process. For this example, we prepare an OBJ file along with a TXT file containing rigging information. Then, run:
|
| 29 |
+
```
|
| 30 |
+
python render_data.py
|
| 31 |
+
```
|
| 32 |
+
You will obtain the following outputs:
|
| 33 |
+
|
| 34 |
+
<p align="center">
|
| 35 |
+
<img width="80%" src="examples/0a59c5ffa4a1476bac6d540b79947f31_render_results.png"/>
|
| 36 |
+
</p>
|
| 37 |
+
|
| 38 |
+
### Reading rig and mesh from GLBs
|
| 39 |
+
We provide the script we use for extracting the rig (.txt) and mesh (.obj) from GLB files. You can run:
|
| 40 |
+
```
|
| 41 |
+
python read_rig_mesh_from_glb.py
|
| 42 |
+
```
|
| 43 |
+
Remember to download Blender (we use 4.2.0) and also install bpy in your conda environment.
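For example, assuming a matching `bpy` wheel is available for your platform, the setup and run might look like this (the version pin is illustrative):
```
pip install bpy==4.2.0
python read_rig_mesh_from_glb.py
```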
|
third_party/Puppeteer/skeleton/data_utils/convert_npz_to_mesh_rig.py
ADDED
|
@@ -0,0 +1,107 @@
|
| 1 |
+
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
"""
|
| 15 |
+
You can convert an npz file back to obj (mesh) and txt (rig) files using this Python script.
|
| 16 |
+
"""
|
| 17 |
+
import os
|
| 18 |
+
import numpy as np
|
| 19 |
+
import scipy.sparse as sp
|
| 20 |
+
|
| 21 |
+
def export_obj(vertices, faces, normals, output_path):
|
| 22 |
+
with open(output_path, 'w') as f:
|
| 23 |
+
for v in vertices:
|
| 24 |
+
f.write(f"v {v[0]} {v[1]} {v[2]}\n")
|
| 25 |
+
for n in normals:
|
| 26 |
+
f.write(f"vn {n[0]} {n[1]} {n[2]}\n")
|
| 27 |
+
for i, face in enumerate(faces):
|
| 28 |
+
# OBJ format is 1-based, so we add 1 to all indices
|
| 29 |
+
f.write(f"f {face[0]+1}//{face[0]+1} {face[1]+1}//{face[1]+1} {face[2]+1}//{face[2]+1}\n")
|
| 30 |
+
|
| 31 |
+
def export_rig_txt(joints, bones, root_index, joint_names, skinning_weights, output_path):
|
| 32 |
+
"""
|
| 33 |
+
joints [joint_name] [x] [y] [z]
|
| 34 |
+
root [root_joint_name]
|
| 35 |
+
skin [vertex_index] [joint_name1] [weight1] [joint_name2] [weight2] ...
|
| 36 |
+
hier [parent_joint_name] [child_joint_name]
|
| 37 |
+
"""
|
| 38 |
+
n_joints = len(joints)
|
| 39 |
+
n_verts = skinning_weights.shape[0] # (n_vertex, n_joints)
|
| 40 |
+
|
| 41 |
+
with open(output_path, 'w') as f:
|
| 42 |
+
# 1) joints
|
| 43 |
+
for i in range(n_joints):
|
| 44 |
+
x, y, z = joints[i]
|
| 45 |
+
jn = joint_names[i]
|
| 46 |
+
f.write(f"joints {jn} {x} {y} {z}\n")
|
| 47 |
+
|
| 48 |
+
# 2) root
|
| 49 |
+
root_name = joint_names[root_index]
|
| 50 |
+
f.write(f"root {root_name}\n")
|
| 51 |
+
|
| 52 |
+
# 3) skin
|
| 53 |
+
for vidx in range(n_verts):
|
| 54 |
+
row_weights = skinning_weights[vidx]
|
| 55 |
+
non_zero_indices = np.where(row_weights != 0)[0]
|
| 56 |
+
if len(non_zero_indices) == 0:
|
| 57 |
+
continue
|
| 58 |
+
|
| 59 |
+
line_parts = [f"skin {vidx}"] # vertex_idx
|
| 60 |
+
for jidx in non_zero_indices:
|
| 61 |
+
w = row_weights[jidx]
|
| 62 |
+
jn = joint_names[jidx]
|
| 63 |
+
line_parts.append(jn)
|
| 64 |
+
line_parts.append(str(w))
|
| 65 |
+
|
| 66 |
+
f.write(" ".join(line_parts) + "\n")
|
| 67 |
+
|
| 68 |
+
# 4) hier
|
| 69 |
+
for p_idx, c_idx in bones:
|
| 70 |
+
p_name = joint_names[p_idx]
|
| 71 |
+
c_name = joint_names[c_idx]
|
| 72 |
+
f.write(f"hier {p_name} {c_name}\n")
|
| 73 |
+
|
| 74 |
+
if __name__ == "__main__":
|
| 75 |
+
|
| 76 |
+
data = np.load('articulation_xlv2_test.npz', allow_pickle=True)
|
| 77 |
+
data_list = data['arr_0']
|
| 78 |
+
|
| 79 |
+
print(f"Loaded {len(data_list)} data entries")
|
| 80 |
+
|
| 81 |
+
model_data = data_list[0]
|
| 82 |
+
print("Data keys:", model_data.keys())
|
| 83 |
+
# 'vertices', 'faces', 'normals', 'joints', 'bones', 'root_index', 'uuid', 'joint_names',
|
| 84 |
+
# 'skinning_weights_value', 'skinning_weights_row', 'skinning_weights_col', 'skinning_weights_shape'
|
| 85 |
+
|
| 86 |
+
vertices = model_data['vertices'] # (n_vertex, 3)
|
| 87 |
+
faces = model_data['faces'] # (n_faces, 3)
|
| 88 |
+
normals = model_data['normals'] # (n_vertex, 3)
|
| 89 |
+
joints = model_data['joints'] # (n_joints, 3)
|
| 90 |
+
bones = model_data['bones'] # (n_bones, 2)
|
| 91 |
+
root_index = model_data['root_index'] # int
|
| 92 |
+
joint_names = model_data['joint_names'] # list of str
|
| 93 |
+
uuid_str = model_data['uuid']
|
| 94 |
+
|
| 95 |
+
skin_val = model_data['skinning_weights_value']
|
| 96 |
+
skin_row = model_data['skinning_weights_row']
|
| 97 |
+
skin_col = model_data['skinning_weights_col']
|
| 98 |
+
skin_shape = model_data['skinning_weights_shape']
|
| 99 |
+
skin_sparse = sp.coo_matrix((skin_val, (skin_row, skin_col)), shape=skin_shape)
|
| 100 |
+
skinning_weights = skin_sparse.toarray() # (n_vertex, n_joints)
|
| 101 |
+
|
| 102 |
+
obj_path = f"{uuid_str}.obj"
|
| 103 |
+
export_obj(vertices, faces, normals, obj_path)
|
| 104 |
+
rig_txt_path = f"{uuid_str}.txt"
|
| 105 |
+
export_rig_txt(joints, bones, root_index, joint_names, skinning_weights, rig_txt_path)
|
| 106 |
+
|
| 107 |
+
print("Done!")
|
third_party/Puppeteer/skeleton/data_utils/data_loader.py
ADDED
|
@@ -0,0 +1,122 @@
|
| 1 |
+
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
import json
|
| 16 |
+
import glob
|
| 17 |
+
import numpy as np
|
| 18 |
+
import trimesh
|
| 19 |
+
|
| 20 |
+
class DataLoader:
|
| 21 |
+
def __init__(self):
|
| 22 |
+
self.joint_name_to_idx = {}
|
| 23 |
+
|
| 24 |
+
def load_rig_data(self, rig_path):
|
| 25 |
+
joints = []
|
| 26 |
+
joints_names = []
|
| 27 |
+
bones = []
|
| 28 |
+
|
| 29 |
+
with open(rig_path, 'r') as f:
|
| 30 |
+
for line in f:
|
| 31 |
+
parts = line.strip().split()
|
| 32 |
+
if parts[0] == 'joints':
|
| 33 |
+
joint_name = parts[1]
|
| 34 |
+
joint_pos = [float(parts[2]), float(parts[3]), float(parts[4])]
|
| 35 |
+
self.joint_name_to_idx[joint_name] = len(joints)
|
| 36 |
+
joints.append(joint_pos)
|
| 37 |
+
joints_names.append(joint_name)
|
| 38 |
+
elif parts[0] == 'root':
|
| 39 |
+
self.root_name = parts[1]
|
| 40 |
+
elif parts[0] == 'hier':
|
| 41 |
+
parent_joint = self.joint_name_to_idx[parts[1]]
|
| 42 |
+
child_joint = self.joint_name_to_idx[parts[2]]
|
| 43 |
+
bones.append([parent_joint, child_joint])
|
| 44 |
+
|
| 45 |
+
self.joints = np.array(joints)
|
| 46 |
+
self.bones = np.array(bones)
|
| 47 |
+
self.joints_names = joints_names
|
| 48 |
+
self.root_idx = None
|
| 49 |
+
if self.root_name is not None:
|
| 50 |
+
self.root_idx = self.joint_name_to_idx[self.root_name]
|
| 51 |
+
|
| 52 |
+
def load_mesh(self, mesh_path):
|
| 53 |
+
mesh = trimesh.load(mesh_path, process=False)
|
| 54 |
+
mesh.visual.vertex_colors[:, 3] = 100 # set transparency
|
| 55 |
+
self.mesh = mesh
|
| 56 |
+
|
| 57 |
+
# Compute the centroid normal of the mesh
|
| 58 |
+
v = self.mesh.vertices
|
| 59 |
+
xmin, ymin, zmin = v.min(axis=0)
|
| 60 |
+
xmax, ymax, zmax = v.max(axis=0)
|
| 61 |
+
self.bbox_center = np.array([(xmax + xmin)/2, (ymax + ymin)/2, (zmax + zmin)/2])
|
| 62 |
+
self.bbox_size = np.array([xmax - xmin, ymax - ymin, zmax - zmin])
|
| 63 |
+
self.bbox_scale = max(xmax - xmin, ymax - ymin, zmax - zmin)
|
| 64 |
+
|
| 65 |
+
normal = mesh.center_mass - self.bbox_center
|
| 66 |
+
normal = normal / (np.linalg.norm(normal)+1e-5)
|
| 67 |
+
|
| 68 |
+
# Choose axis order based on normal direction
|
| 69 |
+
if abs(normal[1]) > abs(normal[2]): # if Y component is dominant
|
| 70 |
+
self.axis_order = [0, 1, 2] # keep default order
|
| 71 |
+
else:
|
| 72 |
+
self.axis_order = [0, 2, 1] # swap Y and Z
|
| 73 |
+
|
| 74 |
+
self.mesh.vertices = self.mesh.vertices[:, self.axis_order]
|
| 75 |
+
self.joints = self.joints[:, self.axis_order]
|
| 76 |
+
self.normalize_coordinates()
|
| 77 |
+
|
| 78 |
+
def normalize_coordinates(self):
|
| 79 |
+
|
| 80 |
+
# Compute scale and offset
|
| 81 |
+
scale = 1.0 / (self.bbox_scale+1e-5)
|
| 82 |
+
offset = -self.bbox_center
|
| 83 |
+
|
| 84 |
+
self.mesh.vertices = (self.mesh.vertices + offset) * scale
|
| 85 |
+
self.joints = (self.joints + offset) * scale
|
| 86 |
+
|
| 87 |
+
# Calculate appropriate radii based on the mean size
|
| 88 |
+
self.joint_radius = 0.01
|
| 89 |
+
self.bone_radius = 0.005
|
| 90 |
+
|
| 91 |
+
def query_mesh_rig(self):
|
| 92 |
+
|
| 93 |
+
input_dict = {"shape": self.mesh}
|
| 94 |
+
|
| 95 |
+
# Create joints as spheres
|
| 96 |
+
joint_meshes = []
|
| 97 |
+
for i, joint in enumerate(self.joints):
|
| 98 |
+
|
| 99 |
+
sphere = trimesh.creation.icosphere(
|
| 100 |
+
radius=self.joint_radius, subdivisions=2
|
| 101 |
+
)
|
| 102 |
+
sphere.apply_translation(joint)
|
| 103 |
+
if i == self.root_idx:
|
| 104 |
+
# root green
|
| 105 |
+
sphere.visual.vertex_colors = [0, 255, 0, 255]
|
| 106 |
+
else:
|
| 107 |
+
sphere.visual.vertex_colors = [0, 0, 255, 255]
|
| 108 |
+
|
| 109 |
+
joint_meshes.append(sphere)
|
| 110 |
+
input_dict["joint_meshes"] = trimesh.util.concatenate(joint_meshes)
|
| 111 |
+
|
| 112 |
+
# Create bones as cylinders
|
| 113 |
+
bone_meshes = []
|
| 114 |
+
for bone in self.bones:
|
| 115 |
+
start, end = self.joints[bone[0]], self.joints[bone[1]]
|
| 116 |
+
cylinder = trimesh.creation.cylinder(radius=self.bone_radius, segment=np.array([[0, 0, 0], end - start]))
|
| 117 |
+
cylinder.apply_translation(start)
|
| 118 |
+
cylinder.visual.vertex_colors = [255, 0, 0, 255] # red (alternative: [0, 0, 255, 255] for blue)
|
| 119 |
+
bone_meshes.append(cylinder)
|
| 120 |
+
input_dict["bone_meshes"] = trimesh.util.concatenate(bone_meshes)
|
| 121 |
+
|
| 122 |
+
return input_dict
|
third_party/Puppeteer/skeleton/data_utils/pyrender_wrapper.py
ADDED
|
@@ -0,0 +1,144 @@
|
| 1 |
+
# Copyright (c) 2023 Gengshan Yang
|
| 2 |
+
# Copyright (c) 2025 ByteDance Ltd. and/or its affiliates.
|
| 3 |
+
# SPDX-License-Identifier: MIT
|
| 4 |
+
#
|
| 5 |
+
# This file has been modified by ByteDance Ltd. and/or its affiliates. on 2025.09.04
|
| 6 |
+
#
|
| 7 |
+
# Original file was released under MIT, with the full license text
|
| 8 |
+
# available at https://github.com/lab4d-org/lab4d/blob/main/LICENSE.
|
| 9 |
+
#
|
| 10 |
+
# This modified file is released under the same license.
|
| 11 |
+
|
| 12 |
+
import os
|
| 13 |
+
import numpy as np
|
| 14 |
+
import cv2
|
| 15 |
+
import pyrender
|
| 16 |
+
import trimesh
|
| 17 |
+
from pyrender import (
|
| 18 |
+
IntrinsicsCamera,
|
| 19 |
+
Mesh,
|
| 20 |
+
Node,
|
| 21 |
+
Scene,
|
| 22 |
+
OffscreenRenderer,
|
| 23 |
+
MetallicRoughnessMaterial,
|
| 24 |
+
RenderFlags
|
| 25 |
+
)
|
| 26 |
+
|
| 27 |
+
os.environ["PYOPENGL_PLATFORM"] = "egl"
|
| 28 |
+
|
| 29 |
+
def look_at(eye, center, up):
|
| 30 |
+
"""Create a look-at (view) matrix."""
|
| 31 |
+
f = np.array(center, dtype=np.float32) - np.array(eye, dtype=np.float32)
|
| 32 |
+
f /= np.linalg.norm(f)
|
| 33 |
+
|
| 34 |
+
u = np.array(up, dtype=np.float32)
|
| 35 |
+
u /= np.linalg.norm(u)
|
| 36 |
+
|
| 37 |
+
s = np.cross(f, u)
|
| 38 |
+
u = np.cross(s, f)
|
| 39 |
+
|
| 40 |
+
m = np.identity(4, dtype=np.float32)
|
| 41 |
+
m[0, :3] = s
|
| 42 |
+
m[1, :3] = u
|
| 43 |
+
m[2, :3] = -f
|
| 44 |
+
m[:3, 3] = -np.matmul(m[:3, :3], np.array(eye, dtype=np.float32))
|
| 45 |
+
|
| 46 |
+
return m
|
| 47 |
+
|
| 48 |
+
class PyRenderWrapper:
|
| 49 |
+
def __init__(self, image_size=(1024, 1024)) -> None:
|
| 50 |
+
# renderer
|
| 51 |
+
self.image_size = image_size
|
| 52 |
+
render_size = max(image_size)
|
| 53 |
+
self.r = OffscreenRenderer(render_size, render_size)
|
| 54 |
+
self.intrinsics = IntrinsicsCamera(
|
| 55 |
+
render_size, render_size, render_size / 2, render_size / 2
|
| 56 |
+
)
|
| 57 |
+
# light
|
| 58 |
+
self.light_pose = np.eye(4)
|
| 59 |
+
self.set_light_topdown()
|
| 60 |
+
self.direc_l = pyrender.DirectionalLight(color=np.ones(3), intensity=5.0)
|
| 61 |
+
self.material = MetallicRoughnessMaterial(
|
| 62 |
+
roughnessFactor=0.75, metallicFactor=0.75, alphaMode="BLEND"
|
| 63 |
+
)
|
| 64 |
+
self.init_camera()
|
| 65 |
+
|
| 66 |
+
def init_camera(self):
|
| 67 |
+
self.flip_pose = np.eye(4)
|
| 68 |
+
self.set_camera(np.eye(4))
|
| 69 |
+
|
| 70 |
+
def set_camera(self, scene_to_cam):
|
| 71 |
+
# object to camera transforms
|
| 72 |
+
self.scene_to_cam = self.flip_pose @ scene_to_cam
|
| 73 |
+
|
| 74 |
+
def set_light_topdown(self, gl=False):
|
| 75 |
+
# top down light, slightly closer to the camera
|
| 76 |
+
if gl:
|
| 77 |
+
rot = cv2.Rodrigues(np.asarray([-np.pi / 2, 0, 0]))[0]
|
| 78 |
+
else:
|
| 79 |
+
rot = cv2.Rodrigues(np.asarray([np.pi / 2, 0, 0]))[0]
|
| 80 |
+
self.light_pose[:3, :3] = rot
|
| 81 |
+
|
| 82 |
+
def align_light_to_camera(self):
|
| 83 |
+
self.light_pose = np.linalg.inv(self.scene_to_cam)
|
| 84 |
+
|
| 85 |
+
def set_intrinsics(self, intrinsics):
|
| 86 |
+
"""
|
| 87 |
+
Args:
|
| 88 |
+
intrinsics: (4,) fx,fy,px,py
|
| 89 |
+
"""
|
| 90 |
+
self.intrinsics = IntrinsicsCamera(
|
| 91 |
+
intrinsics[0], intrinsics[1], intrinsics[2], intrinsics[3]
|
| 92 |
+
)
|
| 93 |
+
|
| 94 |
+
def get_cam_to_scene(self):
|
| 95 |
+
cam_to_scene = np.eye(4)
|
| 96 |
+
cam_to_scene[:3, :3] = self.scene_to_cam[:3, :3].T
|
| 97 |
+
cam_to_scene[:3, 3] = -self.scene_to_cam[:3, :3].T @ self.scene_to_cam[:3, 3]
|
| 98 |
+
return cam_to_scene
|
| 99 |
+
|
| 100 |
+
def set_camera_view(self, angle, bbox_center, distance=2.0):
|
| 101 |
+
# Calculate camera position based on angle and distance from bounding box center
|
| 102 |
+
camera_position = bbox_center + distance * np.array([np.sin(angle), 0, np.cos(angle)], dtype=np.float32)
|
| 103 |
+
look_at_matrix = look_at(camera_position, bbox_center, [0, 1, 0])
|
| 104 |
+
self.scene_to_cam = look_at_matrix @ self.flip_pose
|
| 105 |
+
|
| 106 |
+
def render(self, input_dict):
|
| 107 |
+
# Create separate scenes for transparent objects (mesh) and solid objects (joints and bones)
|
| 108 |
+
scene_transparent = Scene(ambient_light=np.array([1.0, 1.0, 1.0, 1.0]) * 0.1)
|
| 109 |
+
scene_solid = Scene(ambient_light=np.array([1.0, 1.0, 1.0, 1.0]) * 0.1)
|
| 110 |
+
|
| 111 |
+
mesh_pyrender = Mesh.from_trimesh(input_dict["shape"], smooth=False)
|
| 112 |
+
mesh_pyrender.primitives[0].material = self.material
|
| 113 |
+
scene_transparent.add(mesh_pyrender, pose=np.eye(4), name="shape")
|
| 114 |
+
|
| 115 |
+
if "joint_meshes" in input_dict:
|
| 116 |
+
joints_pyrender = Mesh.from_trimesh(input_dict["joint_meshes"], smooth=False)
|
| 117 |
+
joints_pyrender.primitives[0].material = self.material
|
| 118 |
+
scene_solid.add(joints_pyrender, pose=np.eye(4), name="joints")
|
| 119 |
+
|
| 120 |
+
if "bone_meshes" in input_dict:
|
| 121 |
+
bones_pyrender = Mesh.from_trimesh(input_dict["bone_meshes"], smooth=False)
|
| 122 |
+
bones_pyrender.primitives[0].material = self.material
|
| 123 |
+
scene_solid.add(bones_pyrender, pose=np.eye(4), name="bones")
|
| 124 |
+
|
| 125 |
+
# Camera for both scenes
|
| 126 |
+
scene_transparent.add(self.intrinsics, pose=self.get_cam_to_scene())
|
| 127 |
+
scene_solid.add(self.intrinsics, pose=self.get_cam_to_scene())
|
| 128 |
+
|
| 129 |
+
# Light for both scenes
|
| 130 |
+
scene_transparent.add(self.direc_l, pose=self.light_pose)
|
| 131 |
+
scene_solid.add(self.direc_l, pose=self.light_pose)
|
| 132 |
+
|
| 133 |
+
# Render transparent scene first
|
| 134 |
+
color_transparent, depth_transparent = self.r.render(scene_transparent)
|
| 135 |
+
|
| 136 |
+
# Render solid scene on top
|
| 137 |
+
color_solid, depth_solid = self.r.render(scene_solid)
|
| 138 |
+
|
| 139 |
+
# Combine the two scenes
|
| 140 |
+
color_combined = np.where(depth_solid[..., np.newaxis] == 0, color_transparent, color_solid)
|
| 141 |
+
|
| 142 |
+
return color_combined, depth_solid
|
| 143 |
+
def delete(self):
|
| 144 |
+
self.r.delete()
|
third_party/Puppeteer/skeleton/data_utils/read_npz.py
ADDED
|
@@ -0,0 +1,43 @@
|
| 1 |
+
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
import numpy as np
|
| 15 |
+
import scipy.sparse as sp
|
| 16 |
+
|
| 17 |
+
# Load the NPZ file
|
| 18 |
+
data = np.load('articulation_xlv2_test.npz', allow_pickle=True)
|
| 19 |
+
data_list = data['arr_0']
|
| 20 |
+
|
| 21 |
+
print(f"Loaded {len(data_list)} data entries")
|
| 22 |
+
print(f"Data keys: {data_list[0].keys()}")
|
| 23 |
+
# 'vertices', 'faces', 'normals', 'joints', 'bones', 'root_index', 'uuid', 'pc_w_norm', 'joint_names', 'skinning_weights_value',
|
| 24 |
+
# 'skinning_weights_row', 'skinning_weights_col', 'skinning_weights_shape'
|
| 25 |
+
|
| 26 |
+
data = data_list[0] # check the first data
|
| 27 |
+
|
| 28 |
+
vertices = data['vertices'] # (n_vertex, 3)
|
| 29 |
+
faces = data['faces'] # (n_faces, 3)
|
| 30 |
+
normals = data['normals'] # (n_vertex, 3)
|
| 31 |
+
joints = data['joints'] # (n_joints, 3)
|
| 32 |
+
bones = data['bones'] # (n_bones, 2)
|
| 33 |
+
pc_w_norm = data['pc_w_norm'] # (8192, 6)
|
| 34 |
+
|
| 35 |
+
# Extract the sparse skinning weights components
|
| 36 |
+
skinning_data = data['skinning_weights_value']
|
| 37 |
+
skinning_rows = data['skinning_weights_row']
|
| 38 |
+
skinning_cols = data['skinning_weights_col']
|
| 39 |
+
skinning_shape = data['skinning_weights_shape']
|
| 40 |
+
|
| 41 |
+
skinning_sparse = sp.coo_matrix((skinning_data, (skinning_rows, skinning_cols)), shape=skinning_shape)
|
| 42 |
+
skinning_weights = skinning_sparse.toarray() # (n_vertex, n_joints)
|
| 43 |
+
|
third_party/Puppeteer/skeleton/data_utils/read_rig_mesh_from_glb.py
ADDED
|
@@ -0,0 +1,198 @@
|
| 1 |
+
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
|
| 15 |
+
"""
|
| 16 |
+
Blender script for extracting rig (.txt) and mesh (.obj) from glbs.
|
| 17 |
+
This code currently supports GLB files only, but it can be modified to load other formats (e.g., FBX, DAE) with minimal changes.
|
| 18 |
+
"""
|
| 19 |
+
|
| 20 |
+
import bpy
|
| 21 |
+
import os
|
| 22 |
+
import re
|
| 23 |
+
import json
|
| 24 |
+
import pickle
|
| 25 |
+
|
| 26 |
+
def get_hierarchy_root_joint(joint):
|
| 27 |
+
"""
|
| 28 |
+
Function to find the top parent joint node from the given
|
| 29 |
+
'joint' Blender node (armature bone).
|
| 30 |
+
"""
|
| 31 |
+
root_joint = joint
|
| 32 |
+
while root_joint.parent is not None:
|
| 33 |
+
root_joint = root_joint.parent
|
| 34 |
+
return root_joint
|
| 35 |
+
|
| 36 |
+
def get_meshes_and_armatures():
|
| 37 |
+
"""
|
| 38 |
+
Function to get all meshes and armatures in the scene
|
| 39 |
+
"""
|
| 40 |
+
default_objects = ['Cube', 'Light', 'Camera', 'Icosphere']
|
| 41 |
+
for obj_name in default_objects:
|
| 42 |
+
if obj_name in bpy.data.objects:
|
| 43 |
+
bpy.data.objects.remove(bpy.data.objects[obj_name], do_unlink=True)
|
| 44 |
+
|
| 45 |
+
meshes = [obj for obj in bpy.context.scene.objects if obj.type == 'MESH']
|
| 46 |
+
armatures = [obj for obj in bpy.context.scene.objects if obj.type == 'ARMATURE']
|
| 47 |
+
return meshes, armatures
|
| 48 |
+
|
| 49 |
+
def get_joint_dict(root):
|
| 50 |
+
"""
|
| 51 |
+
Function to create a dictionary of joints from the root joint
|
| 52 |
+
"""
|
| 53 |
+
joint_pos = {}
|
| 54 |
+
def traverse_bone(bone):
|
| 55 |
+
joint_pos[bone.name] = {
|
| 56 |
+
'pos': bone.head_local,
|
| 57 |
+
'pa': bone.parent.name if bone.parent else 'None',
|
| 58 |
+
'ch': [child.name for child in bone.children]
|
| 59 |
+
}
|
| 60 |
+
for child in bone.children:
|
| 61 |
+
traverse_bone(child)
|
| 62 |
+
|
| 63 |
+
traverse_bone(root)
|
| 64 |
+
return joint_pos
|
| 65 |
+
|
| 66 |
+
def record_info(root, joint_dict, meshes, mesh_vert_offsets, file_info):
|
| 67 |
+
"""
|
| 68 |
+
- root: root joint
|
| 69 |
+
- joint_dict
|
| 70 |
+
- meshes
|
| 71 |
+
- mesh_vert_offsets: for multi-geometry
|
| 72 |
+
- file_info
|
| 73 |
+
"""
|
| 74 |
+
skin_records = {}
|
| 75 |
+
|
| 76 |
+
def replace_special_characters(name):
|
| 77 |
+
return re.sub(r'\W+', '_', name)
|
| 78 |
+
|
| 79 |
+
for key, val in joint_dict.items():
|
| 80 |
+
modified_key = replace_special_characters(key)
|
| 81 |
+
file_info.write(f'joints {modified_key} {val["pos"][0]:.8f} {val["pos"][1]:.8f} {val["pos"][2]:.8f}\n')
|
| 82 |
+
file_info.write(f'root {replace_special_characters(root.name)}\n')
|
| 83 |
+
|
| 84 |
+
for mesh_index, mesh in enumerate(meshes):
|
| 85 |
+
vert_offset = mesh_vert_offsets[mesh_index]
|
| 86 |
+
if mesh.type == 'MESH':
|
| 87 |
+
for vtx in mesh.data.vertices:
|
| 88 |
+
weights = {}
|
| 89 |
+
for group in vtx.groups:
|
| 90 |
+
bone_name = replace_special_characters(mesh.vertex_groups[group.group].name)
|
| 91 |
+
weights[bone_name] = group.weight
|
| 92 |
+
|
| 93 |
+
global_vertex_index = vert_offset + vtx.index
|
| 94 |
+
|
| 95 |
+
skin_record = f"skin {global_vertex_index} " + " ".join(f"{bone} {weight:.4f}" for bone, weight in weights.items())
|
| 96 |
+
|
| 97 |
+
if global_vertex_index not in skin_records:
|
| 98 |
+
skin_records[global_vertex_index] = skin_record
|
| 99 |
+
file_info.write(skin_record + "\n")
|
| 100 |
+
|
| 101 |
+
for key, val in joint_dict.items():
|
| 102 |
+
if val['pa'] != 'None':
|
| 103 |
+
parent_name = replace_special_characters(val['pa'])
|
| 104 |
+
child_name = replace_special_characters(key)
|
| 105 |
+
file_info.write(f'hier {parent_name} {child_name}\n')
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def record_obj(meshes, file_obj):
|
| 109 |
+
vert_offset = 0
|
| 110 |
+
norm_offset = 0
|
| 111 |
+
mesh_vert_offsets = []
|
| 112 |
+
|
| 113 |
+
for mesh in meshes:
|
| 114 |
+
mesh_vert_offsets.append(vert_offset)
|
| 115 |
+
bpy.context.view_layer.objects.active = mesh
|
| 116 |
+
bpy.ops.object.mode_set(mode='OBJECT')
|
| 117 |
+
|
| 118 |
+
# vertex
|
| 119 |
+
for v in mesh.data.vertices:
|
| 120 |
+
file_obj.write(f"v {v.co[0]} {v.co[1]} {v.co[2]}\n")
|
| 121 |
+
file_obj.write("\n")
|
| 122 |
+
|
| 123 |
+
# normal
|
| 124 |
+
for vn in mesh.data.vertices:
|
| 125 |
+
normal = vn.normal
|
| 126 |
+
file_obj.write(f"vn {normal[0]} {normal[1]} {normal[2]}\n")
|
| 127 |
+
file_obj.write("\n")
|
| 128 |
+
|
| 129 |
+
# face
|
| 130 |
+
for poly in mesh.data.polygons:
|
| 131 |
+
verts = [v + 1 + vert_offset for v in poly.vertices]
|
| 132 |
+
file_obj.write(f"f {verts[0]}//{verts[0]} {verts[1]}//{verts[1]} {verts[2]}//{verts[2]}\n")
|
| 133 |
+
|
| 134 |
+
vert_count = len(mesh.data.vertices)
|
| 135 |
+
vert_offset += vert_count
|
| 136 |
+
norm_offset += vert_count
|
| 137 |
+
|
| 138 |
+
return mesh_vert_offsets
|
| 139 |
+
|
| 140 |
+
def process_glb(glb_path, rigs_dir, meshes_dir):
|
| 141 |
+
base_name = os.path.splitext(os.path.basename(glb_path))[0]
|
| 142 |
+
|
| 143 |
+
obj_name = os.path.join(meshes_dir, f'{base_name}.obj')
|
| 144 |
+
info_name = os.path.join(rigs_dir, f'{base_name}.txt')
|
| 145 |
+
|
| 146 |
+
# Skip processing if rig info file already exists
|
| 147 |
+
if os.path.exists(info_name):
|
| 148 |
+
print(f"{info_name} already exists. Skipping...")
|
| 149 |
+
return
|
| 150 |
+
|
| 151 |
+
if os.path.exists(obj_name):
|
| 152 |
+
print(f"{obj_name} already exists. Skipping...")
|
| 153 |
+
return
|
| 154 |
+
|
| 155 |
+
bpy.ops.wm.read_factory_settings(use_empty=True)
|
| 156 |
+
bpy.ops.import_scene.gltf(filepath=glb_path)
|
| 157 |
+
|
| 158 |
+
meshes, armatures = get_meshes_and_armatures()
|
| 159 |
+
|
| 160 |
+
if not armatures:
|
| 161 |
+
print(f"No armatures found in {glb_path}. Skipping...")
|
| 162 |
+
return
|
| 163 |
+
|
| 164 |
+
root = armatures[0].data.bones[0]
|
| 165 |
+
root_name = get_hierarchy_root_joint(root)
|
| 166 |
+
joint_dict = get_joint_dict(root_name)
|
| 167 |
+
|
| 168 |
+
# save meshes
|
| 169 |
+
with open(obj_name, 'w') as file_obj:
|
| 170 |
+
mesh_vert_offsets = record_obj(meshes, file_obj)
|
| 171 |
+
|
| 172 |
+
# save rigs
|
| 173 |
+
with open(info_name, 'w') as file_info:
|
| 174 |
+
record_info(root_name, joint_dict, meshes, mesh_vert_offsets, file_info)
|
| 175 |
+
|
| 176 |
+
print(f"Processed {glb_path}")
|
| 177 |
+
|
| 178 |
+
if __name__ == '__main__':
|
| 179 |
+
|
| 180 |
+
src_dir = 'glbs'
|
| 181 |
+
rigs_dir = 'rigs'
|
| 182 |
+
meshes_dir = 'meshes'
|
| 183 |
+
# Ensure rigs directory exists
|
| 184 |
+
if not os.path.exists(rigs_dir):
|
| 185 |
+
os.makedirs(rigs_dir)
|
| 186 |
+
if not os.path.exists(meshes_dir):
|
| 187 |
+
os.makedirs(meshes_dir)
|
| 188 |
+
|
| 189 |
+
glb_paths = [os.path.join(src_dir, file) for file in os.listdir(src_dir) if file.endswith('.glb')]
|
| 190 |
+
|
| 191 |
+
print(len(glb_paths))
|
| 192 |
+
|
| 193 |
+
for glb_path in glb_paths:
|
| 194 |
+
try:
|
| 195 |
+
process_glb(glb_path, rigs_dir, meshes_dir)
|
| 196 |
+
except Exception as e:
|
| 197 |
+
with open('error.txt', 'a') as error_file:
|
| 198 |
+
error_file.write(f"{glb_path}: {str(e)}\n")
|
third_party/Puppeteer/skeleton/data_utils/render_data.py
ADDED
|
@@ -0,0 +1,61 @@
|
| 1 |
+
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
import os
|
| 15 |
+
import numpy as np
|
| 16 |
+
import cv2
|
| 17 |
+
|
| 18 |
+
from pyrender_wrapper import PyRenderWrapper
|
| 19 |
+
from data_loader import DataLoader
|
| 20 |
+
|
| 21 |
+
def main():
|
| 22 |
+
loader = DataLoader()
|
| 23 |
+
|
| 24 |
+
raw_size = (960, 960)
|
| 25 |
+
renderer = PyRenderWrapper(raw_size)
|
| 26 |
+
|
| 27 |
+
output_dir = 'render_results'
|
| 28 |
+
os.makedirs(output_dir, exist_ok=True)
|
| 29 |
+
|
| 30 |
+
rig_path = 'examples/0a59c5ffa4a1476bac6d540b79947f31.txt'
|
| 31 |
+
mesh_path = rig_path.replace('.txt', '.obj')
|
| 32 |
+
|
| 33 |
+
filename = os.path.splitext(os.path.basename(rig_path))[0]
|
| 34 |
+
|
| 35 |
+
loader.load_rig_data(rig_path)
|
| 36 |
+
loader.load_mesh(mesh_path)
|
| 37 |
+
input_dict = loader.query_mesh_rig()
|
| 38 |
+
|
| 39 |
+
angles = [0, np.pi/2, np.pi, 3*np.pi/2]
|
| 40 |
+
|
| 41 |
+
bbox_center = loader.mesh.bounding_box.centroid
|
| 42 |
+
bbox_size = loader.mesh.bounding_box.extents
|
| 43 |
+
distance = np.max(bbox_size) * 2
|
| 44 |
+
|
| 45 |
+
subfolder_path = os.path.join(output_dir, filename)
|
| 46 |
+
|
| 47 |
+
os.makedirs(subfolder_path, exist_ok=True)
|
| 48 |
+
|
| 49 |
+
for i, angle in enumerate(angles):
|
| 50 |
+
print(f"Rendering view at {np.degrees(angle)} degrees")
|
| 51 |
+
|
| 52 |
+
renderer.set_camera_view(angle, bbox_center, distance)
|
| 53 |
+
renderer.align_light_to_camera()
|
| 54 |
+
|
| 55 |
+
color = renderer.render(input_dict)[0]
|
| 56 |
+
|
| 57 |
+
output_filename = f"{filename}_view{i+1}.png"
|
| 58 |
+
output_filepath = os.path.join(subfolder_path, output_filename)
|
| 59 |
+
cv2.imwrite(output_filepath, color)
|
| 60 |
+
if __name__ == "__main__":
|
| 61 |
+
main()
|
third_party/Puppeteer/skeleton/data_utils/save_npz.py
ADDED
|
@@ -0,0 +1,256 @@
|
| 1 |
+
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
"""
|
| 15 |
+
This python script shows how we process the meshes and rigs from the input folders and save them in a compressed npz file.
|
| 16 |
+
"""
|
| 17 |
+
import os
|
| 18 |
+
import numpy as np
|
| 19 |
+
import glob
|
| 20 |
+
import pickle
|
| 21 |
+
from concurrent.futures import ProcessPoolExecutor
|
| 22 |
+
import skimage.measure
|
| 23 |
+
import trimesh
|
| 24 |
+
import mesh2sdf.core
|
| 25 |
+
import scipy.sparse as sp
|
| 26 |
+
|
| 27 |
+
def read_obj_file(file_path):
|
| 28 |
+
vertices = []
|
| 29 |
+
faces = []
|
| 30 |
+
normals = [] # Added normals list
|
| 31 |
+
|
| 32 |
+
with open(file_path, 'r') as file:
|
| 33 |
+
for line in file:
|
| 34 |
+
if line.startswith('v '):
|
| 35 |
+
parts = line.split()[1:]
|
| 36 |
+
vertices.append([float(parts[0]), float(parts[1]), float(parts[2])])
|
| 37 |
+
elif line.startswith('vn '): # Added reading normals
|
| 38 |
+
parts = line.split()[1:]
|
| 39 |
+
normals.append([float(parts[0]), float(parts[1]), float(parts[2])])
|
| 40 |
+
elif line.startswith('f '):
|
| 41 |
+
parts = line.split()[1:]
|
| 42 |
+
# OBJ format is 1-based, we need 0-based for npz
|
| 43 |
+
face = [int(part.split('//')[0]) - 1 for part in parts]
|
| 44 |
+
faces.append(face)
|
| 45 |
+
|
| 46 |
+
return np.array(vertices), np.array(faces), np.array(normals)
|
| 47 |
+
|
| 48 |
+
def read_rig_file(file_path):
|
| 49 |
+
"""
|
| 50 |
+
Read rig from txt file, our format is the same as RigNet:
|
| 51 |
+
joints joint_name x y z
|
| 52 |
+
root root_joint_name
|
| 53 |
+
skin vertex_idx joint_name weight joint_name weight ...
|
| 54 |
+
hier parent_joint_name child_joint_name
|
| 55 |
+
"""
|
| 56 |
+
joints = []
|
| 57 |
+
bones = []
|
| 58 |
+
joint_names = []
|
| 59 |
+
|
| 60 |
+
joint_mapping = {}
|
| 61 |
+
joint_index = 0
|
| 62 |
+
|
| 63 |
+
skinning_data = {} # Dictionary to store vertex index -> [(joint_idx, weight), ...]
|
| 64 |
+
|
| 65 |
+
with open(file_path, 'r') as file:
|
| 66 |
+
lines = file.readlines()
|
| 67 |
+
|
| 68 |
+
for line in lines:
|
| 69 |
+
parts = line.split()
|
| 70 |
+
if line.startswith('joints'):
|
| 71 |
+
name = parts[1]
|
| 72 |
+
position = [float(parts[2]), float(parts[3]), float(parts[4])]
|
| 73 |
+
joints.append(position)
|
| 74 |
+
joint_names.append(name)
|
| 75 |
+
joint_mapping[name] = joint_index
|
| 76 |
+
joint_index += 1
|
| 77 |
+
elif line.startswith('hier'):
|
| 78 |
+
parent_joint = joint_mapping[parts[1]]
|
| 79 |
+
child_joint = joint_mapping[parts[2]]
|
| 80 |
+
bones.append([parent_joint, child_joint])
|
| 81 |
+
elif line.startswith('root'):
|
| 82 |
+
root = joint_mapping[parts[1]]
|
| 83 |
+
elif line.startswith('skin'):
|
| 84 |
+
vertex_idx = int(parts[1])
|
| 85 |
+
|
| 86 |
+
if vertex_idx not in skinning_data:
|
| 87 |
+
skinning_data[vertex_idx] = []
|
| 88 |
+
|
| 89 |
+
for i in range(2, len(parts), 2):
|
| 90 |
+
if i+1 < len(parts):
|
| 91 |
+
joint_name = parts[i]
|
| 92 |
+
weight = float(parts[i+1])
|
| 93 |
+
|
| 94 |
+
if joint_name in joint_mapping:
|
| 95 |
+
joint_idx = joint_mapping[joint_name]
|
| 96 |
+
skinning_data[vertex_idx].append((joint_idx, weight))
|
| 97 |
+
|
| 98 |
+
return np.array(joints), np.array(bones), root, joint_names, skinning_data
|
| 99 |
+
|
| 100 |
+
def convert_to_sparse_skinning(skinning_data, num_vertices, num_joints):
|
| 101 |
+
"""Convert skinning weights to sparse matrix format."""
|
| 102 |
+
rows = []
|
| 103 |
+
cols = []
|
| 104 |
+
data = []
|
| 105 |
+
|
| 106 |
+
for vertex_idx, weights in skinning_data.items():
|
| 107 |
+
for joint_idx, weight in weights:
|
| 108 |
+
rows.append(vertex_idx)
|
| 109 |
+
cols.append(joint_idx)
|
| 110 |
+
data.append(weight)
|
| 111 |
+
|
| 112 |
+
sparse_skinning = sp.coo_matrix((data, (rows, cols)), shape=(num_vertices, num_joints))
|
| 113 |
+
|
| 114 |
+
# Return as tuple of arrays which can be serialized
|
| 115 |
+
return (sparse_skinning.data, sparse_skinning.row, sparse_skinning.col, sparse_skinning.shape)
|
| 116 |
+
|
| 117 |
+
def normalize_to_unit_cube(vertices, normals=None, scale_factor=1.0):
|
| 118 |
+
min_coords = vertices.min(axis=0)
|
| 119 |
+
max_coords = vertices.max(axis=0)
|
| 120 |
+
center = (max_coords + min_coords) / 2.0
|
| 121 |
+
|
| 122 |
+
vertices -= center
|
| 123 |
+
scale = 1.0 / np.abs(vertices).max() * scale_factor
|
| 124 |
+
vertices *= scale
|
| 125 |
+
|
| 126 |
+
if normals is not None:
|
| 127 |
+
# Normalize each normal vector to unit length
|
| 128 |
+
norms = np.linalg.norm(normals, axis=1, keepdims=True)
|
| 129 |
+
normals = normals / (norms+1e-8)
|
| 130 |
+
|
| 131 |
+
return vertices, normals, center, scale
|
| 132 |
+
else:
|
| 133 |
+
return vertices, center, scale
|
| 134 |
+
|
| 135 |
+
def normalize_vertices(vertices, scale=0.9):
|
| 136 |
+
bbmin, bbmax = vertices.min(0), vertices.max(0)
|
| 137 |
+
center = (bbmin + bbmax) * 0.5
|
| 138 |
+
scale = 2.0 * scale / (bbmax - bbmin).max()
|
| 139 |
+
vertices = (vertices - center) * scale
|
| 140 |
+
return vertices, center, scale
|
| 141 |
+
|
| 142 |
+
def export_to_watertight(normalized_mesh, octree_depth: int = 7):
|
| 143 |
+
"""
|
| 144 |
+
Convert the non-watertight mesh to watertight.
|
| 145 |
+
|
| 146 |
+
Args:
|
| 147 |
+
input_path (str): normalized path
|
| 148 |
+
octree_depth (int):
|
| 149 |
+
|
| 150 |
+
Returns:
|
| 151 |
+
mesh(trimesh.Trimesh): watertight mesh
|
| 152 |
+
|
| 153 |
+
"""
|
| 154 |
+
size = 2 ** octree_depth
|
| 155 |
+
level = 2 / size
|
| 156 |
+
|
| 157 |
+
scaled_vertices, to_orig_center, to_orig_scale = normalize_vertices(normalized_mesh.vertices)
|
| 158 |
+
|
| 159 |
+
sdf = mesh2sdf.core.compute(scaled_vertices, normalized_mesh.faces, size=size)
|
| 160 |
+
|
| 161 |
+
vertices, faces, normals, _ = skimage.measure.marching_cubes(np.abs(sdf), level)
|
| 162 |
+
|
| 163 |
+
# watertight mesh
|
| 164 |
+
vertices = vertices / size * 2 - 1 # -1 to 1
|
| 165 |
+
vertices = vertices / to_orig_scale + to_orig_center
|
| 166 |
+
mesh = trimesh.Trimesh(vertices, faces, normals=normals)
|
| 167 |
+
|
| 168 |
+
return mesh
|
| 169 |
+
|
| 170 |
+
def process_mesh_to_pc(mesh, marching_cubes = True, sample_num = 8192):
|
| 171 |
+
if marching_cubes:
|
| 172 |
+
mesh = export_to_watertight(mesh)
|
| 173 |
+
return_mesh = mesh
|
| 174 |
+
points, face_idx = mesh.sample(sample_num, return_index=True)
|
| 175 |
+
points, _, _ = normalize_to_unit_cube(points, scale_factor=0.9995)
|
| 176 |
+
normals = mesh.face_normals[face_idx]
|
| 177 |
+
|
| 178 |
+
pc_normal = np.concatenate([points, normals], axis=-1, dtype=np.float16)
|
| 179 |
+
return pc_normal, return_mesh
|
| 180 |
+
|
| 181 |
+
def process_single_file(args):
|
| 182 |
+
mesh_file, rig_file = args
|
| 183 |
+
mesh_name = os.path.basename(mesh_file).split('.')[0]
|
| 184 |
+
rig_name = os.path.basename(rig_file).split('.')[0]
|
| 185 |
+
|
| 186 |
+
if mesh_name != rig_name:
|
| 187 |
+
print(f"Skipping files {mesh_file} and {rig_file} because their names do not match.")
|
| 188 |
+
return None
|
| 189 |
+
|
| 190 |
+
vertices, faces, normals = read_obj_file(mesh_file)
|
| 191 |
+
|
| 192 |
+
joints, bones, root, joint_names, skinning_data = read_rig_file(rig_file)
|
| 193 |
+
|
| 194 |
+
# Normalize the mesh to the unit cube centered at the origin
|
| 195 |
+
vertices, normals, center, scale = normalize_to_unit_cube(vertices, normals, scale_factor=0.5)
|
| 196 |
+
|
| 197 |
+
# Apply the same transformation to joints
|
| 198 |
+
joints -= center
|
| 199 |
+
joints *= scale
|
| 200 |
+
|
| 201 |
+
# Create trimesh object for processing
|
| 202 |
+
mesh = trimesh.Trimesh(vertices=vertices, faces=faces)
|
| 203 |
+
|
| 204 |
+
# Process into point cloud with normals
|
| 205 |
+
pc_normal, _ = process_mesh_to_pc(mesh)
|
| 206 |
+
|
| 207 |
+
# Convert skinning data to sparse format
|
| 208 |
+
sparse_skinning = convert_to_sparse_skinning(skinning_data, len(vertices), len(joints))
|
| 209 |
+
|
| 210 |
+
return {
|
| 211 |
+
'vertices': vertices,
|
| 212 |
+
'faces': faces,
|
| 213 |
+
'normals': normals,
|
| 214 |
+
'joints': joints,
|
| 215 |
+
'bones': bones,
|
| 216 |
+
'root_index': root,
|
| 217 |
+
'uuid': mesh_name,
|
| 218 |
+
'pc_w_norm': pc_normal,
|
| 219 |
+
'joint_names': joint_names,
|
| 220 |
+
'skinning_weights_value': sparse_skinning[0], # values
|
| 221 |
+
'skinning_weights_rows': sparse_skinning[1], # row indices
|
| 222 |
+
'skinning_weights_cols': sparse_skinning[2], # column indices
|
| 223 |
+
'skinning_weights_shape': sparse_skinning[3] # shape of matrix
|
| 224 |
+
}
|
| 225 |
+
|
| 226 |
+
def process_files(mesh_folder, rig_folder, output_file, num_workers=8):
|
| 227 |
+
file_pairs = []
|
| 228 |
+
|
| 229 |
+
for root, _, files in os.walk(rig_folder):
|
| 230 |
+
for file in files:
|
| 231 |
+
if file.endswith('.txt'):
|
| 232 |
+
rig_file = os.path.join(root, file)
|
| 233 |
+
obj_base_name = os.path.splitext(file)[0]
|
| 234 |
+
mesh_file = os.path.join(mesh_folder, obj_base_name + '.obj')
|
| 235 |
+
if os.path.exists(mesh_file):
|
| 236 |
+
file_pairs.append((mesh_file, rig_file))
|
| 237 |
+
else:
|
| 238 |
+
print(f"Mesh file not found: {mesh_file}")
|
| 239 |
+
|
| 240 |
+
with ProcessPoolExecutor(max_workers=num_workers) as executor:
|
| 241 |
+
data_list = list(executor.map(process_single_file, file_pairs))
|
| 242 |
+
|
| 243 |
+
data_list = [data for data in data_list if data is not None]
|
| 244 |
+
|
| 245 |
+
np.savez_compressed(output_file, data_list, allow_pickle=True)
|
| 246 |
+
|
| 247 |
+
def main():
|
| 248 |
+
# Example usage
|
| 249 |
+
mesh_folder = 'meshes/'
|
| 250 |
+
rig_folder = 'rigs/'
|
| 251 |
+
output_file = 'results.npz'
|
| 252 |
+
|
| 253 |
+
process_files(mesh_folder, rig_folder, output_file)
|
| 254 |
+
|
| 255 |
+
if __name__ == "__main__":
|
| 256 |
+
main()
|
third_party/Puppeteer/skeleton/demo.py
ADDED
|
@@ -0,0 +1,219 @@
|
| 1 |
+
# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
|
| 2 |
+
#
|
| 3 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
| 4 |
+
# you may not use this file except in compliance with the License.
|
| 5 |
+
# You may obtain a copy of the License at
|
| 6 |
+
#
|
| 7 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
| 8 |
+
#
|
| 9 |
+
# Unless required by applicable law or agreed to in writing, software
|
| 10 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
| 11 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
| 12 |
+
# See the License for the specific language governing permissions and
|
| 13 |
+
# limitations under the License.
|
| 14 |
+
import os
|
| 15 |
+
import torch
|
| 16 |
+
import trimesh
|
| 17 |
+
import argparse
|
| 18 |
+
import numpy as np
|
| 19 |
+
|
| 20 |
+
from tqdm import tqdm
|
| 21 |
+
from trimesh import Scene
|
| 22 |
+
|
| 23 |
+
from accelerate import Accelerator
|
| 24 |
+
from accelerate.utils import set_seed
|
| 25 |
+
from accelerate.utils import DistributedDataParallelKwargs
|
| 26 |
+
|
| 27 |
+
from skeleton_models.skeletongen import SkeletonGPT
|
| 28 |
+
from data_utils.save_npz import normalize_to_unit_cube
|
| 29 |
+
from utils.mesh_to_pc import MeshProcessor
|
| 30 |
+
from utils.save_utils import save_mesh, pred_joints_and_bones, save_skeleton_to_txt, save_skeleton_to_txt_joint, save_args, \
|
| 31 |
+
merge_duplicate_joints_and_fix_bones, save_skeleton_obj, render_mesh_with_skeleton
|
| 32 |
+
|
| 33 |
+
class Dataset:
|
| 34 |
+
def __init__(self, input_list, input_pc_num = 8192, apply_marching_cubes = True, octree_depth = 7, output_dir = None):
|
| 35 |
+
super().__init__()
|
| 36 |
+
self.data = []
|
| 37 |
+
self.output_dir = output_dir
|
| 38 |
+
|
| 39 |
+
mesh_list = []
|
| 40 |
+
for input_path in input_list:
|
| 41 |
+
ext = os.path.splitext(input_path)[1].lower()
|
| 42 |
+
if ext in ['.ply', '.stl', '.obj']:
|
| 43 |
+
cur_data = trimesh.load(input_path, force='mesh')
|
| 44 |
+
mesh_list.append(cur_data)
|
| 45 |
+
else:
|
| 46 |
+
print(f"Unsupported file type: {ext}")
|
| 47 |
+
if apply_marching_cubes:
|
| 48 |
+
print("First apply Marching Cubes and then sample point cloud, need time...")
|
| 49 |
+
pc_list = MeshProcessor.convert_meshes_to_point_clouds(mesh_list, input_pc_num, apply_marching_cubes = apply_marching_cubes, octree_depth = octree_depth)
|
| 50 |
+
for input_path, cur_data, mesh in zip(input_list, pc_list, mesh_list):
|
| 51 |
+
self.data.append({'pc_normal': cur_data, 'faces': mesh.faces, 'vertices': mesh.vertices, 'file_name': os.path.splitext(os.path.basename(input_path))[0]})
|
| 52 |
+
print(f"dataset total data samples: {len(self.data)}")
|
| 53 |
+
|
| 54 |
+
def __len__(self):
|
| 55 |
+
return len(self.data)
|
| 56 |
+
|
| 57 |
+
def __getitem__(self, idx):
|
| 58 |
+
data_dict = {}
|
| 59 |
+
data_dict['pc_normal'] = self.data[idx]['pc_normal']
|
| 60 |
+
# normalize pc coor
|
| 61 |
+
pc_coor = data_dict['pc_normal'][:, :3]
|
| 62 |
+
normals = data_dict['pc_normal'][:, 3:]
|
| 63 |
+
pc_coor, center, scale = normalize_to_unit_cube(pc_coor, scale_factor=0.9995)
|
| 64 |
+
|
| 65 |
+
data_dict['file_name'] = self.data[idx]['file_name']
|
| 66 |
+
pc_coor = pc_coor.astype(np.float32)
|
| 67 |
+
normals = normals.astype(np.float32)
|
| 68 |
+
|
| 69 |
+
point_cloud = trimesh.PointCloud(pc_coor)
|
| 70 |
+
point_cloud.metadata['normals'] = normals
|
| 71 |
+
|
| 72 |
+
try:
|
| 73 |
+
point_cloud.export(os.path.join(self.output_dir, f"{data_dict['file_name']}.ply"))
|
| 74 |
+
except Exception as e:
|
| 75 |
+
print(f"fail to save point clouds: {e}")
|
| 76 |
+
|
| 77 |
+
assert (np.linalg.norm(normals, axis=-1) > 0.99).all(), "normals should be unit vectors, something wrong"
|
| 78 |
+
data_dict['pc_normal'] = np.concatenate([pc_coor, normals], axis=-1, dtype=np.float16)
|
| 79 |
+
|
| 80 |
+
vertices = self.data[idx]['vertices']
|
| 81 |
+
faces = self.data[idx]['faces']
|
| 82 |
+
bounds = np.array([pc_coor.min(axis=0), pc_coor.max(axis=0)])
|
| 83 |
+
pc_center = (bounds[0] + bounds[1])[None, :] / 2
|
| 84 |
+
pc_scale = ((bounds[1] - bounds[0]).max() + 1e-5)
|
| 85 |
+
data_dict['transform_params'] = torch.tensor([
|
| 86 |
+
center[0], center[1], center[2],
|
| 87 |
+
scale,
|
| 88 |
+
pc_center[0][0], pc_center[0][1], pc_center[0][2],
|
| 89 |
+
pc_scale
|
| 90 |
+
], dtype=torch.float32)
|
| 91 |
+
data_dict['vertices'] = vertices
|
| 92 |
+
data_dict['faces']= faces
|
| 93 |
+
return data_dict
|
| 94 |
+
|
| 95 |
+
def get_args():
|
| 96 |
+
parser = argparse.ArgumentParser("SkeletonGPT", add_help=False)
|
| 97 |
+
|
| 98 |
+
parser.add_argument("--input_pc_num", default=8192, type=int)
|
| 99 |
+
parser.add_argument("--num_beams", default=1, type=int)
|
| 100 |
+
parser.add_argument('--input_dir', default=None, type=str, help="input mesh directory")
|
| 101 |
+
parser.add_argument('--input_path', default=None, type=str, help="input mesh path")
|
| 102 |
+
parser.add_argument("--output_dir", default="outputs", type=str)
|
| 103 |
+
parser.add_argument('--llm', default="facebook/opt-350m", type=str, help="The LLM backend")
|
| 104 |
+
parser.add_argument("--pad_id", default=-1, type=int, help="padding id")
|
| 105 |
+
parser.add_argument("--n_discrete_size", default=128, type=int, help="discretized 3D space")
|
| 106 |
+
parser.add_argument("--n_max_bones", default=100, type=int, help="max number of bones")
|
| 107 |
+
parser.add_argument('--dataset_path', default="combine_256_updated", type=str, help="data path")
|
| 108 |
+
parser.add_argument("--seed", default=0, type=int)
|
| 109 |
+
parser.add_argument("--precision", default="fp16", type=str)
|
| 110 |
+
parser.add_argument("--batchsize_per_gpu", default=1, type=int)
|
| 111 |
+
parser.add_argument('--pretrained_weights', default=None, type=str)
|
| 112 |
+
parser.add_argument('--save_name', default="infer_results", type=str)
|
| 113 |
+
parser.add_argument("--save_render", default=False, action="store_true", help="save rendering results of mesh with skel")
|
| 114 |
+
parser.add_argument("--apply_marching_cubes", default=False, action="store_true")
|
| 115 |
+
parser.add_argument("--octree_depth", default=7, type=int)
|
| 116 |
+
parser.add_argument("--hier_order", default=False, action="store_true")
|
| 117 |
+
parser.add_argument("--joint_token", default=False, action="store_true", help="use joint_based tokenization")
|
| 118 |
+
parser.add_argument("--seq_shuffle", default=False, action="store_true", help="shuffle the skeleton sequence")
|
| 119 |
+
|
| 120 |
+
args = parser.parse_args()
|
| 121 |
+
return args
|
| 122 |
+
|
| 123 |
+
if __name__ == "__main__":
|
| 124 |
+
args = get_args()
|
| 125 |
+
|
| 126 |
+
output_dir = f'{args.output_dir}/{args.save_name}'
|
| 127 |
+
os.makedirs(output_dir, exist_ok=True)
|
| 128 |
+
save_args(args, output_dir)
|
| 129 |
+
|
| 130 |
+
kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
|
| 131 |
+
accelerator = Accelerator(
|
| 132 |
+
kwargs_handlers=[kwargs],
|
| 133 |
+
mixed_precision=args.precision,
|
| 134 |
+
)
|
| 135 |
+
|
| 136 |
+
model = SkeletonGPT(args).cuda()
|
| 137 |
+
|
| 138 |
+
if args.pretrained_weights is not None:
|
| 139 |
+
pkg = torch.load(args.pretrained_weights, map_location=torch.device("cpu"))
|
| 140 |
+
model.load_state_dict(pkg["model"])
|
| 141 |
+
else:
|
| 142 |
+
raise ValueError("Pretrained weights must be provided.")
|
| 143 |
+
model.eval()
|
| 144 |
+
set_seed(args.seed)
|
| 145 |
+
|
| 146 |
+
# create dataset
|
| 147 |
+
if args.input_dir is not None:
|
| 148 |
+
input_list = sorted(os.listdir(args.input_dir))
|
| 149 |
+
input_list = [os.path.join(args.input_dir, x) for x in input_list if x.endswith('.ply') or x.endswith('.obj') or x.endswith('.stl')]
|
| 150 |
+
dataset = Dataset(input_list, args.input_pc_num, args.apply_marching_cubes, args.octree_depth, output_dir)
|
| 151 |
+
elif args.input_path is not None:
|
| 152 |
+
dataset = Dataset([args.input_path], args.input_pc_num, args.apply_marching_cubes, args.octree_depth, output_dir)
|
| 153 |
+
else:
|
| 154 |
+
raise ValueError("input_dir or input_path must be provided.")
|
| 155 |
+
|
| 156 |
+
dataloader = torch.utils.data.DataLoader(
|
| 157 |
+
dataset,
|
| 158 |
+
batch_size= 1,
|
| 159 |
+
drop_last = False,
|
| 160 |
+
shuffle = False,
|
| 161 |
+
)
|
| 162 |
+
|
| 163 |
+
dataloader, model = accelerator.prepare(dataloader, model)
|
| 164 |
+
|
| 165 |
+
for curr_iter, batch_data_label in tqdm(enumerate(dataloader), total=len(dataloader)):
|
| 166 |
+
with accelerator.autocast():
|
| 167 |
+
pred_bone_coords = model.generate(batch_data_label)
|
| 168 |
+
|
| 169 |
+
# determine the output file name
|
| 170 |
+
file_name = os.path.basename(batch_data_label['file_name'][0])
|
| 171 |
+
pred_skel_filename = os.path.join(output_dir, f'{file_name}_skel.obj')
|
| 172 |
+
pred_rig_filename = os.path.join(output_dir, f"{file_name}_pred.txt")
|
| 173 |
+
mesh_filename = os.path.join(output_dir, f"{file_name}_mesh.obj")
|
| 174 |
+
|
| 175 |
+
transform_params = batch_data_label['transform_params'][0].cpu().numpy()
|
| 176 |
+
trans = transform_params[:3]
|
| 177 |
+
scale = transform_params[3]
|
| 178 |
+
pc_trans = transform_params[4:7]
|
| 179 |
+
pc_scale = transform_params[7]
|
| 180 |
+
vertices = batch_data_label['vertices'][0].cpu().numpy()
|
| 181 |
+
faces = batch_data_label['faces'][0].cpu().numpy()
|
| 182 |
+
|
| 183 |
+
skeleton = pred_bone_coords[0].cpu().numpy()
|
| 184 |
+
pred_joints, pred_bones = pred_joints_and_bones(skeleton.squeeze())
|
| 185 |
+
|
| 186 |
+
# Post process: merge duplicate or nearby joints and deduplicate bones.
|
| 187 |
+
if args.hier_order: # for MagicArticulate hier order
|
| 188 |
+
pred_root_index = pred_bones[0][0]
|
| 189 |
+
pred_joints, pred_bones, pred_root_index = merge_duplicate_joints_and_fix_bones(pred_joints, pred_bones, root_index=pred_root_index)
|
| 190 |
+
else: # for Puppeteer or MagicArticulate spaital order
|
| 191 |
+
pred_joints, pred_bones = merge_duplicate_joints_and_fix_bones(pred_joints, pred_bones)
|
| 192 |
+
pred_root_index = None
|
| 193 |
+
|
| 194 |
+
# when save rig to txt, denormalize the skeletons to the same scale with input meshes
|
| 195 |
+
pred_joints_denorm = pred_joints * pc_scale + pc_trans # first align with point cloud
|
| 196 |
+
pred_joints_denorm = pred_joints_denorm / scale + trans # then align with original mesh
|
| 197 |
+
|
| 198 |
+
if args.joint_token:
|
| 199 |
+
pred_root_index = save_skeleton_to_txt_joint(pred_joints_denorm, pred_bones, pred_rig_filename)
|
| 200 |
+
else:
|
| 201 |
+
save_skeleton_to_txt(pred_joints_denorm, pred_bones, pred_root_index, args.hier_order, vertices, pred_rig_filename)
|
| 202 |
+
|
| 203 |
+
# save skeletons
|
| 204 |
+
if args.hier_order or args.joint_token:
|
| 205 |
+
save_skeleton_obj(pred_joints, pred_bones, pred_skel_filename, pred_root_index, use_cone=True)
|
| 206 |
+
else:
|
| 207 |
+
save_skeleton_obj(pred_joints, pred_bones, pred_skel_filename, use_cone=False)
|
| 208 |
+
|
| 209 |
+
# when saving mesh and rendering, use normalized vertices (-0.5,0.5)
|
| 210 |
+
vertices_norm = (vertices - trans) * scale
|
| 211 |
+
vertices_norm = (vertices_norm - pc_trans) / pc_scale
|
| 212 |
+
save_mesh(vertices_norm, faces, mesh_filename)
|
| 213 |
+
|
| 214 |
+
# render mesh w/ skeleton
|
| 215 |
+
if args.save_render:
|
| 216 |
+
if args.hier_order or args.joint_token:
|
| 217 |
+
render_mesh_with_skeleton(pred_joints, pred_bones, vertices_norm, faces, output_dir, file_name, prefix='pred', root_idx=pred_root_index)
|
| 218 |
+
else:
|
| 219 |
+
render_mesh_with_skeleton(pred_joints, pred_bones, vertices_norm, faces, output_dir, file_name, prefix='pred')
|
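Note (not part of the repository): the transform bookkeeping in the loop above moves between two coordinate frames, and it is easy to get backwards. The sketch below spells out the round trip, assuming transform_params packs [trans(3), scale, pc_trans(3), pc_scale] exactly as unpacked above; the helper names normalize_vertices and denormalize_joints are illustrative only.

import numpy as np

def normalize_vertices(vertices, trans, scale, pc_trans, pc_scale):
    # original mesh -> normalized mesh -> point-cloud frame (matches vertices_norm above)
    v = (vertices - trans) * scale
    return (v - pc_trans) / pc_scale

def denormalize_joints(joints, trans, scale, pc_trans, pc_scale):
    # point-cloud frame -> original mesh frame (matches pred_joints_denorm above)
    j = joints * pc_scale + pc_trans
    return j / scale + trans

# Round trip: denormalizing a normalized point recovers the original coordinates.
params = (np.array([0.1, -0.2, 0.05]), 2.0, np.array([0.0, 0.1, 0.0]), 1.5)
pts = np.random.rand(8, 3)
assert np.allclose(denormalize_joints(normalize_vertices(pts, *params), *params), pts)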
third_party/Puppeteer/skeleton/demo.sh
ADDED
@@ -0,0 +1,19 @@
CUDA_VISIBLE_DEVICES=0 python demo.py --input_dir ./examples \
    --pretrained_weights skeleton_ckpts/puppeteer_skeleton_w_diverse_pose.pth \
    --save_name infer_results_demo --input_pc_num 8192 \
    --save_render --apply_marching_cubes --joint_token --seq_shuffle

# If the results are not satisfactory, try the model trained with bone-based tokenization:

# CUDA_VISIBLE_DEVICES=0 python demo.py --input_dir ./examples \
#     --pretrained_weights skeleton_ckpts/puppeteer_skeleton_w_diverse_pose_bone_token.pth \
#     --save_name infer_results_demo_bone_token --input_pc_num 8192 \
#     --save_render --apply_marching_cubes --hier_order --seq_shuffle


# If you want to run the demo using MagicArticulate weights, run:

# CUDA_VISIBLE_DEVICES=0 python demo.py --input_dir ./examples \
#     --pretrained_weights skeleton_ckpts/checkpoint_trainonv2_hier.pth \
#     --save_name infer_results_demo_magicarti --input_pc_num 8192 \
#     --save_render --apply_marching_cubes --hier_order
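Usage note (not part of the script): the flags in these commands map onto the argparse options in demo.py above — --joint_token selects joint-based tokenization, --hier_order the hierarchical bone order expected by the MagicArticulate-style checkpoint, and --seq_shuffle shuffles the skeleton sequence. demo.py also accepts --input_path to process a single .obj/.ply/.stl file instead of a whole --input_dir.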
third_party/Puppeteer/skeleton/download.py
ADDED
@@ -0,0 +1,25 @@
from huggingface_hub import hf_hub_download

file_path = hf_hub_download(
    repo_id="Maikou/Michelangelo",
    filename="checkpoints/aligned_shape_latents/shapevae-256.ckpt",
    local_dir="third_partys/Michelangelo"
)

file_path = hf_hub_download(
    repo_id="Seed3D/Puppeteer",
    filename="skeleton_ckpts/puppeteer_skeleton_w_diverse_pose.pth",
    local_dir="skeleton"
)

file_path = hf_hub_download(
    repo_id="Seed3D/Puppeteer",
    filename="skeleton_ckpts/puppeteer_skeleton_wo_diverse_pose.pth",
    local_dir="skeleton"
)

file_path = hf_hub_download(
    repo_id="Seed3D/Puppeteer",
    filename="skeleton_ckpts/puppeteer_skeleton_w_diverse_pose_bone_token.pth",
    local_dir="skeleton"
)
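Aside (not part of the repository): since download.py repeats the same call four times, an equivalent loop over (repo_id, filename, local_dir) tuples keeps the checkpoint list in one place. This is only a sketch of an alternative using the same checkpoints listed above; the skip-if-present check is an added convenience, not behavior of the original script.

import os
from huggingface_hub import hf_hub_download

CHECKPOINTS = [
    ("Maikou/Michelangelo", "checkpoints/aligned_shape_latents/shapevae-256.ckpt", "third_partys/Michelangelo"),
    ("Seed3D/Puppeteer", "skeleton_ckpts/puppeteer_skeleton_w_diverse_pose.pth", "skeleton"),
    ("Seed3D/Puppeteer", "skeleton_ckpts/puppeteer_skeleton_wo_diverse_pose.pth", "skeleton"),
    ("Seed3D/Puppeteer", "skeleton_ckpts/puppeteer_skeleton_w_diverse_pose_bone_token.pth", "skeleton"),
]

for repo_id, filename, local_dir in CHECKPOINTS:
    # With local_dir set, hf_hub_download places the file at local_dir/filename.
    target = os.path.join(local_dir, filename)
    if os.path.exists(target):
        print(f"skip (already present): {target}")
        continue
    path = hf_hub_download(repo_id=repo_id, filename=filename, local_dir=local_dir)
    print(f"downloaded: {path}")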