eeuuia committed on
Commit c701c4f · verified · 1 Parent(s): 82e06c6

Update api/ltx_server_refactored.py

Files changed (1)
  1. api/ltx_server_refactored.py +21 -35
api/ltx_server_refactored.py CHANGED
@@ -40,28 +40,22 @@ from tools.video_encode_tool import video_encode_tool_singleton
 DEPS_DIR = Path("/data")
 LTX_VIDEO_REPO_DIR = DEPS_DIR / "LTX-Video"
 
-# (All setup functions, helpers and class initialization remain unchanged)
-# ... (run_setup, add_deps_to_path, _query_gpu_processes_via_nvml, etc.)
 def run_setup():
     setup_script_path = "setup.py"
-    if not os.path.exists(setup_script_path):
-        print("[DEBUG] 'setup.py' not found. Skipping dependency cloning.")
-        return
-    try:
-        print("[DEBUG] Running setup.py for dependencies...")
-        subprocess.run([sys.executable, setup_script_path], check=True)
-        print("[DEBUG] Setup finished successfully.")
-    except subprocess.CalledProcessError as e:
-        print(f"[DEBUG] ERROR in setup.py (code {e.returncode}). Aborting.")
-        sys.exit(1)
+    print("[DEBUG] Running setup.py for dependencies...")
+    subprocess.run([sys.executable, setup_script_path], check=True)
+    print("[DEBUG] Setup finished successfully.")
+
 if not LTX_VIDEO_REPO_DIR.exists():
     print(f"[DEBUG] Repository not found at {LTX_VIDEO_REPO_DIR}. Running setup...")
     run_setup()
+
 def add_deps_to_path():
     repo_path = str(LTX_VIDEO_REPO_DIR.resolve())
     if str(LTX_VIDEO_REPO_DIR.resolve()) not in sys.path:
         sys.path.insert(0, repo_path)
         print(f"[DEBUG] Repo added to sys.path: {repo_path}")
+
 def calculate_padding(orig_h, orig_w, target_h, target_w):
     pad_h = target_h - orig_h
     pad_w = target_w - orig_w
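
After this hunk, run_setup() no longer checks that setup.py exists and no longer catches subprocess.CalledProcessError; with check=True, a failing or missing setup.py now raises at import time of whichever module triggers the bootstrap. A minimal sketch of how a caller could fail fast, assuming the module is importable as api.ltx_server_refactored (the import path and the guard are illustrative, not part of the commit):

import subprocess
import sys

try:
    # Importing the module runs the bootstrap: run_setup() fires only when
    # /data/LTX-Video is missing, and subprocess.run(..., check=True) raises
    # CalledProcessError if setup.py exits non-zero (or cannot be opened).
    from api import ltx_server_refactored
except subprocess.CalledProcessError as e:
    print(f"[DEBUG] setup.py failed with exit code {e.returncode}. Aborting.")
    sys.exit(1)
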
@@ -70,6 +64,7 @@ def calculate_padding(orig_h, orig_w, target_h, target_w):
     pad_left = pad_w // 2
     pad_right = pad_w - pad_left
     return (pad_left, pad_right, pad_top, pad_bottom)
+
 def log_tensor_info(tensor, name="Tensor"):
     if not isinstance(tensor, torch.Tensor):
         print(f"\n[INFO] '{name}' is not a tensor.")
@@ -79,10 +74,7 @@ def log_tensor_info(tensor, name="Tensor"):
     print(f" - Dtype: {tensor.dtype}")
     print(f" - Device: {tensor.device}")
     if tensor.numel() > 0:
-        try:
-            print(f" - Min: {tensor.min().item():.4f} Max: {tensor.max().item():.4f} Mean: {tensor.mean().item():.4f}")
-        except Exception:
-            pass
+        print(f" - Min: {tensor.min().item():.4f} Max: {tensor.max().item():.4f} Mean: {tensor.mean().item():.4f}")
     print("------------------------------------------\n")
 
 add_deps_to_path()
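
With the try/except gone, the stats line is guarded only by tensor.numel() > 0. That guard covers empty tensors, but Tensor.mean() still raises a RuntimeError for integer dtypes, which the removed except used to swallow. A dtype-tolerant variant, shown as an illustrative sketch (the helper name log_tensor_stats is hypothetical):

import torch

def log_tensor_stats(tensor: torch.Tensor, name: str = "Tensor") -> None:
    # Cast to float before mean() so integer tensors do not raise.
    if tensor.numel() == 0:
        print(f"[INFO] '{name}' is empty.")
        return
    values = tensor.detach().float()
    print(f" - Min: {values.min().item():.4f} "
          f"Max: {values.max().item():.4f} "
          f"Mean: {values.mean().item():.4f}")

log_tensor_stats(torch.arange(5), name="int_example")  # works for an integer tensor
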
@@ -200,19 +192,14 @@ class VideoService:
 
     @torch.no_grad()
     def _upsample_latents_internal(self, latents: torch.Tensor) -> torch.Tensor:
-        try:
-            if not self.latent_upsampler:
-                raise ValueError("Latent Upsampler is not loaded.")
-            latents_unnormalized = un_normalize_latents(latents, self.pipeline.vae, vae_per_channel_normalize=True)
-            upsampled_latents = self.latent_upsampler(latents_unnormalized)
-            return normalize_latents(upsampled_latents, self.pipeline.vae, vae_per_channel_normalize=True)
-        except Exception as e:
-            pass
-        finally:
-            torch.cuda.empty_cache()
-            torch.cuda.ipc_collect()
-            self.finalize(keep_paths=[])
-
+        latents_unnormalized = un_normalize_latents(latents, self.pipeline.vae, vae_per_channel_normalize=True)
+        upsampled_latents = self.latent_upsampler(latents_unnormalized)
+        torch.cuda.empty_cache()
+        torch.cuda.ipc_collect()
+        self.finalize(keep_paths=[])
+        return normalize_latents(upsampled_latents, self.pipeline.vae, vae_per_channel_normalize=True)
+
+
     def _prepare_conditioning_tensor(self, filepath, height, width, padding_values):
         tensor = load_image_to_tensor_with_resize_and_crop(filepath, height, width)
         tensor = torch.nn.functional.pad(tensor, padding_values)
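
In the new _upsample_latents_internal, the CUDA cache cleanup and self.finalize() run on the success path only; if the upsampler or the normalization call raises, no cleanup happens and the exception propagates to the caller. A try/finally variant that keeps the committed logic but guarantees cleanup, shown as a sketch rather than the committed code:

    @torch.no_grad()
    def _upsample_latents_internal(self, latents: torch.Tensor) -> torch.Tensor:
        # Same math as the committed version; cleanup moved into finally so it
        # also runs when the upsampler raises.
        try:
            latents_unnormalized = un_normalize_latents(
                latents, self.pipeline.vae, vae_per_channel_normalize=True
            )
            upsampled_latents = self.latent_upsampler(latents_unnormalized)
            return normalize_latents(
                upsampled_latents, self.pipeline.vae, vae_per_channel_normalize=True
            )
        finally:
            torch.cuda.empty_cache()
            torch.cuda.ipc_collect()
            self.finalize(keep_paths=[])
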
@@ -266,7 +253,7 @@ class VideoService:
             "output_type": "latent", "conditioning_items": conditioning_items, "guidance_scale": float(guidance_scale),
             **(self.config.get("first_pass", {}))
         }
-        try:
+        if True:
             with torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype, enabled=self.device == 'cuda'):
                 latents = self.pipeline(**first_pass_kwargs).images
                 pixel_tensor = vae_manager_singleton.decode(latents.clone(), decode_timestep=float(self.config.get("decode_timestep", 0.05)))
@@ -274,15 +261,14 @@ class VideoService:
             latents_cpu = latents.detach().to("cpu")
             tensor_path = os.path.join(results_dir, f"latents_low_res_{used_seed}.pt")
             torch.save(latents_cpu, tensor_path)
-            return video_path, tensor_path, used_seed
-
-        except Exception as e:
-            pass
-        finally:
+
             torch.cuda.empty_cache()
             torch.cuda.ipc_collect()
             self.finalize(keep_paths=[])
 
+            return video_path, tensor_path, used_seed
+
+
     def generate_upscale_denoise(self, latents_path, prompt, negative_prompt, guidance_scale, seed):
         used_seed = random.randint(0, 2**32 - 1) if seed is None else int(seed)
         seed_everething(used_seed)
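
In the two hunks above, if True: is an indentation-preserving stand-in for the removed try:, and the cleanup plus the return now always execute in that order on the success path. The same tail sketched without the placeholder, using try/finally; names are as in the diff, and video_path is produced by code elided between these hunks, so the encode comment below is an assumption:

        try:
            with torch.autocast(device_type="cuda", dtype=self.runtime_autocast_dtype,
                                enabled=self.device == 'cuda'):
                latents = self.pipeline(**first_pass_kwargs).images
                pixel_tensor = vae_manager_singleton.decode(
                    latents.clone(),
                    decode_timestep=float(self.config.get("decode_timestep", 0.05)),
                )
            # ... elided in the diff: pixel_tensor is presumably encoded to video_path ...
            latents_cpu = latents.detach().to("cpu")
            tensor_path = os.path.join(results_dir, f"latents_low_res_{used_seed}.pt")
            torch.save(latents_cpu, tensor_path)
        finally:
            torch.cuda.empty_cache()
            torch.cuda.ipc_collect()
            self.finalize(keep_paths=[])
        return video_path, tensor_path, used_seed
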
 