|
492 | 492 | " # Create timesteps\n", |
493 | 493 | " timesteps = torch.randint(0, inferer.scheduler.num_train_timesteps, (batch_size,)).to(device).long()\n", |
494 | 494 | " # Get model prediction\n", |
495 | | - " # cross attention expects shape [batch size, sequence length, channels], \n", |
496 | | - " #we are use channels = latent dimension and sequence length = 1\n", |
| 495 | + " # cross attention expects shape [batch size, sequence length, channels],\n", |
| 496 | + " # we are use channels = latent dimension and sequence length = 1\n", |
497 | 497 | " latent = model.semantic_encoder(images)\n", |
498 | 498 | " noise_pred = inferer(\n", |
499 | 499 | " inputs=images, diffusion_model=model.unet, noise=noise, timesteps=timesteps, condition=latent.unsqueeze(2)\n", |
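As a sanity check on that conditioning shape, here is a minimal standalone sketch; the tensor sizes are illustrative stand-ins, not values taken from this notebook:

```python
import torch

batch_size, latent_dim = 4, 64  # illustrative sizes (assumed, not from the diff)
latent = torch.randn(batch_size, latent_dim)  # stand-in for semantic_encoder(images)

# unsqueeze(2) turns [batch, latent_dim] into [batch, latent_dim, 1]:
# under the [batch size, sequence length, channels] layout, this is a
# sequence of latent_dim tokens with a single channel each.
condition = latent.unsqueeze(2)
print(condition.shape)  # torch.Size([4, 64, 1])
```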
|
529 | 529 | " iter_loss_list.append(iter_loss / val_interval)\n", |
530 | 530 | " val_iter_loss_list.append(val_iter_loss / len(val_loader))\n", |
531 | 531 | " iter_loss = 0\n", |
532 | | - " print(\n", |
533 | | - " f\"Iteration {epoch} - Interval Loss {iter_loss_list[-1]:.4f}, \n", |
534 | | - " Interval Loss Val {val_iter_loss_list[-1]:.4f}\"\n", |
535 | | - " )\n", |
| 532 | + " to_print = [\n", |
| 533 | + " f\"Iteration {epoch} - Interval Loss {iter_loss_list[-1]:.4f}\",\n", |
| 534 | + " f\"Interval Loss Val {val_iter_loss_list[-1]:.4f}\",\n", |
| 535 | + " ]\n", |
| 536 | + " print(\"\".join(to_print))\n", |
536 | 537 | "\n", |
537 | 538 | "total_time = time.time() - total_start\n", |
538 | 539 | "\n", |
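The removed version split a single f-string literal across physical lines, which is a Python syntax error; building the message parts in a list and joining them keeps each line short while reproducing the original ", "-separated output. A standalone sketch of the pattern, with placeholder loss values:

```python
# Placeholder values, chosen only to make the example runnable.
epoch = 10
iter_loss_list = [0.1234]
val_iter_loss_list = [0.5678]

to_print = [
    f"Iteration {epoch} - Interval Loss {iter_loss_list[-1]:.4f}",
    f"Interval Loss Val {val_iter_loss_list[-1]:.4f}",
]
# Join with ", " so the two fields stay separated in the printed line.
print(", ".join(to_print))
# Iteration 10 - Interval Loss 0.1234, Interval Loss Val 0.5678
```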
|