From 7b61e9eb58e0a004b451e8f06c9f90b861f81b45 Mon Sep 17 00:00:00 2001
From: recris
Date: Sat, 30 Nov 2024 11:36:40 +0000
Subject: [PATCH] Fix issues found in review (pt 2)

---
 library/train_util.py | 2 +-
 sd3_train.py          | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/library/train_util.py b/library/train_util.py
index eaf6ec004..d5e72323a 100644
--- a/library/train_util.py
+++ b/library/train_util.py
@@ -5875,7 +5875,7 @@ def get_huber_threshold(args, timesteps: torch.Tensor, noise_scheduler) -> torch
         alpha = -math.log(args.huber_c) / noise_scheduler.config.num_train_timesteps
         result = torch.exp(-alpha * timesteps) * args.huber_scale
     elif args.huber_schedule == "snr":
-        if not hasattr(noise_scheduler, "alphas_cumprod"):
+        if noise_scheduler is None or not hasattr(noise_scheduler, "alphas_cumprod"):
             raise NotImplementedError("Huber schedule 'snr' is not supported with the current model.")
         alphas_cumprod = torch.index_select(noise_scheduler.alphas_cumprod, 0, timesteps.cpu())
         sigmas = ((1.0 - alphas_cumprod) / alphas_cumprod) ** 0.5
diff --git a/sd3_train.py b/sd3_train.py
index cf2bdf938..909c5ead6 100644
--- a/sd3_train.py
+++ b/sd3_train.py
@@ -845,7 +845,7 @@ def grad_hook(parameter: torch.Tensor):
                 # )
                 # calculate loss
                 loss = train_util.conditional_loss(
-                    args, model_pred.float(), target.float(), timesteps, "none", noise_scheduler
+                    args, model_pred.float(), target.float(), timesteps, "none", None
                 )
                 if args.masked_loss or ("alpha_masks" in batch and batch["alpha_masks"] is not None):
                     loss = apply_masked_loss(loss, batch)
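
--
Context for the two hunks (an illustrative note, not part of the patch): SD3 in
sd3_train.py trains with a flow-matching objective, so there is no DDPM-style
scheduler exposing alphas_cumprod and the "snr" huber schedule cannot be
computed; the call site therefore passes None, and the explicit `is None` guard
in get_huber_threshold documents that contract rather than relying on
hasattr(None, ...) returning False. The sketch below reproduces only the
guarded "snr" branch under those assumptions; huber_threshold_snr and the
SimpleNamespace stand-in for a scheduler are hypothetical names, and the
scaling that follows sigmas in the real function is elided.

    import torch
    from types import SimpleNamespace


    def huber_threshold_snr(timesteps, noise_scheduler):
        # Reproduces the patched "snr" branch of get_huber_threshold; the
        # threshold derivation that follows sigmas in the real code is elided.
        if noise_scheduler is None or not hasattr(noise_scheduler, "alphas_cumprod"):
            raise NotImplementedError("Huber schedule 'snr' is not supported with the current model.")
        alphas_cumprod = torch.index_select(noise_scheduler.alphas_cumprod, 0, timesteps.cpu())
        sigmas = ((1.0 - alphas_cumprod) / alphas_cumprod) ** 0.5
        return sigmas


    timesteps = torch.randint(0, 1000, (4,))

    # A DDPM-style scheduler exposes alphas_cumprod, so sigmas are computed:
    ddpm_like = SimpleNamespace(alphas_cumprod=torch.linspace(0.999, 0.001, 1000))
    print(huber_threshold_snr(timesteps, ddpm_like))

    # sd3_train.py now passes None for the scheduler (flow matching has no
    # alphas_cumprod), which the explicit guard turns into a clear error:
    try:
        huber_threshold_snr(timesteps, None)
    except NotImplementedError as err:
        print(err)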