Commit 5b57f61b authored by flamelaw

fix pin_memory with different latent sampling method

parent 2d22d72c
modules/hypernetworks/hypernetwork.py  +4 −1
@@ -416,7 +416,10 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step,
     pin_memory = shared.opts.pin_memory

     ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method)
-    dl = modules.textual_inversion.dataset.PersonalizedDataLoader(ds, batch_size=ds.batch_size, pin_memory=pin_memory)
+
+    latent_sampling_method = ds.latent_sampling_method
+
+    dl = modules.textual_inversion.dataset.PersonalizedDataLoader(ds, latent_sampling_method=latent_sampling_method, batch_size=ds.batch_size, pin_memory=pin_memory)

     if unload:
         shared.sd_model.cond_stage_model.to(devices.cpu)
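Note on this hunk: latent_sampling_method is read back from the constructed dataset instead of reusing the function argument, presumably because PersonalizedBase can fall back to a different method than the one requested (e.g. to "once" when the first-stage encoding cannot be re-sampled), so the loader must pick its collate function from the effective value.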
modules/textual_inversion/dataset.py  +19 −4
@@ -138,8 +138,11 @@ class PersonalizedBase(Dataset):
         return entry

 class PersonalizedDataLoader(DataLoader):
-    def __init__(self, *args, **kwargs):
-        super(PersonalizedDataLoader, self).__init__(shuffle=True, drop_last=True, *args, **kwargs)
+    def __init__(self, dataset, latent_sampling_method="once", batch_size=1, pin_memory=False):
+        super(PersonalizedDataLoader, self).__init__(dataset, shuffle=True, drop_last=True, batch_size=batch_size, pin_memory=pin_memory)
+        if latent_sampling_method == "random":
+            self.collate_fn = collate_wrapper_random
+        else:
+            self.collate_fn = collate_wrapper

@@ -148,6 +151,8 @@ class BatchLoader:
         self.cond_text = [entry.cond_text for entry in data]
         self.cond = [entry.cond for entry in data]
         self.latent_sample = torch.stack([entry.latent_sample for entry in data]).squeeze(1)
+        #self.emb_index = [entry.emb_index for entry in data]
+        #print(self.latent_sample.device)

     def pin_memory(self):
         self.latent_sample = self.latent_sample.pin_memory()
@@ -155,3 +160,13 @@

 def collate_wrapper(batch):
     return BatchLoader(batch)
+
+class BatchLoaderRandom(BatchLoader):
+    def __init__(self, data):
+        super().__init__(data)
+
+    def pin_memory(self):
+        return self
+
+def collate_wrapper_random(batch):
+    return BatchLoaderRandom(batch)
\ No newline at end of file
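The dataset.py change leans on PyTorch's custom-batch memory-pinning protocol: when a DataLoader is created with pin_memory=True and the collate function returns a custom object, the loader calls that object's own pin_memory() method. With "random" latent sampling the latents are re-sampled per batch on the GPU, where host-memory pinning does not apply, which is likely why BatchLoaderRandom overrides pin_memory() to a no-op. A minimal, self-contained sketch of the protocol follows; ToyDataset and Batch are hypothetical stand-ins for the repo's classes, not its actual code.

import torch
from torch.utils.data import DataLoader, Dataset

class ToyDataset(Dataset):  # hypothetical stand-in for PersonalizedBase
    def __len__(self):
        return 8
    def __getitem__(self, i):
        return torch.randn(4)

class Batch:  # plays the role of BatchLoader above
    def __init__(self, samples):
        # A class works as collate_fn: it is called with the list of samples.
        self.latent_sample = torch.stack(samples)
    def pin_memory(self):
        # DataLoader calls this when pin_memory=True. Pinning page-locks host
        # memory so .to(device, non_blocking=True) can overlap copy and compute.
        self.latent_sample = self.latent_sample.pin_memory()
        return self

dl = DataLoader(ToyDataset(), batch_size=2, collate_fn=Batch, pin_memory=True)
for batch in dl:
    # Holds on a CUDA build; on a CPU-only build DataLoader skips pinning.
    assert batch.latent_sample.is_pinned()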
modules/textual_inversion/textual_inversion.py  +1 −6
@@ -277,7 +277,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_

     latent_sampling_method = ds.latent_sampling_method

-    dl = modules.textual_inversion.dataset.PersonalizedDataLoader(ds, batch_size=ds.batch_size, pin_memory=False)
+    dl = modules.textual_inversion.dataset.PersonalizedDataLoader(ds, latent_sampling_method=latent_sampling_method, batch_size=ds.batch_size, pin_memory=pin_memory)

     if unload:
         shared.sd_model.first_stage_model.to(devices.cpu)
@@ -333,11 +333,6 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
                 # go back until we reach gradient accumulation steps
                 if (j + 1) % gradient_step != 0:
                     continue
-                #print(f"grad:{embedding.vec.grad.detach().cpu().abs().mean().item():.7f}")
-                #scaler.unscale_(optimizer)
-                #print(f"grad:{embedding.vec.grad.detach().cpu().abs().mean().item():.7f}")
-                #torch.nn.utils.clip_grad_norm_(embedding.vec, max_norm=1.0)
-                #print(f"grad:{embedding.vec.grad.detach().cpu().abs().mean().item():.7f}")
                 scaler.step(optimizer)
                 scaler.update()
                 embedding.step += 1
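The lines removed in this hunk were commented-out gradient-debugging and clipping experiments; what remains is the standard torch.cuda.amp gradient-accumulation pattern: keep backpropagating scaled losses and only step the optimizer every gradient_step micro-batches. A runnable sketch of that pattern, using a toy model and optimizer rather than the repo's training state, and assuming a CUDA device:

import torch

model = torch.nn.Linear(8, 1).cuda()  # toy stand-in for the trained embedding
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
scaler = torch.cuda.amp.GradScaler()
gradient_step = 4  # micro-batches accumulated per optimizer step

for j, x in enumerate(torch.randn(8, 2, 8)):
    with torch.autocast("cuda"):
        # Divide by gradient_step so the accumulated gradient matches one
        # full-batch step.
        loss = model(x.cuda()).pow(2).mean() / gradient_step
    scaler.scale(loss).backward()  # gradients accumulate across iterations
    if (j + 1) % gradient_step != 0:
        continue  # not at the accumulation boundary yet
    scaler.step(optimizer)  # unscales grads; skips the step on inf/NaN
    scaler.update()
    optimizer.zero_grad(set_to_none=True)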