Unverified Commit 6165f07e authored by NoCrypt's avatar NoCrypt Committed by GitHub
Browse files

Merge branch 'master' into patch-1

parents c556d345 e666220e
Loading
Loading
Loading
Loading
+33 −0
Original line number Diff line number Diff line
// attaches listeners to the txt2img and img2img galleries to update displayed generation param text when the image changes

// Lazily wire up the txt2img/img2img galleries and the lightbox modal the
// first time each appears in the DOM; onUiUpdate fires repeatedly, so each
// cached reference guards against double-attachment.
let txt2img_gallery, img2img_gallery, modal = undefined;
onUiUpdate(function(){
	// A falsy cache entry means the element was not found yet — retry.
	txt2img_gallery = txt2img_gallery || attachGalleryListeners("txt2img");
	img2img_gallery = img2img_gallery || attachGalleryListeners("img2img");
	if (!modal) {
		modal = gradioApp().getElementById('lightboxModal');
		// Watch only the style attribute: show/hide toggles display.
		modalObserver.observe(modal, { attributes: true, attributeFilter: ['style'] });
	}
});

// Refreshes the generation-info text when the lightbox modal is closed on the
// txt2img or img2img tab (the user may have switched images inside the modal).
let modalObserver = new MutationObserver(function(mutations) {
	mutations.forEach(function(mutationRecord) {
		let selectedTab = gradioApp().querySelector('#tabs div button.bg-white')?.innerText
		// BUGFIX: the original condition was `hidden && txt2img || img2img`;
		// `&&` binds tighter than `||`, so it fired on every style mutation while
		// the img2img tab was selected, even when the modal was still open.
		// Parentheses restore the intended "modal hidden AND a known tab" check.
		if (mutationRecord.target.style.display === 'none' && (selectedTab === 'txt2img' || selectedTab === 'img2img'))
			gradioApp().getElementById(selectedTab+"_generation_info_button").click()
	});
});

// Attaches click/keyboard listeners to a tab's gallery so the displayed
// generation parameters refresh when the selected image changes.
// Returns the gallery element, or null/undefined if it is not in the DOM yet
// (callers use the falsy return to retry on the next UI update).
function attachGalleryListeners(tab_name) {
	// BUGFIX: `gallery` was assigned without a declaration, leaking an implicit
	// global (and throwing a ReferenceError under strict mode).
	let gallery = gradioApp().querySelector('#'+tab_name+'_gallery')
	gallery?.addEventListener('click', () => gradioApp().getElementById(tab_name+"_generation_info_button").click());
	gallery?.addEventListener('keydown', (e) => {
		if (e.keyCode == 37 || e.keyCode == 39) // left or right arrow: gallery navigation keys
			gradioApp().getElementById(tab_name+"_generation_info_button").click()
	});
	return gallery;
}
+14 −2
Original line number Diff line number Diff line
@@ -15,6 +15,9 @@ from modules.sd_models import checkpoints_list
from modules.realesrgan_model import get_realesrgan_models
from typing import List

if shared.cmd_opts.deepdanbooru:
    from modules.deepbooru import get_deepbooru_tags

def upscaler_to_index(name: str):
    try:
        return [x.name.lower() for x in shared.sd_upscalers].index(name.lower())
@@ -220,11 +223,20 @@ class Api:
        if image_b64 is None:
            raise HTTPException(status_code=404, detail="Image not found") 

        img = self.__base64_to_image(image_b64)
        img = decode_base64_to_image(image_b64)
        img = img.convert('RGB')

        # Override object param
        with self.queue_lock:
            if interrogatereq.model == "clip":
                processed = shared.interrogator.interrogate(img)
            elif interrogatereq.model == "deepdanbooru":
                if shared.cmd_opts.deepdanbooru:
                    processed = get_deepbooru_tags(img)
                else:
                    raise HTTPException(status_code=404, detail="Model not found. Add --deepdanbooru when launching for using the model.")
            else:
                raise HTTPException(status_code=404, detail="Model not found")
        
        return InterrogateResponse(caption=processed)

+1 −0
Original line number Diff line number Diff line
@@ -170,6 +170,7 @@ class ProgressResponse(BaseModel):

# Request body for the interrogate API endpoint.
# NOTE: documented with comments rather than a class docstring on purpose —
# pydantic exposes the docstring as the schema description in generated docs.
class InterrogateRequest(BaseModel):
    # Base64-encoded image data to run the interrogator on.
    image: str = Field(default="", title="Image", description="Image to work on, must be a Base64 string containing the image's data.")
    # Interrogation model selector; the API handler accepts "clip" or "deepdanbooru".
    model: str = Field(default="clip", title="Model", description="The interrogate model used.")

# Response body for the interrogate API endpoint.
class InterrogateResponse(BaseModel):
    # Caption text produced by the selected interrogation model.
    caption: str = Field(default=None, title="Caption", description="The generated caption for the image.")
+11 −2
Original line number Diff line number Diff line
from pyngrok import ngrok, conf, exception


def connect(token, port, region):
    """Open an ngrok tunnel to the local web UI.

    token may be None (anonymous tunnel), a bare authtoken, or the form
    "authtoken:username:password" to protect the tunnel with basic auth.
    port is the local port to expose; region is the ngrok region code.
    Prints an error (and returns None) if the authtoken is rejected.
    """
    account = None
    if token is None:
        # pyngrok wants a string; the literal 'None' preserves prior behavior.
        token = 'None'
    elif ':' in token:
        # token = authtoken:username:password — split credentials out once
        # instead of re-splitting the string three times.
        fields = token.split(':')
        account = fields[1] + ':' + fields[-1]
        token = fields[0]

    config = conf.PyngrokConfig(
        auth_token=token, region=region
    )
    try:
        if account is None:
            public_url = ngrok.connect(port, pyngrok_config=config, bind_tls=True).public_url
        else:
            public_url = ngrok.connect(port, pyngrok_config=config, bind_tls=True, auth=account).public_url
    except exception.PyngrokNgrokError:
        print(f'Invalid ngrok authtoken, ngrok connection aborted.\n'
              f'Your token: {token}, get the right one on https://dashboard.ngrok.com/get-started/your-authtoken')
+18 −11
Original line number Diff line number Diff line
@@ -163,13 +163,21 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
    checkpoint_file = checkpoint_info.filename
    sd_model_hash = checkpoint_info.hash

    if shared.opts.sd_checkpoint_cache > 0 and hasattr(model, "sd_checkpoint_info"):
    cache_enabled = shared.opts.sd_checkpoint_cache > 0

    if cache_enabled:
        sd_vae.restore_base_vae(model)
        checkpoints_loaded[model.sd_checkpoint_info] = model.state_dict().copy()

    vae_file = sd_vae.resolve_vae(checkpoint_file, vae_file=vae_file)

    if checkpoint_info not in checkpoints_loaded:
    if cache_enabled and checkpoint_info in checkpoints_loaded:
        # use checkpoint cache
        vae_name = sd_vae.get_filename(vae_file) if vae_file else None
        vae_message = f" with {vae_name} VAE" if vae_name else ""
        print(f"Loading weights [{sd_model_hash}]{vae_message} from cache")
        model.load_state_dict(checkpoints_loaded[checkpoint_info])
    else:
        # load from file
        print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")

        pl_sd = torch.load(checkpoint_file, map_location=shared.weight_load_location)
@@ -181,6 +189,10 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
        model.load_state_dict(sd, strict=False)
        del sd
        
        if cache_enabled:
            # cache newly loaded model
            checkpoints_loaded[checkpoint_info] = model.state_dict().copy()

        if shared.cmd_opts.opt_channelslast:
            model.to(memory_format=torch.channels_last)

@@ -199,14 +211,9 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):

        model.first_stage_model.to(devices.dtype_vae)

    else:
        vae_name = sd_vae.get_filename(vae_file) if vae_file else None
        vae_message = f" with {vae_name} VAE" if vae_name else ""
        print(f"Loading weights [{sd_model_hash}]{vae_message} from cache")
        model.load_state_dict(checkpoints_loaded[checkpoint_info])

    if shared.opts.sd_checkpoint_cache > 0:
        while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache:
    # clean up cache if limit is reached
    if cache_enabled:
        while len(checkpoints_loaded) > shared.opts.sd_checkpoint_cache + 1: # we need to count the current model
            checkpoints_loaded.popitem(last=False)  # LRU

    model.sd_model_hash = sd_model_hash
Loading