Unverified Commit adf723a9 authored by AUTOMATIC1111's avatar AUTOMATIC1111 Committed by GitHub
Browse files

Merge pull request #8492 from zhanghua000/absolute-path

fix: gradio's ValueError about fetching extension files
parents ddc503d1 d25c4b13
Loading
Loading
Loading
Loading
+12 −7
Original line number Diff line number Diff line
@@ -7,6 +7,11 @@ import shlex
import platform
import argparse
import json
try:
    from modules.paths import script_path, data_path
except ModuleNotFoundError:
    script_path = os.path.dirname(__file__)
    data_path = os.getcwd()

dir_repos = "repositories"
dir_extensions = "extensions"
@@ -122,7 +127,7 @@ def is_installed(package):


def repo_dir(name):
    """Return the absolute path to repository *name* under the repositories dir.

    Anchors the path at ``script_path`` so the result is correct regardless of
    the process's current working directory (the previous relative
    ``os.path.join(dir_repos, name)`` broke when the webui was launched from
    another directory).
    """
    return os.path.join(script_path, dir_repos, name)


def run_python(code, desc=None, errdesc=None):
@@ -215,7 +220,7 @@ def list_extensions(settings_file):

    disabled_extensions = set(settings.get('disabled_extensions', []))

    return [x for x in os.listdir(dir_extensions) if x not in disabled_extensions]
    return [x for x in os.listdir(os.path.join(data_path, dir_extensions)) if x not in disabled_extensions]


def run_extensions_installers(settings_file):
@@ -306,7 +311,7 @@ def prepare_environment():
    if not is_installed("pyngrok") and ngrok:
        run_pip("install pyngrok", "ngrok")

    os.makedirs(dir_repos, exist_ok=True)
    os.makedirs(os.path.join(script_path, dir_repos), exist_ok=True)

    git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", stable_diffusion_commit_hash)
    git_clone(taming_transformers_repo, repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash)
@@ -317,7 +322,7 @@ def prepare_environment():
    if not is_installed("lpips"):
        run_pip(f"install -r {os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}", "requirements for CodeFormer")

    run_pip(f"install -r {requirements_file}", "requirements for Web UI")
    run_pip(f"install -r {os.path.join(script_path, requirements_file)}", "requirements for Web UI")

    run_extensions_installers(settings_file=args.ui_settings_file)

@@ -325,7 +330,7 @@ def prepare_environment():
        version_check(commit)

    if update_all_extensions:
        git_pull_recursive(dir_extensions)
        git_pull_recursive(os.path.join(data_path, dir_extensions))
    
    if "--exit" in sys.argv:
        print("Exiting because of --exit argument")
@@ -341,7 +346,7 @@ def tests(test_dir):
        sys.argv.append("--api")
    if "--ckpt" not in sys.argv:
        sys.argv.append("--ckpt")
        sys.argv.append("./test/test_files/empty.pt")
        sys.argv.append(os.path.join(script_path, "test/test_files/empty.pt"))
    if "--skip-torch-cuda-test" not in sys.argv:
        sys.argv.append("--skip-torch-cuda-test")
    if "--disable-nan-check" not in sys.argv:
@@ -350,7 +355,7 @@ def tests(test_dir):
    print(f"Launching Web UI in another process for testing with arguments: {' '.join(sys.argv[1:])}")

    os.environ['COMMANDLINE_ARGS'] = ""
    with open('test/stdout.txt', "w", encoding="utf8") as stdout, open('test/stderr.txt', "w", encoding="utf8") as stderr:
    with open(os.path.join(script_path, 'test/stdout.txt'), "w", encoding="utf8") as stdout, open(os.path.join(script_path, 'test/stderr.txt'), "w", encoding="utf8") as stderr:
        proc = subprocess.Popen([sys.executable, *sys.argv], stdout=stdout, stderr=stderr)

    import test.server_poll
+4 −1
Original line number Diff line number Diff line
@@ -35,8 +35,11 @@ def model():
    global sd_vae_approx_model

    if sd_vae_approx_model is None:
        model_path = os.path.join(paths.models_path, "VAE-approx", "model.pt")
        sd_vae_approx_model = VAEApprox()
        sd_vae_approx_model.load_state_dict(torch.load(os.path.join(paths.models_path, "VAE-approx", "model.pt"), map_location='cpu' if devices.device.type != 'cuda' else None))
        if not os.path.exists(model_path):
            model_path = os.path.join(paths.script_path, "models", "VAE-approx", "model.pt")
        sd_vae_approx_model.load_state_dict(torch.load(model_path, map_location='cpu' if devices.device.type != 'cuda' else None))
        sd_vae_approx_model.eval()
        sd_vae_approx_model.to(devices.device, devices.dtype)

+2 −1
Original line number Diff line number Diff line
@@ -1751,7 +1751,8 @@ def create_ui():


def reload_javascript():
    head = f'<script type="text/javascript" src="file={os.path.abspath("script.js")}?{os.path.getmtime("script.js")}"></script>\n'
    script_js = os.path.join(script_path, "script.js")
    head = f'<script type="text/javascript" src="file={os.path.abspath(script_js)}?{os.path.getmtime(script_js)}"></script>\n'

    inline = f"{localization.localization_js(shared.opts.localization)};"
    if cmd_opts.theme is not None:
+5 −3
Original line number Diff line number Diff line
import os
import unittest
import requests
from gradio.processing_utils import encode_pil_to_base64
from PIL import Image
from modules.paths import script_path

class TestExtrasWorking(unittest.TestCase):
    def setUp(self):
@@ -19,7 +21,7 @@ class TestExtrasWorking(unittest.TestCase):
            "upscaler_1": "None",
            "upscaler_2": "None",
            "extras_upscaler_2_visibility": 0,
            "image": encode_pil_to_base64(Image.open(r"test/test_files/img2img_basic.png"))
            "image": encode_pil_to_base64(Image.open(os.path.join(script_path, r"test/test_files/img2img_basic.png")))
            }

    def test_simple_upscaling_performed(self):
@@ -31,7 +33,7 @@ class TestPngInfoWorking(unittest.TestCase):
    def setUp(self):
        self.url_png_info = "http://localhost:7860/sdapi/v1/extra-single-image"
        self.png_info = {
            "image": encode_pil_to_base64(Image.open(r"test/test_files/img2img_basic.png"))
            "image": encode_pil_to_base64(Image.open(os.path.join(script_path, r"test/test_files/img2img_basic.png")))
        }

    def test_png_info_performed(self):
@@ -42,7 +44,7 @@ class TestInterrogateWorking(unittest.TestCase):
    def setUp(self):
        self.url_interrogate = "http://localhost:7860/sdapi/v1/extra-single-image"
        self.interrogate = {
            "image": encode_pil_to_base64(Image.open(r"test/test_files/img2img_basic.png")),
            "image": encode_pil_to_base64(Image.open(os.path.join(script_path, r"test/test_files/img2img_basic.png"))),
            "model": "clip"
        }

+5 −3
Original line number Diff line number Diff line
import os
import unittest
import requests
from gradio.processing_utils import encode_pil_to_base64
from PIL import Image
from modules.paths import script_path


class TestImg2ImgWorking(unittest.TestCase):
    def setUp(self):
        self.url_img2img = "http://localhost:7860/sdapi/v1/img2img"
        self.simple_img2img = {
            "init_images": [encode_pil_to_base64(Image.open(r"test/test_files/img2img_basic.png"))],
            "init_images": [encode_pil_to_base64(Image.open(os.path.join(script_path, r"test/test_files/img2img_basic.png")))],
            "resize_mode": 0,
            "denoising_strength": 0.75,
            "mask": None,
@@ -47,11 +49,11 @@ class TestImg2ImgWorking(unittest.TestCase):
        self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200)

    def test_inpainting_masked_performed(self):
        """Inpainting with an explicit mask should return HTTP 200.

        NOTE(review): the mask fixture must be ``mask_basic.png`` — the diff's
        replacement line mistakenly reused ``img2img_basic.png`` as the mask,
        which silently changes what this test exercises. Path is anchored at
        ``script_path`` so the test works from any working directory.
        """
        self.simple_img2img["mask"] = encode_pil_to_base64(Image.open(os.path.join(script_path, r"test/test_files/mask_basic.png")))
        self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200)

    def test_inpainting_with_inverted_masked_performed(self):
        """Inpainting with an inverted mask should return HTTP 200.

        NOTE(review): restores the ``mask_basic.png`` fixture — the diff's
        replacement line mistakenly substituted ``img2img_basic.png`` for the
        mask. Path is anchored at ``script_path`` so the test works from any
        working directory.
        """
        self.simple_img2img["mask"] = encode_pil_to_base64(Image.open(os.path.join(script_path, r"test/test_files/mask_basic.png")))
        self.simple_img2img["inpainting_mask_invert"] = True
        self.assertEqual(requests.post(self.url_img2img, json=self.simple_img2img).status_code, 200)

Loading