Unverified Commit d9bd7ada authored by AUTOMATIC1111's avatar AUTOMATIC1111 Committed by GitHub
Browse files

Merge pull request #10820 from akx/report-error

Add & use modules.errors.print_error
parents 78a602ae 52b8752e
Loading
Loading
Loading
Loading
+2 −5
Original line number Diff line number Diff line
import os
import sys
import traceback

from basicsr.utils.download_util import load_file_from_url

from modules.errors import print_error
from modules.upscaler import Upscaler, UpscalerData
from ldsr_model_arch import LDSR
from modules import shared, script_callbacks
@@ -51,10 +50,8 @@ class UpscalerLDSR(Upscaler):

        try:
            return LDSR(model, yaml)

        except Exception:
            print("Error importing LDSR:", file=sys.stderr)
            print(traceback.format_exc(), file=sys.stderr)
            print_error("Error importing LDSR", exc_info=True)
        return None

    def do_upscale(self, img, path):
+3 −3
Original line number Diff line number Diff line
import os.path
import sys
import traceback

import PIL.Image
import numpy as np
@@ -12,6 +11,8 @@ from basicsr.utils.download_util import load_file_from_url
import modules.upscaler
from modules import devices, modelloader, script_callbacks
from scunet_model_arch import SCUNet as net

from modules.errors import print_error
from modules.shared import opts


@@ -38,8 +39,7 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
                scaler_data = modules.upscaler.UpscalerData(name, file, self, 4)
                scalers.append(scaler_data)
            except Exception:
                print(f"Error loading ScuNET model: {file}", file=sys.stderr)
                print(traceback.format_exc(), file=sys.stderr)
                print_error(f"Error loading ScuNET model: {file}", exc_info=True)
        if add_model2:
            scaler_data2 = modules.upscaler.UpscalerData(self.model_name2, self.model_url2, self)
            scalers.append(scaler_data2)
+4 −3
Original line number Diff line number Diff line
@@ -16,6 +16,7 @@ from secrets import compare_digest
import modules.shared as shared
from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui, postprocessing
from modules.api import models
from modules.errors import print_error
from modules.shared import opts
from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images
from modules.textual_inversion.textual_inversion import create_embedding, train_embedding
@@ -109,7 +110,6 @@ def api_middleware(app: FastAPI):
        from rich.console import Console
        console = Console()
    except Exception:
        import traceback
        rich_available = False

    @app.middleware("http")
@@ -140,11 +140,12 @@ def api_middleware(app: FastAPI):
            "errors": str(e),
        }
        if not isinstance(e, HTTPException): # do not print backtrace on known httpexceptions
            print(f"API error: {request.method}: {request.url} {err}")
            message = f"API error: {request.method}: {request.url} {err}"
            if rich_available:
                print(message)
                console.print_exception(show_locals=True, max_frames=2, extra_lines=1, suppress=[anyio, starlette], word_wrap=False, width=min([console.width, 200]))
            else:
                traceback.print_exc()
                print_error(message, exc_info=True)
        return JSONResponse(status_code=vars(e).get('status_code', 500), content=jsonable_encoder(err))

    @app.middleware("http")
+9 −13
Original line number Diff line number Diff line
import html
import sys
import threading
import traceback
import time

from modules import shared, progress
from modules.errors import print_error

queue_lock = threading.Lock()

@@ -56,16 +55,14 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
        try:
            res = list(func(*args, **kwargs))
        except Exception as e:
            # When printing out our debug argument list, do not print out more than a MB of text
            max_debug_str_len = 131072 # (1024*1024)/8

            print("Error completing request", file=sys.stderr)
            argStr = f"Arguments: {args} {kwargs}"
            print(argStr[:max_debug_str_len], file=sys.stderr)
            if len(argStr) > max_debug_str_len:
                print(f"(Argument list truncated at {max_debug_str_len}/{len(argStr)} characters)", file=sys.stderr)

            print(traceback.format_exc(), file=sys.stderr)
            # When printing out our debug argument list,
            # do not print out more than 128 KB of text (131072 characters)
            max_debug_str_len = 131072
            message = "Error completing request"
            arg_str = f"Arguments: {args} {kwargs}"[:max_debug_str_len]
            if len(arg_str) > max_debug_str_len:
                arg_str += f" (Argument list truncated at {max_debug_str_len}/{len(arg_str)} characters)"
            print_error(f"{message}\n{arg_str}", exc_info=True)

            shared.state.job = ""
            shared.state.job_count = 0
@@ -108,4 +105,3 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False):
        return tuple(res)

    return f
+4 −6
Original line number Diff line number Diff line
import os
import sys
import traceback

import cv2
import torch
@@ -8,6 +6,7 @@ import torch
import modules.face_restoration
import modules.shared
from modules import shared, devices, modelloader
from modules.errors import print_error
from modules.paths import models_path

# codeformer people made a choice to include modified basicsr library to their project which makes
@@ -105,8 +104,8 @@ def setup_model(dirname):
                            restored_face = tensor2img(output, rgb2bgr=True, min_max=(-1, 1))
                        del output
                        torch.cuda.empty_cache()
                    except Exception as error:
                        print(f'\tFailed inference for CodeFormer: {error}', file=sys.stderr)
                    except Exception:
                        print_error('Failed inference for CodeFormer', exc_info=True)
                        restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=(-1, 1))

                    restored_face = restored_face.astype('uint8')
@@ -135,7 +134,6 @@ def setup_model(dirname):
        shared.face_restorers.append(codeformer)

    except Exception:
        print("Error setting up CodeFormer:", file=sys.stderr)
        print(traceback.format_exc(), file=sys.stderr)
        print_error("Error setting up CodeFormer", exc_info=True)

   # sys.path = stored_sys_path
Loading