FIX: Many different
Different fixes and improvements +VersionUP (beta1)
This commit is contained in:
parent
376c19c62f
commit
0901ab9ed4
@@ -1,4 +1,4 @@
 insightface==0.7.3
-onnx==1.14.0
-onnxruntime==1.15.0
+onnx>=1.14.0
+onnxruntime>=1.15.0
 opencv-python>=4.7.0.72
@@ -14,7 +14,7 @@ from modules.api import api

 import gradio as gr

-from scripts.reactor_swapper import UpscaleOptions, swap_face
+from scripts.reactor_swapper import EnhancementOptions, swap_face
 from scripts.reactor_logger import logger

@@ -78,7 +78,7 @@ def reactor_api(_: gr.Blocks, app: FastAPI):
         gender_s = gender_source
         gender_t = gender_target
         restore_first_bool = True if restore_first == 1 else False
-        up_options = UpscaleOptions(do_restore_first=restore_first_bool, scale=scale, upscaler=get_upscaler(upscaler), upscale_visibility=upscale_visibility,face_restorer=get_face_restorer(face_restorer),restorer_visibility=restorer_visibility)
+        up_options = EnhancementOptions(do_restore_first=restore_first_bool, scale=scale, upscaler=get_upscaler(upscaler), upscale_visibility=upscale_visibility,face_restorer=get_face_restorer(face_restorer),restorer_visibility=restorer_visibility)
         use_model = get_full_model(model)
         if use_model is None:
             Exception("Model not found")
@@ -17,10 +17,10 @@ from modules.paths_internal import models_path
 from modules.images import save_image

 from scripts.reactor_logger import logger
-from scripts.reactor_swapper import UpscaleOptions, swap_face, check_process_halt, reset_messaged
+from scripts.reactor_swapper import EnhancementOptions, swap_face, check_process_halt, reset_messaged
 from scripts.reactor_version import version_flag, app_title
 from scripts.console_log_patch import apply_logging_patch
-from scripts.reactor_helpers import make_grid
+from scripts.reactor_helpers import make_grid, get_image_path


 MODELS_PATH = None
@@ -51,7 +51,7 @@ class FaceSwapScript(scripts.Script):
         with gr.Accordion(f"{app_title}", open=False):
             with gr.Tab("Main"):
                 with gr.Column():
-                    img = gr.inputs.Image(type="pil")
+                    img = gr.Image(type="pil")
                     enable = gr.Checkbox(False, label="Enable", info=f"The Fast and Simple FaceSwap Extension - {version_flag}")
                     save_original = gr.Checkbox(False, label="Save Original", info="Save the original image(s) made before swapping; If you use \"img2img\" - this option will affect with \"Swap in generated\" only")
                     gr.Markdown("<br>")
@@ -110,9 +110,11 @@ class FaceSwapScript(scripts.Script):
                         label="1. Restore Face -> 2. Upscale (-Uncheck- if you want vice versa)",
                         info="Postprocessing Order"
                     )
-                    upscaler_name = gr.inputs.Dropdown(
+                    upscaler_name = gr.Dropdown(
                         choices=[upscaler.name for upscaler in shared.sd_upscalers],
                         label="Upscaler",
+                        value="None",
+                        info="Won't scale if you choose -Swap in Source- via img2img, only 1x-postprocessing will affect (texturing, denoising, restyling etc.)"
                     )
                     gr.Markdown("<br>")
                     with gr.Row():
@@ -127,13 +129,13 @@ class FaceSwapScript(scripts.Script):
                         logger.warning(
                             "You should at least have one model in models directory, please read the doc here : https://github.com/Gourieff/sd-webui-reactor/"
                         )
-                        model = gr.inputs.Dropdown(
+                        model = gr.Dropdown(
                             choices=models,
                             label="Model not found, please download one and reload WebUI",
                         )
                     else:
-                        model = gr.inputs.Dropdown(
-                            choices=models, label="Model", default=models[0]
+                        model = gr.Dropdown(
+                            choices=models, label="Model", value=models[0]
                         )
                     console_logging_level = gr.Radio(
                         ["No log", "Minimum", "Default"],
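
The UI hunks above move from the deprecated gr.inputs.* wrappers to the plain Gradio 3 components, and from the old default= keyword to value=. A minimal sketch of the new style outside the extension, assuming only a stock Gradio 3.x install; the component values below are placeholders, not taken from the commit:

import gradio as gr

with gr.Blocks() as demo:
    img = gr.Image(type="pil")                                                        # replaces gr.inputs.Image(type="pil")
    model = gr.Dropdown(choices=["a.onnx", "b.onnx"], label="Model", value="a.onnx")  # value= replaces default=
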
@@ -178,8 +180,8 @@ class FaceSwapScript(scripts.Script):
         return None

     @property
-    def upscale_options(self) -> UpscaleOptions:
-        return UpscaleOptions(
+    def enhancement_options(self) -> EnhancementOptions:
+        return EnhancementOptions(
             do_restore_first = self.restore_first,
             scale=self.upscaler_scale,
             upscaler=self.upscaler,
@@ -253,17 +255,21 @@ class FaceSwapScript(scripts.Script):
                    for i in range(len(p.init_images)):
                        if len(p.init_images) > 1:
                            logger.info("Swap in %s", i)
-                        result = swap_face(
+                        result, output, swapped = swap_face(
                            self.source,
                            p.init_images[i],
                            source_faces_index=self.source_faces_index,
                            faces_index=self.faces_index,
                            model=self.model,
-                            upscale_options=self.upscale_options,
+                            enhancement_options=self.enhancement_options,
                            gender_source=self.gender_source,
                            gender_target=self.gender_target,
                        )
                        p.init_images[i] = result
+                        # result_path = get_image_path(p.init_images[i], p.outpath_samples, "", p.all_seeds[i], p.all_prompts[i], "txt", p=p, suffix="-swapped")
+                        # if len(output) != 0:
+                        #     with open(result_path, 'w', encoding="utf8") as f:
+                        #         f.writelines(output)

                        if shared.state.interrupted or shared.state.skipped:
                            return
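
In this hunk and the ones that follow, swap_face returns a 3-tuple instead of a bare image: the swapped image, a list of info strings about the detected faces, and a count of swapped faces. A hedged sketch of a call site under that assumption; the argument values below are placeholders, not from the commit:

result, output, swapped = swap_face(
    source_image,                         # placeholder source PIL image
    target_image,                         # placeholder target PIL image
    model="/path/to/inswapper_128.onnx",  # assumption: path to the swap model, as returned by get_full_model()
    enhancement_options=None,             # skip restore/upscale in this sketch
)
if swapped > 0:
    result.save("swapped.png")            # result is a PIL.Image per the swapper hunks below
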
@@ -286,6 +292,7 @@ class FaceSwapScript(scripts.Script):
             orig_infotexts : List[str] = processed.infotexts[processed.index_of_first_image:]

             result_images: List = processed.images
+            # result_info: List = processed.infotexts

             if self.swap_in_generated:
                 logger.info("Working: source face index %s, target face index %s", self.source_faces_index, self.faces_index)
@@ -296,25 +303,31 @@ class FaceSwapScript(scripts.Script):
                        break
                    if len(orig_images) > 1:
                        logger.info("Swap in %s", i)
-                    result = swap_face(
+                    result, output, swapped = swap_face(
                        self.source,
                        img,
                        source_faces_index=self.source_faces_index,
                        faces_index=self.faces_index,
                        model=self.model,
-                        upscale_options=self.upscale_options,
+                        enhancement_options=self.enhancement_options,
                        gender_source=self.gender_source,
                        gender_target=self.gender_target,
                    )
-                    if result is not None:
-                        suffix = "-swapped"
+                    if result is not None and swapped > 0:
                        result_images.append(result)
+                        suffix = "-swapped"
                        try:
-                            save_image(result, p.outpath_samples, "", p.all_seeds[0], p.all_prompts[0], "png",info=info, p=p, suffix=suffix)
+                            img_path = save_image(result, p.outpath_samples, "", p.all_seeds[0], p.all_prompts[0], "png",info=info, p=p, suffix=suffix)
                        except:
                            logger.error("Cannot save a result image - please, check SD WebUI Settings (Saving and Paths)")
-                    else:
+                    elif result is None:
                        logger.error("Cannot create a result image")

+                    # if len(output) != 0:
+                    #     split_fullfn = os.path.splitext(img_path[0])
+                    #     fullfn = split_fullfn[0] + ".txt"
+                    #     with open(fullfn, 'w', encoding="utf8") as f:
+                    #         f.writelines(output)

            if shared.opts.return_grid and len(result_images) > 2 and postprocess_run:
                grid = make_grid(result_images)
@@ -324,7 +337,8 @@ class FaceSwapScript(scripts.Script):
                except:
                    logger.error("Cannot save a grid - please, check SD WebUI Settings (Saving and Paths)")

            processed.images = result_images
+            # processed.infotexts = result_info

    def postprocess_batch(self, p, *args, **kwargs):
        if self.enable and not self.save_original:
@@ -343,13 +357,13 @@ class FaceSwapScript(scripts.Script):
            if self.source is not None:
                logger.info("Working: source face index %s, target face index %s", self.source_faces_index, self.faces_index)
                image: Image.Image = script_pp.image
-                result = swap_face(
+                result, output, swapped = swap_face(
                    self.source,
                    image,
                    source_faces_index=self.source_faces_index,
                    faces_index=self.faces_index,
                    model=self.model,
-                    upscale_options=self.upscale_options,
+                    enhancement_options=self.enhancement_options,
                    gender_source=self.gender_source,
                    gender_target=self.gender_target,
                )
@@ -358,5 +372,11 @@ class FaceSwapScript(scripts.Script):
                    pp.info = {}
                    p.extra_generation_params.update(pp.info)
                    script_pp.image = pp.image
+
+                    # if len(output) != 0:
+                    # result_path = get_image_path(script_pp.image, p.outpath_samples, "", p.all_seeds[0], p.all_prompts[0], "txt", p=p, suffix="-swapped")
+                    # if len(output) != 0:
+                    #     with open(result_path, 'w', encoding="utf8") as f:
+                    #         f.writelines(output)
                except:
                    logger.error("Cannot create a result image")
@@ -1,8 +1,12 @@
+import os
 from collections import Counter
 from PIL import Image
 from math import isqrt, ceil
 from typing import List

+from modules.images import FilenameGenerator, get_next_sequence_number
+from modules import shared, script_callbacks
+
 def make_grid(image_list: List):

     # Count the occurrences of each image size in the image_list
@@ -43,3 +47,56 @@ def make_grid(image_list: List):

     # Return None if there are no images or only one image in the image_list
     return None
+
+def get_image_path(image, path, basename, seed=None, prompt=None, extension='png', p=None, suffix=""):
+
+    namegen = FilenameGenerator(p, seed, prompt, image)
+
+    save_to_dirs = shared.opts.save_to_dirs
+
+    if save_to_dirs:
+        dirname = namegen.apply(shared.opts.directories_filename_pattern or "[prompt_words]").lstrip(' ').rstrip('\\ /')
+        path = os.path.join(path, dirname)
+
+    os.makedirs(path, exist_ok=True)
+
+    if seed is None:
+        file_decoration = ""
+    elif shared.opts.save_to_dirs:
+        file_decoration = shared.opts.samples_filename_pattern or "[seed]"
+    else:
+        file_decoration = shared.opts.samples_filename_pattern or "[seed]-[prompt_spaces]"
+
+    file_decoration = namegen.apply(file_decoration) + suffix
+
+    add_number = shared.opts.save_images_add_number or file_decoration == ''
+
+    if file_decoration != "" and add_number:
+        file_decoration = f"-{file_decoration}"
+
+    if add_number:
+        basecount = get_next_sequence_number(path, basename)
+        fullfn = None
+        for i in range(500):
+            fn = f"{basecount + i:05}" if basename == '' else f"{basename}-{basecount + i:04}"
+            fullfn = os.path.join(path, f"{fn}{file_decoration}.{extension}")
+            if not os.path.exists(fullfn):
+                break
+    else:
+        fullfn = os.path.join(path, f"{file_decoration}.{extension}")
+
+    pnginfo = {}
+
+    params = script_callbacks.ImageSaveParams(image, p, fullfn, pnginfo)
+    # script_callbacks.before_image_saved_callback(params)
+
+    fullfn = params.filename
+
+    fullfn_without_extension, extension = os.path.splitext(params.filename)
+    if hasattr(os, 'statvfs'):
+        max_name_len = os.statvfs(path).f_namemax
+        fullfn_without_extension = fullfn_without_extension[:max_name_len - max(4, len(extension))]
+        params.filename = fullfn_without_extension + extension
+        fullfn = params.filename
+
+    return fullfn
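
The new get_image_path helper mirrors save_image's naming logic but only computes a path without writing anything; the commented-out blocks above use it to plan a "-swapped" text sidecar. A sketch of that use, assuming a PIL image, an A1111 processing object p, and the output list returned by swap_face:

txt_path = get_image_path(
    image,                 # PIL image to derive the filename from
    p.outpath_samples,     # output directory taken from the processing object
    "",                    # empty basename, as in the commented-out calls
    p.all_seeds[0],
    p.all_prompts[0],
    "txt",
    p=p,
    suffix="-swapped",
)
with open(txt_path, "w", encoding="utf8") as f:
    f.writelines(output)   # the face-info strings produced by swap_face
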
@@ -8,7 +8,6 @@ import numpy as np
 from PIL import Image

 import insightface
-import onnxruntime

 from modules.face_restoration import FaceRestoration
 from modules.upscaler import UpscalerData
@@ -21,11 +20,11 @@ import warnings
 np.warnings = warnings
 np.warnings.filterwarnings('ignore')

-providers = onnxruntime.get_available_providers()
+providers = ["CPUExecutionProvider"]


 @dataclass
-class UpscaleOptions:
+class EnhancementOptions:
     do_restore_first: bool = True
     scale: int = 1
     upscaler: UpscalerData = None
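
UpscaleOptions is renamed to EnhancementOptions here, and ONNX inference is pinned to the CPU provider instead of whatever onnxruntime reports as available. A minimal sketch of building the options object, assuming the fields shown in this dataclass and in the API hunk above; in the WebUI the upscaler and face_restorer values would normally come from shared.sd_upscalers and shared.face_restorers:

opts = EnhancementOptions(
    do_restore_first=True,     # restore the face before upscaling
    scale=2,                   # upscale factor used by upscale_image()
    upscaler=None,             # an UpscalerData entry; None skips upscaling
    upscale_visibility=1.0,
    face_restorer=None,        # a FaceRestoration instance; None skips restoration
    restorer_visibility=0.5,
)
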
@@ -34,24 +33,6 @@ class UpscaleOptions:
     restorer_visibility: float = 0.5


-def cosine_distance(vector1: np.ndarray, vector2: np.ndarray) -> float:
-    vec1 = vector1.flatten()
-    vec2 = vector2.flatten()
-
-    dot_product = np.dot(vec1, vec2)
-    norm1 = np.linalg.norm(vec1)
-    norm2 = np.linalg.norm(vec2)
-
-    cosine_distance = 1 - (dot_product / (norm1 * norm2))
-    return cosine_distance
-
-
-def cosine_similarity(test_vec: np.ndarray, source_vecs: List[np.ndarray]) -> float:
-    cos_dist = sum(cosine_distance(test_vec, source_vec) for source_vec in source_vecs)
-    average_cos_dist = cos_dist / len(source_vecs)
-    return average_cos_dist
-
-
 MESSAGED_STOPPED = False
 MESSAGED_SKIPPED = False

@@ -102,76 +83,88 @@ def getFaceSwapModel(model_path: str):
     return FS_MODEL


-def upscale_image(image: Image, upscale_options: UpscaleOptions):
+def restore_face(image: Image, enhancement_options: EnhancementOptions):
+    result_image = image
+
+    if check_process_halt(msgforced=True):
+        return result_image
+
+    if enhancement_options.face_restorer is not None:
+        original_image = result_image.copy()
+        logger.info("Restoring the face with %s", enhancement_options.face_restorer.name())
+        numpy_image = np.array(result_image)
+        numpy_image = enhancement_options.face_restorer.restore(numpy_image)
+        restored_image = Image.fromarray(numpy_image)
+        result_image = Image.blend(
+            original_image, restored_image, enhancement_options.restorer_visibility
+        )
+
+    return result_image
+
+def upscale_image(image: Image, enhancement_options: EnhancementOptions):
+    result_image = image
+
+    if check_process_halt(msgforced=True):
+        return result_image
+
+    if enhancement_options.upscaler is not None and enhancement_options.upscaler.name != "None":
+        original_image = result_image.copy()
+        logger.info(
+            "Upscaling with %s scale = %s",
+            enhancement_options.upscaler.name,
+            enhancement_options.scale,
+        )
+        result_image = enhancement_options.upscaler.scaler.upscale(
+            original_image, enhancement_options.scale, enhancement_options.upscaler.data_path
+        )
+        if enhancement_options.scale == 1:
+            result_image = Image.blend(
+                original_image, result_image, enhancement_options.upscale_visibility
+            )
+
+    return result_image
+
+def enhance_image(image: Image, enhancement_options: EnhancementOptions):
     result_image = image

     if check_process_halt(msgforced=True):
         return result_image

-    if upscale_options.do_restore_first:
-        if upscale_options.face_restorer is not None:
-            original_image = result_image.copy()
-            logger.info("Restoring the face with %s", upscale_options.face_restorer.name())
-            numpy_image = np.array(result_image)
-            numpy_image = upscale_options.face_restorer.restore(numpy_image)
-            restored_image = Image.fromarray(numpy_image)
-            result_image = Image.blend(
-                original_image, restored_image, upscale_options.restorer_visibility
-            )
-        if upscale_options.upscaler is not None and upscale_options.upscaler.name != "None":
-            original_image = result_image.copy()
-            logger.info(
-                "Upscaling with %s scale = %s",
-                upscale_options.upscaler.name,
-                upscale_options.scale,
-            )
-            result_image = upscale_options.upscaler.scaler.upscale(
-                original_image, upscale_options.scale, upscale_options.upscaler.data_path
-            )
-            if upscale_options.scale == 1:
-                result_image = Image.blend(
-                    original_image, result_image, upscale_options.upscale_visibility
-                )
+    if enhancement_options.do_restore_first:
+        result_image = restore_face(result_image, enhancement_options)
+        result_image = upscale_image(result_image, enhancement_options)
     else:
-        if upscale_options.upscaler is not None and upscale_options.upscaler.name != "None":
-            original_image = result_image.copy()
-            logger.info(
-                "Upscaling with %s scale = %s",
-                upscale_options.upscaler.name,
-                upscale_options.scale,
-            )
-            result_image = upscale_options.upscaler.scaler.upscale(
-                image, upscale_options.scale, upscale_options.upscaler.data_path
-            )
-            if upscale_options.scale == 1:
-                result_image = Image.blend(
-                    original_image, result_image, upscale_options.upscale_visibility
-                )
-        if upscale_options.face_restorer is not None:
-            original_image = result_image.copy()
-            logger.info("Restoring the face with %s", upscale_options.face_restorer.name())
-            numpy_image = np.array(result_image)
-            numpy_image = upscale_options.face_restorer.restore(numpy_image)
-            restored_image = Image.fromarray(numpy_image)
-            result_image = Image.blend(
-                original_image, restored_image, upscale_options.restorer_visibility
-            )
+        result_image = upscale_image(result_image, enhancement_options)
+        result_image = restore_face(result_image, enhancement_options)

     return result_image

-def get_face_gender(
-    face,
-    face_index,
-    gender_condition,
-    operated: str
-):
+def get_gender(face, face_index):
     gender = [
         x.sex
         for x in face
     ]
     gender.reverse()
-    face_gender = gender[face_index]
+    try:
+        face_gender = gender[face_index]
+    except:
+        logger.error("Gender Detection: No face with index = %s was found", face_index)
+        return "None"
+    return face_gender
+
+def get_face_gender(
+    face,
+    face_index,
+    gender_condition,
+    operated: str,
+    gender_detected,
+):
+    face_gender = gender_detected
+    if face_gender == "None":
+        return None, 0
     logger.info("%s Face %s: Detected Gender -%s-", operated, face_index, face_gender)
     if (gender_condition == 1 and face_gender == "F") or (gender_condition == 2 and face_gender == "M"):
         logger.info("OK - Detected Gender matches Condition")
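
The old monolithic upscale_image is split above into restore_face, upscale_image, and a thin enhance_image wrapper that only decides the order, so callers go through a single entry point. A one-line sketch, assuming a PIL image img and an EnhancementOptions instance opts as in the earlier sketch:

img = enhance_image(img, opts)   # restore_face() then upscale_image(), or the reverse when opts.do_restore_first is False
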
@@ -183,10 +176,22 @@ def get_face_gender(
         logger.info("WRONG - Detected Gender doesn't match Condition")
         return sorted(face, key=lambda x: x.bbox[0])[face_index], 1

+def get_face_age(face, face_index):
+    age = [
+        x.age
+        for x in face
+    ]
+    age.reverse()
+    try:
+        face_age = age[face_index]
+    except:
+        logger.error("Age Detection: No face with index = %s was found", face_index)
+        return "None"
+    return face_age

-def reget_face_single(img_data, det_size, face_index):
+def reget_face_single(img_data, det_size, face_index, gender_source, gender_target):
     det_size_half = (det_size[0] // 2, det_size[1] // 2)
-    return get_face_single(img_data, face_index=face_index, det_size=det_size_half)
+    return get_face_single(img_data, face_index=face_index, det_size=det_size_half, gender_source=gender_source, gender_target=gender_target)


 def get_face_single(img_data: np.ndarray, face_index=0, det_size=(640, 640), gender_source=0, gender_target=0):
@@ -198,23 +203,39 @@ def get_face_single(img_data: np.ndarray, face_index=0, det_size=(640, 640), gender_source=0, gender_target=0):
     if os.path.exists(buffalo_path):
         os.remove(buffalo_path)

+    face_age = "None"
+    try:
+        face_age = get_face_age(face, face_index)
+    except:
+        logger.error("Cannot detect any Age for Face index = %s", face_index)
+
+    face_gender = "None"
+    try:
+        face_gender = get_gender(face, face_index)
+        gender_detected = face_gender
+        face_gender = "Female" if face_gender == "F" else ("Male" if face_gender == "M" else "None")
+    except:
+        logger.error("Cannot detect any Gender for Face index = %s", face_index)
+
     if gender_source != 0:
         if len(face) == 0 and det_size[0] > 320 and det_size[1] > 320:
-            return reget_face_single(img_data, det_size, face_index)
+            return reget_face_single(img_data, det_size, face_index, gender_source, gender_target)
-        return get_face_gender(face,face_index,gender_source,"Source")
+        faces, wrong_gender = get_face_gender(face,face_index,gender_source,"Source",gender_detected)
+        return faces, wrong_gender, face_age, face_gender

     if gender_target != 0:
         if len(face) == 0 and det_size[0] > 320 and det_size[1] > 320:
-            return reget_face_single(img_data, det_size, face_index)
+            return reget_face_single(img_data, det_size, face_index, gender_source, gender_target)
-        return get_face_gender(face,face_index,gender_target,"Target")
+        faces, wrong_gender = get_face_gender(face,face_index,gender_target,"Target",gender_detected)
+        return faces, wrong_gender, face_age, face_gender

     if len(face) == 0 and det_size[0] > 320 and det_size[1] > 320:
-        return reget_face_single(img_data, det_size, face_index)
+        return reget_face_single(img_data, det_size, face_index, gender_source, gender_target)

     try:
-        return sorted(face, key=lambda x: x.bbox[0])[face_index], 0
+        return sorted(face, key=lambda x: x.bbox[0])[face_index], 0, face_age, face_gender
     except IndexError:
-        return None, 0
+        return None, 0, face_age, face_gender


 def swap_face(
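
Note that get_face_single above now returns detected age and gender alongside the face and the wrong_gender flag. A hedged sketch of consuming the new 4-tuple; img_data is assumed to be the BGR numpy array produced by the cv2.cvtColor calls in the next hunks:

face, wrong_gender, face_age, face_gender = get_face_single(
    img_data,
    face_index=0,
    gender_source=0,     # 0 disables gender filtering, per the hunks above
)
if face is not None and wrong_gender == 0:
    print(f"Face 0 looks {face_gender}, about {face_age} y.o.")
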
@@ -223,7 +244,7 @@ def swap_face(
     model: Union[str, None] = None,
     source_faces_index: List[int] = [0],
     faces_index: List[int] = [0],
-    upscale_options: Union[UpscaleOptions, None] = None,
+    enhancement_options: Union[EnhancementOptions, None] = None,
     gender_source: int = 0,
     gender_target: int = 0,
 ):
@@ -250,52 +271,88 @@ def swap_face(
         source_img = cv2.cvtColor(np.array(source_img), cv2.COLOR_RGB2BGR)
         target_img = cv2.cvtColor(np.array(target_img), cv2.COLOR_RGB2BGR)

-        source_face, wrong_gender = get_face_single(source_img, face_index=source_faces_index[0], gender_source=gender_source)
+        output: List = []
+        output_info: str = ""
+        swapped = 0
+
+        logger.info("Detecting Source Face, Index = %s", source_faces_index[0])
+        source_face, wrong_gender, source_age, source_gender = get_face_single(source_img, face_index=source_faces_index[0], gender_source=gender_source)
+        if source_age != "None" or source_gender != "None":
+            logger.info("Detected: -%s- y.o. %s", source_age, source_gender)
+
+        output_info = f"SourceFaceIndex={source_faces_index[0]};Age={source_age};Gender={source_gender}\n"
+        output.append(output_info)
+
         if len(source_faces_index) != 0 and len(source_faces_index) != 1 and len(source_faces_index) != len(faces_index):
             logger.info("Source Faces must have no entries (default=0), one entry, or same number of entries as target faces.")
         elif source_face is not None:

             result = target_img
             face_swapper = getFaceSwapModel(model)

             source_face_idx = 0

-            swapped = 0
-
             for face_num in faces_index:
                 if len(source_faces_index) > 1 and source_face_idx > 0:
-                    source_face, wrong_gender = get_face_single(source_img, face_index=source_faces_index[source_face_idx], gender_source=gender_source)
+                    logger.info("Detecting Source Face, Index = %s", source_faces_index[source_face_idx])
+                    source_face, wrong_gender, source_age, source_gender = get_face_single(source_img, face_index=source_faces_index[source_face_idx], gender_source=gender_source)
+                    if source_age != "None" or source_gender != "None":
+                        logger.info("Detected: -%s- y.o. %s", source_age, source_gender)
+
+                    output_info = f"SourceFaceIndex={source_faces_index[source_face_idx]};Age={source_age};Gender={source_gender}\n"
+                    output.append(output_info)
+
                 source_face_idx += 1

                 if source_face is not None and wrong_gender == 0:
-                    target_face, wrong_gender = get_face_single(target_img, face_index=face_num, gender_target=gender_target)
+                    logger.info("Detecting Target Face, Index = %s", face_num)
+                    target_face, wrong_gender, target_age, target_gender = get_face_single(target_img, face_index=face_num, gender_target=gender_target)
+                    if target_age != "None" or target_gender != "None":
+                        logger.info("Detected: -%s- y.o. %s", target_age, target_gender)
+
+                    output_info = f"TargetFaceIndex={face_num};Age={target_age};Gender={target_gender}\n"
+                    output.append(output_info)
+
                     if target_face is not None and wrong_gender == 0:
+                        logger.info("Swapping Source into Target")
                         result = face_swapper.get(result, target_face, source_face)
                         swapped += 1

                     elif wrong_gender == 1:
                         wrong_gender = 0

                         if source_face_idx == len(source_faces_index):
                             result_image = Image.fromarray(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
-                            if upscale_options is not None:
-                                result_image = upscale_image(result_image, upscale_options)
-                            return result_image
+                            if enhancement_options is not None and len(source_faces_index) > 1:
+                                result_image = enhance_image(result_image, enhancement_options)
+
+                            return result_image, output, swapped
+
                     else:
                         logger.info(f"No target face found for {face_num}")

                 elif wrong_gender == 1:
                     wrong_gender = 0

                     if source_face_idx == len(source_faces_index):
                         result_image = Image.fromarray(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
-                        if upscale_options is not None:
-                            result_image = upscale_image(result_image, upscale_options)
-                        return result_image
+                        if enhancement_options is not None and len(source_faces_index) > 1:
+                            result_image = enhance_image(result_image, enhancement_options)
+
+                        return result_image, output, swapped
+
                 else:
                     logger.info(f"No source face found for face number {source_face_idx}.")

             result_image = Image.fromarray(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
-            if upscale_options is not None and swapped > 0:
-                result_image = upscale_image(result_image, upscale_options)
+            if enhancement_options is not None and swapped > 0:
+                result_image = enhance_image(result_image, enhancement_options)

         else:
             logger.info("No source face(s) found")
-    return result_image
+    return result_image, output, swapped
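
swap_face now also returns the collected face-info strings; each entry follows the f-string format shown above, e.g. "TargetFaceIndex=0;Age=31;Gender=Female\n". A small sketch of parsing them back into fields, purely illustrative and not part of the commit:

for line in output:
    fields = dict(item.split("=", 1) for item in line.strip().split(";"))
    print(fields.get("SourceFaceIndex") or fields.get("TargetFaceIndex"), fields.get("Age"), fields.get("Gender"))
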
@@ -1,5 +1,5 @@
 app_title = "ReActor"
-version_flag = "v0.4.1"
+version_flag = "v0.4.2-b1"

 from scripts.reactor_logger import logger, get_Run, set_Run
