UPDATE: More unique filenames
To prevent conflicts with other extensions when importing modules and scripts; plus version bump (beta4)
parent 2c7187a3af
commit 9a86140cf2
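Why the rename matters: Automatic1111 extensions keep their code in a scripts/ folder and import it through the shared top-level scripts package (exactly as the imports in the hunks below do), so two extensions that both ship a generically named file such as scripts/logger.py end up competing for the same sys.modules entry, and one of them silently imports the other's module. Below is a minimal, self-contained sketch of that collision; the two extension directories and their module bodies are invented for illustration, and the snippet models only the generic Python import behaviour, not the WebUI's actual script loader.

```python
# Self-contained sketch of the module-name clash this commit avoids.
# The extension directories and logger.py bodies are hypothetical.
import importlib
import sys
import tempfile
from pathlib import Path

root = Path(tempfile.mkdtemp())
for ext, marker in [("other-extension", "other"), ("sd-webui-reactor", "reactor")]:
    pkg = root / ext / "scripts"
    pkg.mkdir(parents=True)
    (pkg / "logger.py").write_text(f"NAME = {marker!r}\n")

# Whichever extension directory is searched first claims sys.modules["scripts.logger"].
sys.path.insert(0, str(root / "sd-webui-reactor"))
sys.path.insert(0, str(root / "other-extension"))  # ends up first on sys.path

logger_mod = importlib.import_module("scripts.logger")
print(logger_mod.NAME)  # prints "other": ReActor would silently get the wrong module

# A unique name such as scripts.reactor_logger cannot be shadowed this way,
# which is what renaming logger.py to reactor_logger.py buys.
```

Renaming the modules to reactor_logger, reactor_swapper, reactor_version, reactor_helpers and reactor_globals gives each one a name no other extension is likely to claim, which is all this commit changes besides the version flag.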
@@ -2,7 +2,7 @@

<img src="example/ReActor_logo_red.png" alt="logo" width="180px"/>

<hr>
[](https://github.com/Gourieff/sd-webui-reactor/commits/main)

[](https://github.com/Gourieff/sd-webui-reactor/issues?cacheSeconds=0)
@@ -2,7 +2,7 @@

<img src="example/ReActor_logo_red.png" alt="logo" width="180px"/>

<hr>
[](https://github.com/Gourieff/sd-webui-reactor/commits/main)

[](https://github.com/Gourieff/sd-webui-reactor/issues?cacheSeconds=0)
@@ -14,7 +14,7 @@ from insightface.model_zoo import model_zoo
import onnxruntime
import onnx
from onnx import numpy_helper
-from scripts.logger import logger
+from scripts.reactor_logger import logger


def patched_get_model(self, **kwargs):
@@ -14,8 +14,8 @@ from modules.api import api

import gradio as gr

-from scripts.swapper import UpscaleOptions, swap_face
-from scripts.logger import logger
+from scripts.reactor_swapper import UpscaleOptions, swap_face
+from scripts.reactor_logger import logger


def default_file_path():
@@ -1,362 +1,362 @@
import os, glob
import gradio as gr
from PIL import Image

from typing import List

import modules.scripts as scripts
from modules.upscaler import Upscaler, UpscalerData
from modules import scripts, shared, images, scripts_postprocessing
from modules.processing import (
    Processed,
    StableDiffusionProcessing,
    StableDiffusionProcessingImg2Img,
)
from modules.face_restoration import FaceRestoration
from modules.paths_internal import models_path
from modules.images import save_image

-from scripts.logger import logger
-from scripts.swapper import UpscaleOptions, swap_face, check_process_halt, reset_messaged
-from scripts.version import version_flag, app_title
+from scripts.reactor_logger import logger
+from scripts.reactor_swapper import UpscaleOptions, swap_face, check_process_halt, reset_messaged
+from scripts.reactor_version import version_flag, app_title
from scripts.console_log_patch import apply_logging_patch
-from scripts.helpers import make_grid
+from scripts.reactor_helpers import make_grid


MODELS_PATH = None

def get_models():
    global MODELS_PATH
    models_path_init = os.path.join(models_path, "insightface/*")
    models = glob.glob(models_path_init)
    models = [x for x in models if x.endswith(".onnx") or x.endswith(".pth")]
    models_names = []
    for model in models:
        model_path = os.path.split(model)
        if MODELS_PATH is None:
            MODELS_PATH = model_path[0]
        model_name = model_path[1]
        models_names.append(model_name)
    return models_names


class FaceSwapScript(scripts.Script):
    def title(self):
        return f"{app_title}"

    def show(self, is_img2img):
        return scripts.AlwaysVisible

    def ui(self, is_img2img):
        with gr.Accordion(f"{app_title}", open=False):
            with gr.Tab("Main"):
                with gr.Column():
                    img = gr.inputs.Image(type="pil")
                    enable = gr.Checkbox(False, label="Enable", info=f"The Fast and Simple FaceSwap Extension - {version_flag}")
                    save_original = gr.Checkbox(False, label="Save Original", info="Save the original image(s) made before swapping; If you use \"img2img\" - this option will affect with \"Swap in generated\" only")
                    gr.Markdown("<br>")
                    gr.Markdown("Source Image (above):")
                    with gr.Row():
                        source_faces_index = gr.Textbox(
                            value="0",
                            placeholder="Which face(s) to use as Source (comma separated)",
                            label="Comma separated face number(s); Example: 0,2,1",
                        )
                        gender_source = gr.Radio(
                            ["No", "Female Only", "Male Only"],
                            value="No",
                            label="Gender Detection (Source)",
                            type="index",
                        )
                    gr.Markdown("<br>")
                    gr.Markdown("Target Image (result):")
                    with gr.Row():
                        faces_index = gr.Textbox(
                            value="0",
                            placeholder="Which face(s) to Swap into Target (comma separated)",
                            label="Comma separated face number(s); Example: 1,0,2",
                        )
                        gender_target = gr.Radio(
                            ["No", "Female Only", "Male Only"],
                            value="No",
                            label="Gender Detection (Target)",
                            type="index",
                        )
                    gr.Markdown("<br>")
                    with gr.Row():
                        face_restorer_name = gr.Radio(
                            label="Restore Face",
                            choices=["None"] + [x.name() for x in shared.face_restorers],
                            value=shared.face_restorers[0].name(),
                            type="value",
                        )
                        face_restorer_visibility = gr.Slider(
                            0, 1, 1, step=0.1, label="Restore Face Visibility"
                        )
                    gr.Markdown("<br>")
                    swap_in_source = gr.Checkbox(
                        False,
                        label="Swap in source image",
                        visible=is_img2img,
                    )
                    swap_in_generated = gr.Checkbox(
                        True,
                        label="Swap in generated image",
                        visible=is_img2img,
                    )
            with gr.Tab("Upscale"):
                restore_first = gr.Checkbox(
                    True,
                    label="1. Restore Face -> 2. Upscale (-Uncheck- if you want vice versa)",
                    info="Postprocessing Order"
                )
                upscaler_name = gr.inputs.Dropdown(
                    choices=[upscaler.name for upscaler in shared.sd_upscalers],
                    label="Upscaler",
                )
                gr.Markdown("<br>")
                with gr.Row():
                    upscaler_scale = gr.Slider(1, 8, 1, step=0.1, label="Scale by")
                    upscaler_visibility = gr.Slider(
                        0, 1, 1, step=0.1, label="Upscaler Visibility (if scale = 1)"
                    )
            with gr.Tab("Settings"):
                models = get_models()
                with gr.Row():
                    if len(models) == 0:
                        logger.warning(
                            "You should at least have one model in models directory, please read the doc here : https://github.com/Gourieff/sd-webui-reactor/"
                        )
                        model = gr.inputs.Dropdown(
                            choices=models,
                            label="Model not found, please download one and reload WebUI",
                        )
                    else:
                        model = gr.inputs.Dropdown(
                            choices=models, label="Model", default=models[0]
                        )
                    console_logging_level = gr.Radio(
                        ["No log", "Minimum", "Default"],
                        value="Minimum",
                        label="Console Log Level",
                        type="index",
                    )

        return [
            img,
            enable,
            source_faces_index,
            faces_index,
            model,
            face_restorer_name,
            face_restorer_visibility,
            restore_first,
            upscaler_name,
            upscaler_scale,
            upscaler_visibility,
            swap_in_source,
            swap_in_generated,
            console_logging_level,
            gender_source,
            gender_target,
            save_original,
        ]


    @property
    def upscaler(self) -> UpscalerData:
        for upscaler in shared.sd_upscalers:
            if upscaler.name == self.upscaler_name:
                return upscaler
        return None

    @property
    def face_restorer(self) -> FaceRestoration:
        for face_restorer in shared.face_restorers:
            if face_restorer.name() == self.face_restorer_name:
                return face_restorer
        return None

    @property
    def upscale_options(self) -> UpscaleOptions:
        return UpscaleOptions(
            do_restore_first = self.restore_first,
            scale=self.upscaler_scale,
            upscaler=self.upscaler,
            face_restorer=self.face_restorer,
            upscale_visibility=self.upscaler_visibility,
            restorer_visibility=self.face_restorer_visibility,
        )

    def process(
        self,
        p: StableDiffusionProcessing,
        img,
        enable,
        source_faces_index,
        faces_index,
        model,
        face_restorer_name,
        face_restorer_visibility,
        restore_first,
        upscaler_name,
        upscaler_scale,
        upscaler_visibility,
        swap_in_source,
        swap_in_generated,
        console_logging_level,
        gender_source,
        gender_target,
        save_original,
    ):
        self.enable = enable
        if self.enable:

            reset_messaged()
            if check_process_halt():
                return

            global MODELS_PATH
            self.source = img
            self.face_restorer_name = face_restorer_name
            self.upscaler_scale = upscaler_scale
            self.upscaler_visibility = upscaler_visibility
            self.face_restorer_visibility = face_restorer_visibility
            self.restore_first = restore_first
            self.upscaler_name = upscaler_name
            self.swap_in_generated = swap_in_generated
            self.model = os.path.join(MODELS_PATH,model)
            self.console_logging_level = console_logging_level
            self.gender_source = gender_source
            self.gender_target = gender_target
            self.save_original = save_original
            if self.gender_source is None or self.gender_source == "No":
                self.gender_source = 0
            if self.gender_target is None or self.gender_target == "No":
                self.gender_target = 0
            self.source_faces_index = [
                int(x) for x in source_faces_index.strip(",").split(",") if x.isnumeric()
            ]
            self.faces_index = [
                int(x) for x in faces_index.strip(",").split(",") if x.isnumeric()
            ]
            if len(self.source_faces_index) == 0:
                self.source_faces_index = [0]
            if len(self.faces_index) == 0:
                self.faces_index = [0]

            if self.source is not None:
                apply_logging_patch(console_logging_level)
                if isinstance(p, StableDiffusionProcessingImg2Img) and swap_in_source:
                    logger.info("Working: source face index %s, target face index %s", self.source_faces_index, self.faces_index)

                    for i in range(len(p.init_images)):
                        if len(p.init_images) > 1:
                            logger.info("Swap in %s", i)
                        result = swap_face(
                            self.source,
                            p.init_images[i],
                            source_faces_index=self.source_faces_index,
                            faces_index=self.faces_index,
                            model=self.model,
                            upscale_options=self.upscale_options,
                            gender_source=self.gender_source,
                            gender_target=self.gender_target,
                        )
                        p.init_images[i] = result

                        if shared.state.interrupted or shared.state.skipped:
                            return

            else:
                logger.error("Please provide a source face")

    def postprocess(self, p: StableDiffusionProcessing, processed: Processed, *args):
        if self.enable:

            reset_messaged()
            if check_process_halt():
                return

            if self.save_original:

                postprocess_run: bool = True

                orig_images : List[Image.Image] = processed.images[processed.index_of_first_image:]
                orig_infotexts : List[str] = processed.infotexts[processed.index_of_first_image:]

                result_images: List = processed.images

                if self.swap_in_generated:
                    logger.info("Working: source face index %s, target face index %s", self.source_faces_index, self.faces_index)
                    if self.source is not None:
                        for i,(img,info) in enumerate(zip(orig_images, orig_infotexts)):
                            if check_process_halt():
                                postprocess_run = False
                                break
                            if len(orig_images) > 1:
                                logger.info("Swap in %s", i)
                            result = swap_face(
                                self.source,
                                img,
                                source_faces_index=self.source_faces_index,
                                faces_index=self.faces_index,
                                model=self.model,
                                upscale_options=self.upscale_options,
                                gender_source=self.gender_source,
                                gender_target=self.gender_target,
                            )
                            if result is not None:
                                suffix = "-swapped"
                                result_images.append(result)
                                try:
                                    save_image(result, p.outpath_samples, "", p.all_seeds[0], p.all_prompts[0], "png",info=info, p=p, suffix=suffix)
                                except:
                                    logger.error("Cannot save a result image - please, check SD WebUI Settings (Saving and Paths)")
                            else:
                                logger.error("Cannot create a result image")

                if shared.opts.return_grid and len(result_images) > 2 and postprocess_run:
                    grid = make_grid(result_images)
                    result_images.insert(0, grid)
                    try:
                        save_image(grid, p.outpath_grids, "grid", p.all_seeds[0], p.all_prompts[0], shared.opts.grid_format, info=info, short_filename=not shared.opts.grid_extended_filename, p=p, grid=True)
                    except:
                        logger.error("Cannot save a grid - please, check SD WebUI Settings (Saving and Paths)")

                processed.images = result_images

    def postprocess_batch(self, p, *args, **kwargs):
        if self.enable and not self.save_original:
            images = kwargs["images"]

    def postprocess_image(self, p, script_pp: scripts.PostprocessImageArgs, *args):
        if self.enable and self.swap_in_generated and not self.save_original:

            current_job_number = shared.state.job_no + 1
            job_count = shared.state.job_count
            if current_job_number == job_count:
                reset_messaged()
            if check_process_halt():
                return

            if self.source is not None:
                logger.info("Working: source face index %s, target face index %s", self.source_faces_index, self.faces_index)
                image: Image.Image = script_pp.image
                result = swap_face(
                    self.source,
                    image,
                    source_faces_index=self.source_faces_index,
                    faces_index=self.faces_index,
                    model=self.model,
                    upscale_options=self.upscale_options,
                    gender_source=self.gender_source,
                    gender_target=self.gender_target,
                )
                try:
                    pp = scripts_postprocessing.PostprocessedImage(result)
                    pp.info = {}
                    p.extra_generation_params.update(pp.info)
                    script_pp.image = pp.image
                except:
                    logger.error("Cannot create a result image")
@@ -4,16 +4,6 @@ from math import isqrt, ceil
from typing import List

def make_grid(image_list: List):
-    """
-    Creates a square image by combining multiple images in a grid pattern.
-
-    Args:
-        image_list (list): List of PIL Image objects to be combined.
-
-    Returns:
-        PIL Image object: The resulting square image.
-        None: If the image_list is empty or contains only one image.
-    """

    # Count the occurrences of each image size in the image_list
    size_counter = Counter(image.size for image in image_list)
@@ -52,4 +42,4 @@ def make_grid(image_list: List):
        return square_image

    # Return None if there are no images or only one image in the image_list
    return None
@@ -3,7 +3,7 @@ import copy
import sys

from modules import shared
-from scripts.globals import IS_RUN
+from scripts.reactor_globals import IS_RUN


class ColoredFormatter(logging.Formatter):
@@ -1,301 +1,301 @@
import copy
import os
from dataclasses import dataclass
from typing import List, Union

import cv2
import numpy as np
from PIL import Image

import insightface
import onnxruntime

from modules.face_restoration import FaceRestoration
from modules.upscaler import UpscalerData
from modules.shared import state
from modules.paths_internal import models_path
-from scripts.logger import logger
+from scripts.reactor_logger import logger

import warnings

np.warnings = warnings
np.warnings.filterwarnings('ignore')

providers = onnxruntime.get_available_providers()


@dataclass
class UpscaleOptions:
    do_restore_first: bool = True
    scale: int = 1
    upscaler: UpscalerData = None
    upscale_visibility: float = 0.5
    face_restorer: FaceRestoration = None
    restorer_visibility: float = 0.5


def cosine_distance(vector1: np.ndarray, vector2: np.ndarray) -> float:
    vec1 = vector1.flatten()
    vec2 = vector2.flatten()

    dot_product = np.dot(vec1, vec2)
    norm1 = np.linalg.norm(vec1)
    norm2 = np.linalg.norm(vec2)

    cosine_distance = 1 - (dot_product / (norm1 * norm2))
    return cosine_distance


def cosine_similarity(test_vec: np.ndarray, source_vecs: List[np.ndarray]) -> float:
    cos_dist = sum(cosine_distance(test_vec, source_vec) for source_vec in source_vecs)
    average_cos_dist = cos_dist / len(source_vecs)
    return average_cos_dist


MESSAGED_STOPPED = False
MESSAGED_SKIPPED = False

def reset_messaged():
    global MESSAGED_STOPPED, MESSAGED_SKIPPED
    if not state.interrupted:
        MESSAGED_STOPPED = False
    if not state.skipped:
        MESSAGED_SKIPPED = False

def check_process_halt(msgforced: bool = False):
    global MESSAGED_STOPPED, MESSAGED_SKIPPED
    if state.interrupted:
        if not MESSAGED_STOPPED or msgforced:
            logger.info("Stopped by User")
            MESSAGED_STOPPED = True
        return True
    if state.skipped:
        if not MESSAGED_SKIPPED or msgforced:
            logger.info("Skipped by User")
            MESSAGED_SKIPPED = True
        return True
    return False


FS_MODEL = None
CURRENT_FS_MODEL_PATH = None

ANALYSIS_MODEL = None


def getAnalysisModel():
    global ANALYSIS_MODEL
    if ANALYSIS_MODEL is None:
        ANALYSIS_MODEL = insightface.app.FaceAnalysis(
            name="buffalo_l", providers=providers, root=os.path.join(models_path, "insightface") # note: allowed_modules=['detection', 'genderage']
        )
    return ANALYSIS_MODEL


def getFaceSwapModel(model_path: str):
    global FS_MODEL
    global CURRENT_FS_MODEL_PATH
    if CURRENT_FS_MODEL_PATH is None or CURRENT_FS_MODEL_PATH != model_path:
        CURRENT_FS_MODEL_PATH = model_path
        FS_MODEL = insightface.model_zoo.get_model(model_path, providers=providers)

    return FS_MODEL


def upscale_image(image: Image, upscale_options: UpscaleOptions):
    result_image = image

    if check_process_halt(msgforced=True):
        return result_image

    if upscale_options.do_restore_first:
        if upscale_options.face_restorer is not None:
            original_image = result_image.copy()
            logger.info("Restoring the face with %s", upscale_options.face_restorer.name())
            numpy_image = np.array(result_image)
            numpy_image = upscale_options.face_restorer.restore(numpy_image)
            restored_image = Image.fromarray(numpy_image)
            result_image = Image.blend(
                original_image, restored_image, upscale_options.restorer_visibility
            )
        if upscale_options.upscaler is not None and upscale_options.upscaler.name != "None":
            original_image = result_image.copy()
            logger.info(
                "Upscaling with %s scale = %s",
                upscale_options.upscaler.name,
                upscale_options.scale,
            )
            result_image = upscale_options.upscaler.scaler.upscale(
                original_image, upscale_options.scale, upscale_options.upscaler.data_path
            )
            if upscale_options.scale == 1:
                result_image = Image.blend(
                    original_image, result_image, upscale_options.upscale_visibility
                )
    else:
        if upscale_options.upscaler is not None and upscale_options.upscaler.name != "None":
            original_image = result_image.copy()
            logger.info(
                "Upscaling with %s scale = %s",
                upscale_options.upscaler.name,
                upscale_options.scale,
            )
            result_image = upscale_options.upscaler.scaler.upscale(
                image, upscale_options.scale, upscale_options.upscaler.data_path
            )
            if upscale_options.scale == 1:
                result_image = Image.blend(
                    original_image, result_image, upscale_options.upscale_visibility
                )
        if upscale_options.face_restorer is not None:
            original_image = result_image.copy()
            logger.info("Restoring the face with %s", upscale_options.face_restorer.name())
            numpy_image = np.array(result_image)
            numpy_image = upscale_options.face_restorer.restore(numpy_image)
            restored_image = Image.fromarray(numpy_image)
            result_image = Image.blend(
                original_image, restored_image, upscale_options.restorer_visibility
            )

    return result_image


def get_face_gender(
        face,
        face_index,
        gender_condition,
        operated: str
):
    gender = [
        x.sex
        for x in face
    ]
    gender.reverse()
    face_gender = gender[face_index]
    logger.info("%s Face %s: Detected Gender -%s-", operated, face_index, face_gender)
    if (gender_condition == 1 and face_gender == "F") or (gender_condition == 2 and face_gender == "M"):
        logger.info("OK - Detected Gender matches Condition")
        try:
            return sorted(face, key=lambda x: x.bbox[0])[face_index], 0
        except IndexError:
            return None, 0
    else:
        logger.info("WRONG - Detected Gender doesn't match Condition")
        return sorted(face, key=lambda x: x.bbox[0])[face_index], 1


def reget_face_single(img_data, det_size, face_index):
    det_size_half = (det_size[0] // 2, det_size[1] // 2)
    return get_face_single(img_data, face_index=face_index, det_size=det_size_half)


def get_face_single(img_data: np.ndarray, face_index=0, det_size=(640, 640), gender_source=0, gender_target=0):
    face_analyser = copy.deepcopy(getAnalysisModel())
    face_analyser.prepare(ctx_id=0, det_size=det_size)
    face = face_analyser.get(img_data)

    buffalo_path = os.path.join(models_path, "insightface/models/buffalo_l.zip")
    if os.path.exists(buffalo_path):
        os.remove(buffalo_path)

    if gender_source != 0:
        if len(face) == 0 and det_size[0] > 320 and det_size[1] > 320:
            return reget_face_single(img_data, det_size, face_index)
        return get_face_gender(face,face_index,gender_source,"Source")

    if gender_target != 0:
        if len(face) == 0 and det_size[0] > 320 and det_size[1] > 320:
            return reget_face_single(img_data, det_size, face_index)
        return get_face_gender(face,face_index,gender_target,"Target")

    if len(face) == 0 and det_size[0] > 320 and det_size[1] > 320:
        return reget_face_single(img_data, det_size, face_index)

    try:
        return sorted(face, key=lambda x: x.bbox[0])[face_index], 0
    except IndexError:
        return None, 0


def swap_face(
    source_img: Image.Image,
    target_img: Image.Image,
    model: Union[str, None] = None,
    source_faces_index: List[int] = [0],
    faces_index: List[int] = [0],
    upscale_options: Union[UpscaleOptions, None] = None,
    gender_source: int = 0,
    gender_target: int = 0,
):
    result_image = target_img

    if check_process_halt():
        return result_image

    if model is not None:

        if isinstance(source_img, str): # source_img is a base64 string
            import base64, io
            if 'base64,' in source_img: # check if the base64 string has a data URL scheme
                # split the base64 string to get the actual base64 encoded image data
                base64_data = source_img.split('base64,')[-1]
                # decode base64 string to bytes
                img_bytes = base64.b64decode(base64_data)
            else:
                # if no data URL scheme, just decode
                img_bytes = base64.b64decode(source_img)

            source_img = Image.open(io.BytesIO(img_bytes))

        source_img = cv2.cvtColor(np.array(source_img), cv2.COLOR_RGB2BGR)
        target_img = cv2.cvtColor(np.array(target_img), cv2.COLOR_RGB2BGR)

        source_face, wrong_gender = get_face_single(source_img, face_index=source_faces_index[0], gender_source=gender_source)

        if len(source_faces_index) != 0 and len(source_faces_index) != 1 and len(source_faces_index) != len(faces_index):
            logger.info("Source Faces must have no entries (default=0), one entry, or same number of entries as target faces.")
        elif source_face is not None:

            result = target_img
            face_swapper = getFaceSwapModel(model)

            source_face_idx = 0

            swapped = 0

            for face_num in faces_index:
                if len(source_faces_index) > 1 and source_face_idx > 0:
                    source_face, wrong_gender = get_face_single(source_img, face_index=source_faces_index[source_face_idx], gender_source=gender_source)
                source_face_idx += 1

                if source_face is not None and wrong_gender == 0:
                    target_face, wrong_gender = get_face_single(target_img, face_index=face_num, gender_target=gender_target)
                    if target_face is not None and wrong_gender == 0:
                        result = face_swapper.get(result, target_face, source_face)
                        swapped += 1
                    elif wrong_gender == 1:
                        wrong_gender = 0
                        if source_face_idx == len(source_faces_index):
                            result_image = Image.fromarray(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
                            if upscale_options is not None:
                                result_image = upscale_image(result_image, upscale_options)
                            return result_image
                    else:
                        logger.info(f"No target face found for {face_num}")
                elif wrong_gender == 1:
                    wrong_gender = 0
                    if source_face_idx == len(source_faces_index):
                        result_image = Image.fromarray(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
                        if upscale_options is not None:
                            result_image = upscale_image(result_image, upscale_options)
                        return result_image
                else:
                    logger.info(f"No source face found for face number {source_face_idx}.")

            result_image = Image.fromarray(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
            if upscale_options is not None and swapped > 0:
                result_image = upscale_image(result_image, upscale_options)

        else:
            logger.info("No source face(s) found")
    return result_image
@@ -1,7 +1,7 @@
app_title = "ReActor"
-version_flag = "v0.4.1-b3"
+version_flag = "v0.4.1-b4"

-from scripts.logger import logger, get_Run, set_Run
+from scripts.reactor_logger import logger, get_Run, set_Run

is_run = get_Run()