UPDATE: Safetensors Face Models

Gourieff 2023-11-24 13:09:58 +07:00
parent 2c2d40508a
commit bee2c4ee2d
11 changed files with 479 additions and 124 deletions

View File

@@ -2,7 +2,7 @@
 <img src="https://github.com/Gourieff/Assets/raw/main/sd-webui-reactor/ReActor_logo_red.png?raw=true" alt="logo" width="180px"/>
-![Version](https://img.shields.io/badge/version-0.5.1_beta1-green?style=for-the-badge&labelColor=darkgreen)
+![Version](https://img.shields.io/badge/version-0.5.1_beta2-green?style=for-the-badge&labelColor=darkgreen)
 <a href='https://ko-fi.com/gourieff' target='_blank'><img height='33' src='https://storage.ko-fi.com/cdn/kofi3.png?v=3' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>

View File

@@ -2,7 +2,7 @@
 <img src="https://github.com/Gourieff/Assets/raw/main/sd-webui-reactor/ReActor_logo_red.png?raw=true" alt="logo" width="180px"/>
-![Version](https://img.shields.io/badge/версия-0.5.1_beta1-green?style=for-the-badge&labelColor=darkgreen)
+![Version](https://img.shields.io/badge/версия-0.5.1_beta2-green?style=for-the-badge&labelColor=darkgreen)
 <a href='https://ko-fi.com/gourieff' target='_blank'><img height='33' src='https://storage.ko-fi.com/cdn/kofi3.png?v=3' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>

View File

@@ -43,6 +43,9 @@ args=[
 False, #18 Source Image Hash Check, True - by default
 False, #19 Target Image Hash Check, False - by default
 "CUDA", #20 CPU or CUDA (if you have it), CPU - by default
+True, #21 Face Mask Correction
+1, #22 Select Source, 0 - Image, 1 - Face Model
+"elena.safetensors", #23 Filename of the face model (from "models/reactor/faces"), e.g. elena.safetensors
 ]
 # The args for ReActor can be found by
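For orientation, here is a minimal client-side sketch of passing the three new arguments (#21-#23) through the A1111 API. It is not part of this commit: the 127.0.0.1:7860 address, the /sdapi/v1/txt2img endpoint, the "reactor" key inside alwayson_scripts, and the prompt text are all assumptions, and args #0-#20 are elided exactly as in the example above.

import requests

reactor_args = [...]          # placeholder: args #0-#20 in the documented order
reactor_args += [
    True,                     # 21 Face Mask Correction
    1,                        # 22 Select Source, 0 - Image, 1 - Face Model
    "elena.safetensors",      # 23 Face model filename from "models/reactor/faces"
]

payload = {
    "prompt": "portrait photo",                               # hypothetical prompt
    "alwayson_scripts": {"reactor": {"args": reactor_args}},  # "reactor" key is an assumption
}
requests.post("http://127.0.0.1:7860/sdapi/v1/txt2img", json=payload, timeout=600)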

View File

@@ -19,5 +19,8 @@ curl -X POST \
 "gender_target": 0,
 "save_to_file": 1,
 "result_file_path": "",
-"device": "CUDA"
+"device": "CUDA",
+"mask_face": 1,
+"select_source": 1,
+"face_model": "elena.safetensors"
 }'
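A rough Python equivalent of the curl request above, shown only to illustrate where the new fields go. The endpoint URL and the source/target image fields sit outside this hunk, so REACTOR_API_URL and the omitted fields below are placeholders, not confirmed API details.

import requests

REACTOR_API_URL = "http://127.0.0.1:7860/..."   # placeholder: the real route is not shown in this hunk

payload = {
    # ...source/target images and the other fields shown above...
    "save_to_file": 1,
    "result_file_path": "",
    "device": "CUDA",
    "mask_face": 1,                        # 1 - True, 0 - False (Face Mask Correction)
    "select_source": 1,                    # 0 - Image, 1 - Face Model
    "face_model": "elena.safetensors",     # file from "models/reactor/faces"
}
response = requests.post(REACTOR_API_URL, json=payload, timeout=600)
print(response.status_code)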

View File

@@ -15,5 +15,8 @@
 "gender_target": 0,
 "save_to_file": 1,
 "result_file_path": "",
-"device": "CUDA"
+"device": "CUDA",
+"mask_face": 1,
+"select_source": 1,
+"face_model": "elena.safetensors"
 }

View File

@@ -71,7 +71,10 @@ def reactor_api(_: gr.Blocks, app: FastAPI):
 gender_target: int = Body(0,title="Gender Detection (Target) (0 - No, 1 - Female Only, 2 - Male Only)"),
 save_to_file: int = Body(0,title="Save Result to file, 0 - No, 1 - Yes"),
 result_file_path: str = Body("",title="(if 'save_to_file = 1') Result file path"),
-device: str = Body("CPU",title="CPU or CUDA (if you have it)")
+device: str = Body("CPU",title="CPU or CUDA (if you have it)"),
+mask_face: int = Body(0,title="Face Mask Correction, 1 - True, 0 - False"),
+select_source: int = Body(0,title="Select Source, 0 - Image, 1 - Face Model"),
+face_model: str = Body("None",title="Filename of the face model (from 'models/reactor/faces'), e.g. elena.safetensors")
 ):
 s_image = api.decode_base64_to_image(source_image)
 t_image = api.decode_base64_to_image(target_image)
@@ -80,11 +83,12 @@ def reactor_api(_: gr.Blocks, app: FastAPI):
 gender_s = gender_source
 gender_t = gender_target
 restore_first_bool = True if restore_first == 1 else False
+mask_face = True if mask_face == 1 else False
 up_options = EnhancementOptions(do_restore_first=restore_first_bool, scale=scale, upscaler=get_upscaler(upscaler), upscale_visibility=upscale_visibility,face_restorer=get_face_restorer(face_restorer),restorer_visibility=restorer_visibility,codeformer_weight=codeformer_weight)
 use_model = get_full_model(model)
 if use_model is None:
 Exception("Model not found")
-result = swap_face(s_image, t_image, use_model, sf_index, f_index, up_options, gender_s, gender_t, True, True, device)
+result = swap_face(s_image, t_image, use_model, sf_index, f_index, up_options, gender_s, gender_t, True, True, device, mask_face, select_source, face_model)
 if save_to_file == 1:
 if result_file_path == "":
 result_file_path = default_file_path()

View File

@@ -28,10 +28,16 @@ except:
 model_path = os.path.abspath("models")

 from scripts.reactor_logger import logger
-from scripts.reactor_swapper import EnhancementOptions, swap_face, check_process_halt, reset_messaged
+from scripts.reactor_swapper import (
+EnhancementOptions,
+swap_face,
+check_process_halt,
+reset_messaged,
+build_face_model
+)
 from scripts.reactor_version import version_flag, app_title
 from scripts.console_log_patch import apply_logging_patch
-from scripts.reactor_helpers import make_grid, get_image_path, set_Device
+from scripts.reactor_helpers import make_grid, get_image_path, set_Device, get_model_names, get_facemodels
 from scripts.reactor_globals import DEVICE, DEVICE_LIST
@@ -61,12 +67,69 @@ class FaceSwapScript(scripts.Script):
 def ui(self, is_img2img):
 with gr.Accordion(f"{app_title}", open=False):
+def update_fm_list(selected: str):
+return gr.Dropdown.update(
+value=selected, choices=get_model_names(get_facemodels)
+)
+def update_upscalers_list(selected: str):
+return gr.Dropdown.update(
+value=selected, choices=[upscaler.name for upscaler in shared.sd_upscalers]
+)
+def update_models_list(selected: str):
+return gr.Dropdown.update(
+value=selected, choices=get_models()
+)
+# TAB MAIN
 with gr.Tab("Main"):
 with gr.Column():
-img = gr.Image(type="pil")
+img = gr.Image(
+type="pil",
+label="Source Image",
+)
+# face_model = gr.File(
+# file_types=[".safetensors"],
+# label="Face Model",
+# show_label=True,
+# )
 enable = gr.Checkbox(False, label="Enable", info=f"The Fast and Simple FaceSwap Extension - {version_flag}")
-save_original = gr.Checkbox(False, label="Save Original", info="Save the original image(s) made before swapping; If you use \"img2img\" - this option will affect with \"Swap in generated\" only")
-mask_face = gr.Checkbox(False, label="Face Mask Correction", info="Apply this option if you see some pixelation around face contours")
+gr.Markdown("<br>")
+with gr.Row():
+select_source = gr.Radio(
+["Image","Face Model"],
+value="Image",
+label="Select Source",
+type="index",
+scale=1,
+)
+face_models = get_model_names(get_facemodels)
+face_model = gr.Dropdown(
+choices=face_models,
+label="Choose Face Model",
+value="None",
+scale=2,
+)
+fm_update = gr.Button(
+value="🔄",
+variant="tool",
+)
+fm_update.click(
+update_fm_list,
+inputs=[face_model],
+outputs=[face_model],
+)
+setattr(face_model, "do_not_save_to_config", True)
+save_original = gr.Checkbox(
+False,
+label="Save Original",
+info="Save the original image(s) made before swapping; If you use \"img2img\" - this option will affect with \"Swap in generated\" only"
+)
+mask_face = gr.Checkbox(
+False,
+label="Face Mask Correction",
+info="Apply this option if you see some pixelation around face contours"
+)
 gr.Markdown("<br>")
 gr.Markdown("Source Image (above):")
 with gr.Row():
@@ -120,18 +183,30 @@ class FaceSwapScript(scripts.Script):
 True,
 label="Swap in generated image",
 visible=is_img2img,
 )
+# TAB UPSCALE
 with gr.Tab("Upscale"):
 restore_first = gr.Checkbox(
 True,
 label="1. Restore Face -> 2. Upscale (-Uncheck- if you want vice versa)",
 info="Postprocessing Order"
 )
-upscaler_name = gr.Dropdown(
-choices=[upscaler.name for upscaler in shared.sd_upscalers],
-label="Upscaler",
-value="None",
-info="Won't scale if you choose -Swap in Source- via img2img, only 1x-postprocessing will affect (texturing, denoising, restyling etc.)"
-)
+with gr.Row():
+upscaler_name = gr.Dropdown(
+choices=[upscaler.name for upscaler in shared.sd_upscalers],
+label="Upscaler",
+value="None",
+info="Won't scale if you choose -Swap in Source- via img2img, only 1x-postprocessing will affect (texturing, denoising, restyling etc.)"
+)
+upscalers_update = gr.Button(
+value="🔄",
+variant="tool",
+)
+upscalers_update.click(
+update_upscalers_list,
+inputs=[upscaler_name],
+outputs=[upscaler_name],
+)
 gr.Markdown("<br>")
 with gr.Row():
@@ -139,6 +214,30 @@ class FaceSwapScript(scripts.Script):
 upscaler_visibility = gr.Slider(
 0, 1, 1, step=0.1, label="Upscaler Visibility (if scale = 1)"
 )
+# TAB TOOLS
+with gr.Tab("Tools 🆕"):
+with gr.Tab("Face Models"):
+gr.Markdown("Load an image containing one person, name it and click 'Build and Save'")
+img_fm = gr.Image(
+type="pil",
+label="Load Image to build Face Model",
+)
+with gr.Row(equal_height=True):
+fm_name = gr.Textbox(
+value="",
+placeholder="Please type any name (e.g. Elena)",
+label="Face Model Name",
+)
+save_fm_btn = gr.Button("Build and Save")
+save_fm = gr.Markdown("You can find saved models in 'models/reactor/faces'")
+save_fm_btn.click(
+build_face_model,
+inputs=[img_fm, fm_name],
+outputs=[save_fm],
+)
+# TAB SETTINGS
 with gr.Tab("Settings"):
 models = get_models()
 with gr.Row(visible=EP_is_visible):
@@ -161,21 +260,30 @@ class FaceSwapScript(scripts.Script):
 with gr.Row():
 if len(models) == 0:
 logger.warning(
-"You should at least have one model in models directory, please read the doc here : https://github.com/Gourieff/sd-webui-reactor/"
+"You should at least have one model in models directory, please read the doc here: https://github.com/Gourieff/sd-webui-reactor/"
 )
 model = gr.Dropdown(
 choices=models,
-label="Model not found, please download one and reload WebUI",
+label="Model not found, please download one and refresh the list"
 )
 else:
 model = gr.Dropdown(
 choices=models, label="Model", value=models[0]
 )
+models_update = gr.Button(
+value="🔄",
+variant="tool",
+)
+models_update.click(
+update_models_list,
+inputs=[model],
+outputs=[model],
+)
 console_logging_level = gr.Radio(
 ["No log", "Minimum", "Default"],
 value="Minimum",
 label="Console Log Level",
-type="index",
+type="index"
 )
 gr.Markdown("<br>")
 with gr.Row():
@@ -189,6 +297,8 @@ class FaceSwapScript(scripts.Script):
 label="Target Image Hash Check",
 info="Affects if you use Extras tab or img2img with only 'Swap in source image' on."
 )
+gr.Markdown("<span style='display:block;text-align:right;padding:3px;font-size:0.666em'>by Eugene Gourieff</span>")
 return [
 img,
@@ -213,6 +323,8 @@ class FaceSwapScript(scripts.Script):
 target_hash_check,
 device,
 mask_face,
+select_source,
+face_model,
 ]
@@ -267,10 +379,14 @@ class FaceSwapScript(scripts.Script):
 target_hash_check,
 device,
 mask_face,
+select_source,
+face_model,
 ):
 self.enable = enable
 if self.enable:
+logger.debug("*** Start process")
 reset_messaged()
 if check_process_halt():
 return
@@ -295,6 +411,8 @@ class FaceSwapScript(scripts.Script):
 self.target_hash_check = target_hash_check
 self.device = device
 self.mask_face = mask_face
+self.select_source = select_source
+self.face_model = face_model
 if self.gender_source is None or self.gender_source == "No":
 self.gender_source = 0
 if self.gender_target is None or self.gender_target == "No":
@@ -318,9 +436,11 @@ class FaceSwapScript(scripts.Script):
 if self.mask_face is None:
 self.mask_face = False
+logger.debug("*** Set Device")
 set_Device(self.device)
-if self.source is not None:
+if (self.source is not None and self.select_source == 0) or ((self.face_model is not None and self.face_model != "None") and self.select_source == 1):
+logger.debug("*** Log patch")
 apply_logging_patch(console_logging_level)
 if isinstance(p, StableDiffusionProcessingImg2Img) and self.swap_in_source:
 logger.status("Working: source face index %s, target face index %s", self.source_faces_index, self.faces_index)
@@ -341,6 +461,8 @@ class FaceSwapScript(scripts.Script):
 target_hash_check=self.target_hash_check,
 device=self.device,
 mask_face=self.mask_face,
+select_source=self.select_source,
+face_model = self.face_model,
 )
 p.init_images[i] = result
 # result_path = get_image_path(p.init_images[i], p.outpath_samples, "", p.all_seeds[i], p.all_prompts[i], "txt", p=p, suffix="-swapped")
@@ -353,10 +475,13 @@ class FaceSwapScript(scripts.Script):
 else:
 logger.error("Please provide a source face")
+return
 def postprocess(self, p: StableDiffusionProcessing, processed: Processed, *args):
 if self.enable:
+logger.debug("*** Check postprocess")
 reset_messaged()
 if check_process_halt():
 return
@@ -373,42 +498,44 @@ class FaceSwapScript(scripts.Script):
 if self.swap_in_generated:
 logger.status("Working: source face index %s, target face index %s", self.source_faces_index, self.faces_index)
-if self.source is not None:
+# if self.source is not None:
 for i,(img,info) in enumerate(zip(orig_images, orig_infotexts)):
 if check_process_halt():
 postprocess_run = False
 break
 if len(orig_images) > 1:
 logger.status("Swap in %s", i)
 result, output, swapped = swap_face(
 self.source,
 img,
 source_faces_index=self.source_faces_index,
 faces_index=self.faces_index,
 model=self.model,
 enhancement_options=self.enhancement_options,
 gender_source=self.gender_source,
 gender_target=self.gender_target,
 source_hash_check=self.source_hash_check,
 target_hash_check=self.target_hash_check,
 device=self.device,
 mask_face=self.mask_face,
+select_source=self.select_source,
+face_model = self.face_model,
 )
 if result is not None and swapped > 0:
 result_images.append(result)
 suffix = "-swapped"
 try:
 img_path = save_image(result, p.outpath_samples, "", p.all_seeds[0], p.all_prompts[0], "png",info=info, p=p, suffix=suffix)
 except:
 logger.error("Cannot save a result image - please, check SD WebUI Settings (Saving and Paths)")
 elif result is None:
 logger.error("Cannot create a result image")
 # if len(output) != 0:
 # split_fullfn = os.path.splitext(img_path[0])
 # fullfn = split_fullfn[0] + ".txt"
 # with open(fullfn, 'w', encoding="utf8") as f:
 # f.writelines(output)
 if shared.opts.return_grid and len(result_images) > 2 and postprocess_run:
 grid = make_grid(result_images)
@@ -423,11 +550,14 @@ class FaceSwapScript(scripts.Script):
 def postprocess_batch(self, p, *args, **kwargs):
 if self.enable and not self.save_original:
+logger.debug("*** Check postprocess_batch")
 images = kwargs["images"]
 def postprocess_image(self, p, script_pp: scripts.PostprocessImageArgs, *args):
 if self.enable and self.swap_in_generated and not self.save_original:
+logger.debug("*** Check postprocess_image")
 current_job_number = shared.state.job_no + 1
 job_count = shared.state.job_count
 if current_job_number == job_count:
@@ -435,36 +565,38 @@ class FaceSwapScript(scripts.Script):
 if check_process_halt():
 return
-if self.source is not None:
+# if (self.source is not None and self.select_source == 0) or ((self.face_model is not None and self.face_model != "None") and self.select_source == 1):
 logger.status("Working: source face index %s, target face index %s", self.source_faces_index, self.faces_index)
 image: Image.Image = script_pp.image
 result, output, swapped = swap_face(
 self.source,
 image,
 source_faces_index=self.source_faces_index,
 faces_index=self.faces_index,
 model=self.model,
 enhancement_options=self.enhancement_options,
 gender_source=self.gender_source,
 gender_target=self.gender_target,
 source_hash_check=self.source_hash_check,
 target_hash_check=self.target_hash_check,
 device=self.device,
 mask_face=self.mask_face,
+select_source=self.select_source,
+face_model = self.face_model,
 )
 try:
 pp = scripts_postprocessing.PostprocessedImage(result)
 pp.info = {}
 p.extra_generation_params.update(pp.info)
 script_pp.image = pp.image
 # if len(output) != 0:
 # result_path = get_image_path(script_pp.image, p.outpath_samples, "", p.all_seeds[0], p.all_prompts[0], "txt", p=p, suffix="-swapped")
 # if len(output) != 0:
 # with open(result_path, 'w', encoding="utf8") as f:
 # f.writelines(output)
 except:
 logger.error("Cannot create a result image")

 class FaceSwapScriptExtras(scripts_postprocessing.ScriptPostprocessing):
@@ -473,11 +605,56 @@ class FaceSwapScriptExtras(scripts_postprocessing.ScriptPostprocessing):
 def ui(self):
 with gr.Accordion(f"{app_title}", open=False):
+def update_fm_list(selected: str):
+return gr.Dropdown.update(
+value=selected, choices=get_model_names(get_facemodels)
+)
+def update_upscalers_list(selected: str):
+return gr.Dropdown.update(
+value=selected, choices=[upscaler.name for upscaler in shared.sd_upscalers]
+)
+def update_models_list(selected: str):
+return gr.Dropdown.update(
+value=selected, choices=get_models()
+)
+# TAB MAIN
 with gr.Tab("Main"):
 with gr.Column():
 img = gr.Image(type="pil")
 enable = gr.Checkbox(False, label="Enable", info=f"The Fast and Simple FaceSwap Extension - {version_flag}")
-mask_face = gr.Checkbox(False, label="Face Mask Correction", info="Apply this option if you see some pixelation around face contours")
+# gr.Markdown("<br>")
+with gr.Row():
+select_source = gr.Radio(
+["Image","Face Model"],
+value="Image",
+label="Select Source",
+type="index",
+scale=1,
+)
+face_models = get_model_names(get_facemodels)
+face_model = gr.Dropdown(
+choices=face_models,
+label="Choose Face Model",
+value="None",
+scale=2,
+)
+fm_update = gr.Button(
+value="🔄",
+variant="tool",
+)
+fm_update.click(
+update_fm_list,
+inputs=[face_model],
+outputs=[face_model],
+)
+setattr(face_model, "do_not_save_to_config", True)
+mask_face = gr.Checkbox(
+False,
+label="Face Mask Correction",
+info="Apply this option if you see some pixelation around face contours"
+)
 gr.Markdown("Source Image (above):")
 with gr.Row():
 source_faces_index = gr.Textbox(
@@ -519,23 +696,58 @@ class FaceSwapScriptExtras(scripts_postprocessing.ScriptPostprocessing):
 0, 1, 0.5, step=0.1, label="CodeFormer Weight", info="0 = maximum effect, 1 = minimum effect"
 )
+# TAB UPSCALE
 with gr.Tab("Upscale"):
 restore_first = gr.Checkbox(
 True,
 label="1. Restore Face -> 2. Upscale (-Uncheck- if you want vice versa)",
 info="Postprocessing Order"
 )
-upscaler_name = gr.Dropdown(
-choices=[upscaler.name for upscaler in shared.sd_upscalers],
-label="Upscaler",
-value="None",
-info="Won't scale if you choose -Swap in Source- via img2img, only 1x-postprocessing will affect (texturing, denoising, restyling etc.)"
-)
+with gr.Row():
+upscaler_name = gr.Dropdown(
+choices=[upscaler.name for upscaler in shared.sd_upscalers],
+label="Upscaler",
+value="None",
+info="Won't scale if you choose -Swap in Source- via img2img, only 1x-postprocessing will affect (texturing, denoising, restyling etc.)"
+)
+upscalers_update = gr.Button(
+value="🔄",
+variant="tool",
+)
+upscalers_update.click(
+update_upscalers_list,
+inputs=[upscaler_name],
+outputs=[upscaler_name],
+)
 with gr.Row():
 upscaler_scale = gr.Slider(1, 8, 1, step=0.1, label="Scale by")
 upscaler_visibility = gr.Slider(
 0, 1, 1, step=0.1, label="Upscaler Visibility (if scale = 1)"
 )
+# TAB TOOLS
+with gr.Tab("Tools 🆕"):
+with gr.Tab("Face Models"):
+gr.Markdown("Load an image containing one person, name it and click 'Build and Save'")
+img_fm = gr.Image(
+type="pil",
+label="Load Image to build Face Model",
+)
+with gr.Row(equal_height=True):
+fm_name = gr.Textbox(
+value="",
+placeholder="Please type any name (e.g. Elena)",
+label="Face Model Name",
+)
+save_fm_btn = gr.Button("Build and Save")
+save_fm = gr.Markdown("You can find saved models in 'models/reactor/faces'")
+save_fm_btn.click(
+build_face_model,
+inputs=[img_fm, fm_name],
+outputs=[save_fm],
+)
+# TAB SETTINGS
 with gr.Tab("Settings"):
 models = get_models()
 with gr.Row(visible=EP_is_visible):
@@ -558,22 +770,33 @@ class FaceSwapScriptExtras(scripts_postprocessing.ScriptPostprocessing):
 with gr.Row():
 if len(models) == 0:
 logger.warning(
-"You should at least have one model in models directory, please read the doc here : https://github.com/Gourieff/sd-webui-reactor/"
+"You should at least have one model in models directory, please read the doc here: https://github.com/Gourieff/sd-webui-reactor/"
 )
 model = gr.Dropdown(
 choices=models,
-label="Model not found, please download one and reload WebUI",
+label="Model not found, please download one and refresh the list",
 )
 else:
 model = gr.Dropdown(
 choices=models, label="Model", value=models[0]
 )
+models_update = gr.Button(
+value="🔄",
+variant="tool",
+)
+models_update.click(
+update_models_list,
+inputs=[model],
+outputs=[model],
+)
 console_logging_level = gr.Radio(
 ["No log", "Minimum", "Default"],
 value="Minimum",
 label="Console Log Level",
 type="index",
 )
+gr.Markdown("<span style='display:block;text-align:right;padding-right:3px;font-size:0.666em;margin: -9px 0'>by Eugene Gourieff</span>")
 args = {
 'img': img,
@@ -593,6 +816,8 @@ class FaceSwapScriptExtras(scripts_postprocessing.ScriptPostprocessing):
 'codeformer_weight': codeformer_weight,
 'device': device,
 'mask_face': mask_face,
+'select_source': select_source,
+'face_model': face_model,
 }
 return args
@@ -643,6 +868,8 @@ class FaceSwapScriptExtras(scripts_postprocessing.ScriptPostprocessing):
 self.codeformer_weight = args['codeformer_weight']
 self.device = args['device']
 self.mask_face = args['mask_face']
+self.select_source = args['select_source']
+self.face_model = args['face_model']
 if self.gender_source is None or self.gender_source == "No":
 self.gender_source = 0
 if self.gender_target is None or self.gender_target == "No":
@@ -667,7 +894,7 @@ class FaceSwapScriptExtras(scripts_postprocessing.ScriptPostprocessing):
 set_Device(self.device)
-if self.source is not None:
+if (self.source is not None and self.select_source == 0) or ((self.face_model is not None and self.face_model != "None") and self.select_source == 1):
 apply_logging_patch(self.console_logging_level)
 logger.status("Working: source face index %s, target face index %s", self.source_faces_index, self.faces_index)
 image: Image.Image = pp.image
@@ -684,6 +911,8 @@ class FaceSwapScriptExtras(scripts_postprocessing.ScriptPostprocessing):
 target_hash_check=True,
 device=self.device,
 mask_face=self.mask_face,
+select_source=self.select_source,
+face_model=self.face_model,
 )
 try:
 pp.info["ReActor"] = True

View File

@@ -1,10 +1,27 @@
 import os
 from pathlib import Path
+
+try:
+from modules.paths_internal import models_path
+except:
+try:
+from modules.paths import models_path
+except:
+models_path = os.path.abspath("models")
+
 IS_RUN: bool = False
 BASE_PATH = os.path.join(Path(__file__).parents[1])
 DEVICE_LIST: list = ["CPU", "CUDA"]
+
+MODELS_PATH = models_path
+REACTOR_MODELS_PATH = os.path.join(models_path, "reactor")
+FACE_MODELS_PATH = os.path.join(REACTOR_MODELS_PATH, "faces")
+
+if not os.path.exists(REACTOR_MODELS_PATH):
+os.makedirs(REACTOR_MODELS_PATH)
+if not os.path.exists(FACE_MODELS_PATH):
+os.makedirs(FACE_MODELS_PATH)

 def updateDevice():
 try:
 LAST_DEVICE_PATH = os.path.join(BASE_PATH, "last_device.txt")

View File

@@ -1,14 +1,17 @@
-import os
+import os, glob
 from collections import Counter
 from PIL import Image
 from math import isqrt, ceil
 from typing import List
 import logging
 import hashlib
+import torch
+from safetensors.torch import save_file, safe_open
+from insightface.app.common import Face
 from modules.images import FilenameGenerator, get_next_sequence_number
 from modules import shared, script_callbacks
-from scripts.reactor_globals import DEVICE, BASE_PATH
+from scripts.reactor_globals import DEVICE, BASE_PATH, FACE_MODELS_PATH

 def set_Device(value):
 global DEVICE
@@ -133,3 +136,42 @@ def addLoggingLevel(levelName, levelNum, methodName=None):
 def get_image_md5hash(image: Image.Image):
 md5hash = hashlib.md5(image.tobytes())
 return md5hash.hexdigest()
+
+def save_face_model(face: Face, filename: str) -> None:
+try:
+tensors = {
+"bbox": torch.tensor(face["bbox"]),
+"kps": torch.tensor(face["kps"]),
+"det_score": torch.tensor(face["det_score"]),
+"landmark_3d_68": torch.tensor(face["landmark_3d_68"]),
+"pose": torch.tensor(face["pose"]),
+"landmark_2d_106": torch.tensor(face["landmark_2d_106"]),
+"embedding": torch.tensor(face["embedding"]),
+"gender": torch.tensor(face["gender"]),
+"age": torch.tensor(face["age"]),
+}
+save_file(tensors, filename)
+# print(f"Face model has been saved to '{filename}'")
+except Exception as e:
+print(f"Error: {e}")
+
+def load_face_model(filename: str):
+face = {}
+model_path = os.path.join(FACE_MODELS_PATH, filename)
+with safe_open(model_path, framework="pt") as f:
+for k in f.keys():
+face[k] = f.get_tensor(k).numpy()
+return Face(face)
+
+def get_facemodels():
+models_path = os.path.join(FACE_MODELS_PATH, "*")
+models = glob.glob(models_path)
+models = [x for x in models if x.endswith(".safetensors")]
+return models
+
+def get_model_names(get_models):
+models = get_models()
+names = ["None"]
+for x in models:
+names.append(os.path.basename(x))
+return names
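As a quick usage sketch for the helpers added above (not part of the commit; the file name is hypothetical): get_facemodels() globs *.safetensors under models/reactor/faces, get_model_names() prepends "None" as the dropdown default, and load_face_model() rebuilds an insightface Face whose values come back as numpy arrays.

from scripts.reactor_helpers import get_facemodels, get_model_names, load_face_model

# Dropdown-style listing: always "None" first, then the saved model file names.
print(get_model_names(get_facemodels))

# load_face_model() joins the bare file name with FACE_MODELS_PATH itself,
# so only "elena.safetensors" (hypothetical) is passed here.
face = load_face_model("elena.safetensors")
print(face["embedding"].shape)   # the stored identity embedding used for swapping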

View File

@@ -8,8 +8,12 @@ import numpy as np
 from PIL import Image
 import insightface
+from insightface.app.common import Face
+from scripts.reactor_globals import FACE_MODELS_PATH
+from scripts.reactor_helpers import get_image_md5hash, get_Device, save_face_model, load_face_model
+from scripts.console_log_patch import apply_logging_patch
-from scripts.reactor_helpers import get_image_md5hash, get_Device
 from modules.face_restoration import FaceRestoration

 try: # A1111
 from modules import codeformer_model
@@ -26,7 +30,7 @@
 try:
 from modules.paths import models_path
 except:
-model_path = os.path.abspath("models")
+models_path = os.path.abspath("models")

 import warnings
@@ -78,10 +82,11 @@ def check_process_halt(msgforced: bool = False):
 FS_MODEL = None
-ANALYSIS_MODEL = None
 MASK_MODEL = None
 CURRENT_FS_MODEL_PATH = None
 CURRENT_MASK_MODEL_PATH = None
+ANALYSIS_MODEL = None

 SOURCE_FACES = None
 SOURCE_IMAGE_HASH = None
@@ -108,8 +113,6 @@
 return FS_MODEL

 def restore_face(image: Image, enhancement_options: EnhancementOptions):
 result_image = image
@@ -173,6 +176,7 @@
 result_image = restore_face(result_image, enhancement_options)
 return result_image
+
 def enhance_image_and_mask(image: Image.Image, enhancement_options: EnhancementOptions,target_img_orig:Image.Image,entire_mask_image:Image.Image)->Image.Image:
 result_image = image
@@ -309,6 +313,8 @@
 target_hash_check: bool = False,
 device: str = "CPU",
 mask_face: bool = False,
+select_source: int = 0,
+face_model: str = "None",
 ):
 global SOURCE_FACES, SOURCE_IMAGE_HASH, TARGET_FACES, TARGET_IMAGE_HASH, PROVIDERS
 result_image = target_img
@@ -333,40 +339,56 @@
 source_img = Image.open(io.BytesIO(img_bytes))
-source_img = cv2.cvtColor(np.array(source_img), cv2.COLOR_RGB2BGR)
 target_img = cv2.cvtColor(np.array(target_img), cv2.COLOR_RGB2BGR)
 target_img_orig = cv2.cvtColor(np.array(target_img), cv2.COLOR_RGB2BGR)
 entire_mask_image = np.zeros_like(np.array(target_img))
 output: List = []
 output_info: str = ""
 swapped = 0
-if source_hash_check:
+if select_source == 0 and source_img is not None:
+source_img = cv2.cvtColor(np.array(source_img), cv2.COLOR_RGB2BGR)
+if source_hash_check:
 source_image_md5hash = get_image_md5hash(source_img)
 if SOURCE_IMAGE_HASH is None:
 SOURCE_IMAGE_HASH = source_image_md5hash
 source_image_same = False
 else:
 source_image_same = True if SOURCE_IMAGE_HASH == source_image_md5hash else False
 if not source_image_same:
 SOURCE_IMAGE_HASH = source_image_md5hash
 logger.info("Source Image MD5 Hash = %s", SOURCE_IMAGE_HASH)
 logger.info("Source Image the Same? %s", source_image_same)
 if SOURCE_FACES is None or not source_image_same:
 logger.status("Analyzing Source Image...")
 source_faces = analyze_faces(source_img)
 SOURCE_FACES = source_faces
 elif source_image_same:
-logger.status("Using Ready Source Face(s) Model...")
+logger.status("Using Hashed Source Face(s) Model...")
 source_faces = SOURCE_FACES
 else:
 logger.status("Analyzing Source Image...")
 source_faces = analyze_faces(source_img)
+elif select_source == 1 and (face_model is not None and face_model != "None"):
+source_face_model = [load_face_model(face_model)]
+if source_face_model is not None:
+source_faces_index = [0]
+source_faces = source_face_model
+logger.status("Using Loaded Source Face Model...")
+else:
+logger.error(f"Cannot load Face Model File: {face_model}.safetensors")
+else:
+logger.error("Cannot detect any Source")
 if source_faces is not None:
@@ -390,7 +412,7 @@
 target_faces = analyze_faces(target_img)
 TARGET_FACES = target_faces
 elif target_image_same:
-logger.status("Using Ready Target Face(s) Model...")
+logger.status("Using Hashed Target Face(s) Model...")
 target_faces = TARGET_FACES
 else:
@@ -398,7 +420,13 @@
 target_faces = analyze_faces(target_img)
 logger.status("Detecting Source Face, Index = %s", source_faces_index[0])
+if select_source == 0 and source_img is not None:
 source_face, wrong_gender, source_age, source_gender = get_face_single(source_img, source_faces, face_index=source_faces_index[0], gender_source=gender_source)
+else:
+source_face = sorted(source_faces, key=lambda x: x.bbox[0])[source_faces_index[0]]
+wrong_gender = 0
+source_age = source_face["age"]
+source_gender = "Female" if source_face["gender"] == 0 else "Male"
 if source_age != "None" or source_gender != "None":
 logger.status("Detected: -%s- y.o. %s", source_age, source_gender)
@@ -491,3 +519,29 @@
 logger.status("No source face(s) found")
 return result_image, output, swapped
+
+def build_face_model(image: Image.Image, name: str):
+if image is None:
+error_msg = "Please load an Image"
+logger.error(error_msg)
+return error_msg
+if name is None:
+error_msg = "Please filled out the 'Face Model Name' field"
+logger.error(error_msg)
+return error_msg
+apply_logging_patch(1)
+image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
+logger.status("Building Face Model...")
+face_model = analyze_faces(image)[0]
+if face_model is not None:
+face_model_path = os.path.join(FACE_MODELS_PATH, name + ".safetensors")
+save_face_model(face_model,face_model_path)
+logger.status("--Done!--")
+done_msg = f"Face model has been saved to '{face_model_path}'"
+logger.status(done_msg)
+return done_msg
+else:
+no_face_msg = "No face found, please try another image"
+logger.error(no_face_msg)
+return no_face_msg
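A minimal sketch of using the new build_face_model() helper end to end; it assumes the WebUI modules are importable and that 'elena.jpg' (a hypothetical photo) contains exactly one face.

from PIL import Image
from scripts.reactor_swapper import build_face_model
from scripts.reactor_helpers import load_face_model

# Builds models/reactor/faces/elena.safetensors and returns a status message.
msg = build_face_model(Image.open("elena.jpg"), "elena")
print(msg)

# The saved model can then be picked in the UI ("Select Source" -> "Face Model")
# or loaded back directly by file name:
face = load_face_model("elena.safetensors")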

View File

@@ -1,5 +1,5 @@
 app_title = "ReActor"
-version_flag = "v0.5.1-b1"
+version_flag = "v0.5.1-b2"
 from scripts.reactor_logger import logger, get_Run, set_Run