prototype

This commit is contained in:
parent a0239a59d7
commit b0d56aa249
43
README.md
Normal file
@@ -0,0 +1,43 @@
# roop for StableDiffusion

This is an extension for StableDiffusion's [AUTOMATIC1111 web-ui](https://github.com/AUTOMATIC1111/stable-diffusion-webui/) that allows face replacement in images. It is based on [roop](https://github.com/s0md3v/roop) but will be developed separately.

![example](example/example.png)
### Disclaimer

This software is meant to be a productive contribution to the rapidly growing AI-generated media industry. It will help artists with tasks such as animating a custom character or using the character as a model for clothing, etc.

The developers of this software are aware of its possible unethical applications and are committed to taking preventative measures against them. It has a built-in check which prevents the program from working on inappropriate media, including but not limited to nudity, graphic content, and sensitive material such as war footage. We will continue to develop this project in a positive direction while adhering to law and ethics. This project may be shut down or include watermarks on the output if requested by law.

Users of this software are expected to use it responsibly while abiding by local laws. If the face of a real person is being used, users are advised to obtain consent from the person concerned and to clearly mention that it is a deepfake when posting content online. The developers of this software will not be responsible for the actions of end users.
## Installation

To install the extension, follow these steps:

+ In web-ui, go to the "Extensions" tab and use this URL `https://github.com/s0md3v/roop` in the "Install from URL" tab.
+ Download the "inswapper_128.onnx" model from [here](https://huggingface.co/henryruhs/roop/resolve/main/inswapper_128.onnx) and put it inside the `<web-ui-dir>/extensions/roop/models` directory (see the sketch at the end of this section for one way to fetch it from Python).

On Windows, Microsoft Visual C++ 14.0 or greater must be installed before installing the extension. [During the install, make sure to include the Python and C++ packages.](https://github.com/s0md3v/roop/issues/153)
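If you prefer to fetch the model from a script, here is a minimal sketch (assuming only the Python standard library; the web-ui path below is a placeholder, adjust it to your installation):

```python
# Hypothetical helper: download inswapper_128.onnx into the extension's models folder.
# WEBUI_DIR is an assumption; point it at your actual <web-ui-dir>.
import os
import urllib.request

WEBUI_DIR = "stable-diffusion-webui"
MODELS_DIR = os.path.join(WEBUI_DIR, "extensions", "roop", "models")
MODEL_URL = "https://huggingface.co/henryruhs/roop/resolve/main/inswapper_128.onnx"

os.makedirs(MODELS_DIR, exist_ok=True)
urllib.request.urlretrieve(MODEL_URL, os.path.join(MODELS_DIR, "inswapper_128.onnx"))
```
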
## Usage

1. Under the "roop" drop-down menu, import an image containing a face.
2. Turn on the "Enable" checkbox.
3. That's it, the generated result will now have the face you selected.
### The result face is blurry

Use the "Restore Face" option. You can also try the "Upscaler" option or, for finer control, use an upscaler from the "Extras" tab.
### There are multiple faces in the result

Select the face numbers you wish to swap using the "Comma separated face number(s)" option (numbering starts at 0, from left to right); a short sketch of how the field is parsed follows below.
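For example, entering `0,2` swaps the first and third detected faces. Internally the extension parses the field into a set of integers (the expression is taken from `scripts/faceswap.py` in this commit):

```python
# Parse the "Comma separated face number(s)" value the same way the extension does.
faces_index_field = "0,2"
faces_index = {
    int(x) for x in faces_index_field.strip(",").split(",") if x.isnumeric()
}
if len(faces_index) == 0:
    faces_index = {0}  # default to the first detected face
print(faces_index)  # {0, 2}
```
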
### The result is totally black

This means roop detected that your image is NSFW.
### Img2Img

You can choose to activate the swap on the source image, on the generated image, or on both, using the checkboxes. Activating it on the source image allows you to start from a given base and apply the diffusion process to it.

Inpainting should work, but only the masked part will be swapped.
BIN
example/example.png
Normal file
Binary file not shown. (683 KiB)
44
install.py
Normal file
@@ -0,0 +1,44 @@
import launch
import os
import pkg_resources
import sys
import traceback

req_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "requirements.txt")

models_dir = os.path.abspath("models/roop")

if not os.path.exists(models_dir):
    os.makedirs(models_dir)
print(f"roop : You can put the model in the {models_dir} directory")

print("Check roop requirements")
with open(req_file) as file:
    for package in file:
        try:
            python = sys.executable
            package = package.strip()

            if not launch.is_installed(package):
                print(f"Install {package}")
                launch.run_pip(
                    f"install {package}", f"sd-webui-roop requirement: {package}"
                )
            elif "==" in package:
                # Pinned requirement: reinstall if the installed version differs.
                package_name, package_version = package.split("==")
                installed_version = pkg_resources.get_distribution(package_name).version
                if installed_version != package_version:
                    print(
                        f"Install {package}, {installed_version} vs {package_version}"
                    )
                    launch.run_pip(
                        f"install {package}",
                        f"sd-webui-roop requirement: changing {package_name} version from {installed_version} to {package_version}",
                    )

        except Exception as e:
            print(e)
            print(f"Warning: Failed to install {package}, roop will not work.")
            raise e

1
models/Put_the_model_here.txt
Normal file
@@ -0,0 +1 @@
The required model file is "inswapper_128.onnx". Mirrors are listed in the roop project [installation guide](https://github.com/s0md3v/roop/wiki/1.-Installation).
6
requirements.txt
Normal file
@@ -0,0 +1,6 @@
insightface==0.7.3
onnx==1.14.0
onnxruntime==1.15.0
tensorflow==2.12.0
opencv-python==4.7.0.72
diffusers==0.17.1

57
scripts/cimage.py
Normal file
@@ -0,0 +1,57 @@
from typing import List, Union, Dict, Set, Tuple

from diffusers.pipelines.stable_diffusion.safety_checker import (
    StableDiffusionSafetyChecker,
)
from transformers import AutoFeatureExtractor
import torch
from PIL import Image, ImageFilter
import numpy as np

safety_model_id: str = "CompVis/stable-diffusion-safety-checker"
safety_feature_extractor: AutoFeatureExtractor = None
safety_checker: StableDiffusionSafetyChecker = None


def numpy_to_pil(images: np.ndarray) -> List[Image.Image]:
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    pil_images = [Image.fromarray(image) for image in images]

    return pil_images


def check_image(x_image: np.ndarray) -> Tuple[np.ndarray, List[bool]]:
    global safety_feature_extractor, safety_checker

    # Lazily load the safety checker and its feature extractor on first use.
    if safety_feature_extractor is None:
        safety_feature_extractor = AutoFeatureExtractor.from_pretrained(safety_model_id)
        safety_checker = StableDiffusionSafetyChecker.from_pretrained(safety_model_id)

    safety_checker_input = safety_feature_extractor(
        images=numpy_to_pil(x_image), return_tensors="pt"
    )
    # hs is a list of booleans, True where NSFW content was detected.
    x_checked_image, hs = safety_checker(
        images=x_image, clip_input=safety_checker_input.pixel_values
    )

    return x_checked_image, hs


def check_batch(x: torch.Tensor) -> torch.Tensor:
    x_samples_ddim_numpy = x.cpu().permute(0, 2, 3, 1).numpy()
    x_checked_image, _ = check_image(x_samples_ddim_numpy)
    x = torch.from_numpy(x_checked_image).permute(0, 3, 1, 2)
    return x


def convert_to_sd(img: Image.Image) -> Image.Image:
    _, hs = check_image(np.array(img))
    if any(hs):
        # Degrade flagged images: heavy downscale, upscale back, then blur.
        img = (
            img.resize((int(img.width * 0.1), int(img.height * 0.1)))
            .resize(img.size, Image.BOX)
            .filter(ImageFilter.BLUR)
        )
    return img

199
scripts/faceswap.py
Normal file
@@ -0,0 +1,199 @@
import gradio as gr
import modules.scripts as scripts
from modules.upscaler import Upscaler, UpscalerData
from modules import scripts, shared, images, scripts_postprocessing
from modules.processing import (
    StableDiffusionProcessing,
    StableDiffusionProcessingImg2Img,
)
from modules.shared import cmd_opts, opts, state
from PIL import Image
import glob
from modules.face_restoration import FaceRestoration

from scripts.roop_logging import logger
from scripts.swapper import UpscaleOptions, swap_face, ImageResult
from scripts.cimage import check_batch
from scripts.roop_version import version_flag
import os


def get_models():
    # Look for swap models both in the extension folder and in the shared models/roop folder.
    models_path = os.path.join(
        scripts.basedir(), "extensions/sd-webui-roop/models/*"
    )
    models = glob.glob(models_path)
    models_path = os.path.join(scripts.basedir(), "models/roop/*")
    models += glob.glob(models_path)
    models = [x for x in models if x.endswith(".onnx") or x.endswith(".pth")]
    return models


class FaceSwapScript(scripts.Script):
    def title(self):
        return "roop"

    def show(self, is_img2img):
        return scripts.AlwaysVisible

    def ui(self, is_img2img):
        with gr.Accordion(f"roop {version_flag}", open=False):
            with gr.Column():
                img = gr.inputs.Image(type="pil")
                enable = gr.Checkbox(False, placeholder="enable", label="Enable")
                faces_index = gr.Textbox(
                    value="0",
                    placeholder="Which face to swap (comma separated), start from 0",
                    label="Comma separated face number(s)",
                )
                with gr.Row():
                    face_restorer_name = gr.Radio(
                        label="Restore Face",
                        choices=["None"] + [x.name() for x in shared.face_restorers],
                        value=shared.face_restorers[0].name(),
                        type="value",
                    )
                    face_restorer_visibility = gr.Slider(
                        0, 1, 1, step=0.1, label="Restore visibility"
                    )
                upscaler_name = gr.inputs.Dropdown(
                    choices=[upscaler.name for upscaler in shared.sd_upscalers],
                    label="Upscaler",
                )
                upscaler_scale = gr.Slider(1, 8, 1, step=0.1, label="Upscaler scale")
                upscaler_visibility = gr.Slider(
                    0, 1, 1, step=0.1, label="Upscaler visibility (if scale = 1)"
                )

                models = get_models()
                if len(models) == 0:
                    logger.warning(
                        "You should have at least one model in the models directory, please read the doc here: https://github.com/s0md3v/sd-webui-roop/"
                    )
                    model = gr.inputs.Dropdown(
                        choices=models,
                        label="Model not found, please download one and reload automatic 1111",
                    )
                else:
                    model = gr.inputs.Dropdown(
                        choices=models, label="Model", default=models[0]
                    )

                swap_in_source = gr.Checkbox(
                    False,
                    placeholder="Swap face in source image",
                    label="Swap in source image",
                    visible=is_img2img,
                )
                swap_in_generated = gr.Checkbox(
                    True,
                    placeholder="Swap face in generated image",
                    label="Swap in generated image",
                    visible=is_img2img,
                )

        return [
            img,
            enable,
            faces_index,
            model,
            face_restorer_name,
            face_restorer_visibility,
            upscaler_name,
            upscaler_scale,
            upscaler_visibility,
            swap_in_source,
            swap_in_generated,
        ]

    @property
    def upscaler(self) -> UpscalerData:
        for upscaler in shared.sd_upscalers:
            if upscaler.name == self.upscaler_name:
                return upscaler
        return None

    @property
    def face_restorer(self) -> FaceRestoration:
        for face_restorer in shared.face_restorers:
            if face_restorer.name() == self.face_restorer_name:
                return face_restorer
        return None

    @property
    def upscale_options(self) -> UpscaleOptions:
        return UpscaleOptions(
            scale=self.upscaler_scale,
            upscaler=self.upscaler,
            face_restorer=self.face_restorer,
            upscale_visibility=self.upscaler_visibility,
            restorer_visibility=self.face_restorer_visibility,
        )

    def process(
        self,
        p: StableDiffusionProcessing,
        img,
        enable,
        faces_index,
        model,
        face_restorer_name,
        face_restorer_visibility,
        upscaler_name,
        upscaler_scale,
        upscaler_visibility,
        swap_in_source,
        swap_in_generated,
    ):
        self.source = img
        self.face_restorer_name = face_restorer_name
        self.upscaler_scale = upscaler_scale
        self.upscaler_visibility = upscaler_visibility
        self.face_restorer_visibility = face_restorer_visibility
        self.enable = enable
        self.upscaler_name = upscaler_name
        self.swap_in_generated = swap_in_generated
        self.model = model
        # Parse the comma separated face numbers into a set of indices; default to {0}.
        self.faces_index = {
            int(x) for x in faces_index.strip(",").split(",") if x.isnumeric()
        }
        if len(self.faces_index) == 0:
            self.faces_index = {0}
        if self.enable:
            if self.source is not None:
                if isinstance(p, StableDiffusionProcessingImg2Img) and swap_in_source:
                    logger.info("roop enabled, face index %s", self.faces_index)

                    for i in range(len(p.init_images)):
                        logger.info("Swap in source %s", i)
                        result = swap_face(
                            self.source,
                            p.init_images[i],
                            faces_index=self.faces_index,
                            model=self.model,
                            upscale_options=self.upscale_options,
                        )
                        p.init_images[i] = result.image()
            else:
                logger.error("Please provide a source face")

    def postprocess_batch(self, p, *args, **kwargs):
        if self.enable:
            # Run the NSFW check on the whole batch before postprocessing.
            images = kwargs["images"]
            images[:] = check_batch(images)[:]

    def postprocess_image(self, p, script_pp: scripts.PostprocessImageArgs, *args):
        if self.enable and self.swap_in_generated:
            if self.source is not None:
                image: Image.Image = script_pp.image
                result: ImageResult = swap_face(
                    self.source,
                    image,
                    faces_index=self.faces_index,
                    model=self.model,
                    upscale_options=self.upscale_options,
                )
                pp = scripts_postprocessing.PostprocessedImage(result.image())
                pp.info = {}
                p.extra_generation_params.update(pp.info)
                script_pp.image = pp.image

41
scripts/roop_logging.py
Normal file
@@ -0,0 +1,41 @@
import logging
import copy
import sys

from modules import shared


class ColoredFormatter(logging.Formatter):
    COLORS = {
        "DEBUG": "\033[0;36m",  # CYAN
        "INFO": "\033[0;32m",  # GREEN
        "WARNING": "\033[0;33m",  # YELLOW
        "ERROR": "\033[0;31m",  # RED
        "CRITICAL": "\033[0;37;41m",  # WHITE ON RED
        "RESET": "\033[0m",  # RESET COLOR
    }

    def format(self, record):
        colored_record = copy.copy(record)
        levelname = colored_record.levelname
        seq = self.COLORS.get(levelname, self.COLORS["RESET"])
        colored_record.levelname = f"{seq}{levelname}{self.COLORS['RESET']}"
        return super().format(colored_record)


# Create a new logger
logger = logging.getLogger("roop")
logger.propagate = False

# Add handler if we don't have one.
if not logger.handlers:
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(
        ColoredFormatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    )
    logger.addHandler(handler)

# Configure logger: fall back to INFO if the level string is unknown.
loglevel_string = getattr(shared.cmd_opts, "controlnet_loglevel", "INFO")
loglevel = getattr(logging, loglevel_string.upper(), logging.INFO)
logger.setLevel(loglevel)

5
scripts/roop_version.py
Normal file
@@ -0,0 +1,5 @@
version_flag = "v0.0.1"

from scripts.roop_logging import logger

logger.info(f"roop {version_flag}")

167
scripts/swapper.py
Normal file
@@ -0,0 +1,167 @@
import copy
import math
import os
import tempfile
from dataclasses import dataclass
from typing import List, Union, Dict, Set, Tuple

import cv2
import numpy as np
from PIL import Image

import insightface
import onnxruntime
from scripts.cimage import convert_to_sd

from modules.face_restoration import FaceRestoration, restore_faces
from modules.upscaler import Upscaler, UpscalerData
from scripts.roop_logging import logger

providers = onnxruntime.get_available_providers()


@dataclass
class UpscaleOptions:
    scale: int = 1
    upscaler: UpscalerData = None
    upscale_visibility: float = 0.5
    face_restorer: FaceRestoration = None
    restorer_visibility: float = 0.5


def save_image(img: Image.Image, filename: str):
    # Run the NSFW check (and blur if flagged) before writing to disk.
    convert_to_sd(img).save(filename)


def cosine_distance(vector1: np.ndarray, vector2: np.ndarray) -> float:
    vec1 = vector1.flatten()
    vec2 = vector2.flatten()

    dot_product = np.dot(vec1, vec2)
    norm1 = np.linalg.norm(vec1)
    norm2 = np.linalg.norm(vec2)

    cosine_distance = 1 - (dot_product / (norm1 * norm2))
    return cosine_distance


def cosine_similarity(test_vec: np.ndarray, source_vecs: List[np.ndarray]) -> float:
    # Average cosine distance between the test vector and each source vector.
    cos_dist = sum(cosine_distance(test_vec, source_vec) for source_vec in source_vecs)
    average_cos_dist = cos_dist / len(source_vecs)
    return average_cos_dist


ANALYSIS_MODEL = None


def getAnalysisModel():
    # Lazily create the insightface analysis model (face detection/embedding).
    global ANALYSIS_MODEL
    if ANALYSIS_MODEL is None:
        ANALYSIS_MODEL = insightface.app.FaceAnalysis(
            name="buffalo_l", providers=providers
        )
    return ANALYSIS_MODEL


FS_MODEL = None
CURRENT_FS_MODEL_PATH = None


def getFaceSwapModel(model_path: str):
    # Cache the face swap model and reload it only when the path changes.
    global FS_MODEL
    global CURRENT_FS_MODEL_PATH
    if CURRENT_FS_MODEL_PATH is None or CURRENT_FS_MODEL_PATH != model_path:
        CURRENT_FS_MODEL_PATH = model_path
        FS_MODEL = insightface.model_zoo.get_model(model_path, providers=providers)

    return FS_MODEL


def upscale_image(image: Image.Image, upscale_options: UpscaleOptions):
    result_image = image
    if upscale_options.upscaler is not None and upscale_options.upscaler.name != "None":
        original_image = result_image.copy()
        logger.info(
            "Upscale with %s scale = %s",
            upscale_options.upscaler.name,
            upscale_options.scale,
        )
        result_image = upscale_options.upscaler.scaler.upscale(
            image, upscale_options.scale, upscale_options.upscaler.data_path
        )
        if upscale_options.scale == 1:
            result_image = Image.blend(
                original_image, result_image, upscale_options.upscale_visibility
            )

    if upscale_options.face_restorer is not None:
        original_image = result_image.copy()
        logger.info("Restore face with %s", upscale_options.face_restorer.name())
        numpy_image = np.array(result_image)
        numpy_image = upscale_options.face_restorer.restore(numpy_image)
        restored_image = Image.fromarray(numpy_image)
        result_image = Image.blend(
            original_image, restored_image, upscale_options.restorer_visibility
        )

    return result_image


def get_face_single(img_data: np.ndarray, face_index=0, det_size=(640, 640)):
    face_analyser = copy.deepcopy(getAnalysisModel())
    face_analyser.prepare(ctx_id=0, det_size=det_size)
    face = face_analyser.get(img_data)

    # If nothing was detected, retry with a smaller detection size.
    if len(face) == 0 and det_size[0] > 320 and det_size[1] > 320:
        det_size_half = (det_size[0] // 2, det_size[1] // 2)
        return get_face_single(img_data, face_index=face_index, det_size=det_size_half)

    try:
        # Faces are ordered left to right by the x coordinate of their bounding box.
        return sorted(face, key=lambda x: x.bbox[0])[face_index]
    except IndexError:
        return None


@dataclass
class ImageResult:
    path: Union[str, None] = None
    similarity: Union[Dict[int, float], None] = None  # face, 0..1

    def image(self) -> Union[Image.Image, None]:
        if self.path:
            return Image.open(self.path)
        return None


def swap_face(
    source_img: Image.Image,
    target_img: Image.Image,
    model: Union[str, None] = None,
    faces_index: Set[int] = {0},
    upscale_options: Union[UpscaleOptions, None] = None,
) -> ImageResult:
    fn = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
    if model is not None:
        source_img = cv2.cvtColor(np.array(source_img), cv2.COLOR_RGB2BGR)
        target_img = cv2.cvtColor(np.array(target_img), cv2.COLOR_RGB2BGR)
        source_face = get_face_single(source_img, face_index=0)
        if source_face is not None:
            result = target_img
            model_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), model)
            face_swapper = getFaceSwapModel(model_path)

            for face_num in faces_index:
                target_face = get_face_single(target_img, face_index=face_num)
                if target_face is not None:
                    result = face_swapper.get(result, target_face, source_face)
                else:
                    logger.info(f"No target face found for {face_num}")

            result_image = Image.fromarray(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
            if upscale_options is not None:
                result_image = upscale_image(result_image, upscale_options)

            save_image(result_image, fn.name)
        else:
            logger.info("No source face found")
    return ImageResult(path=fn.name)