Fixes and updates
This commit is contained in:
parent
941b94cd0f
commit
abddfb0611
@@ -26,4 +26,4 @@ def check_batch(x: torch.Tensor) -> torch.Tensor:
 
 
 def convert_to_sd(img: Image) -> Image:
     return img

@@ -1,199 +1,158 @@
 import gradio as gr
 import modules.scripts as scripts
-from modules.upscaler import Upscaler, UpscalerData
 from modules import scripts, shared, images, scripts_postprocessing
 from modules.processing import (
     StableDiffusionProcessing,
     StableDiffusionProcessingImg2Img,
 )
 from modules.shared import cmd_opts, opts, state
 from PIL import Image
 import glob
 from modules.face_restoration import FaceRestoration
 
 from scripts.roop_logging import logger
-from scripts.swapper import UpscaleOptions, swap_face, ImageResult
+from scripts.swapper import swap_face, ImageResult
 from scripts.cimage import check_batch
 from scripts.roop_version import version_flag
 import os
 
 
 def get_models():
-    models_path = os.path.join(
-        scripts.basedir(), "extensions/sd-webui-roop/models/*"
-    )
-    models = glob.glob(models_path)
-    models_path = os.path.join(scripts.basedir(), "models/roop/*")
-    models += glob.glob(models_path)
+    models_path = os.path.join(scripts.basedir(), "models/roop/*")
+    models = glob.glob(models_path)
     models = [x for x in models if x.endswith(".onnx") or x.endswith(".pth")]
     return models
 
 
 class FaceSwapScript(scripts.Script):
     def title(self):
-        return f"roop"
+        return f"nsfw-roop"
 
     def show(self, is_img2img):
         return scripts.AlwaysVisible
 
     def ui(self, is_img2img):
-        with gr.Accordion(f"roop {version_flag}", open=False):
+        with gr.Accordion(f"nsfw-roop {version_flag}", open=False):
             with gr.Column():
                 img = gr.inputs.Image(type="pil")
                 enable = gr.Checkbox(False, placeholder="enable", label="Enable")
                 faces_index = gr.Textbox(
                     value="0",
                     placeholder="Which face to swap (comma separated), start from 0",
                     label="Comma separated face number(s)",
                 )
                 with gr.Row():
                     face_restorer_name = gr.Radio(
                         label="Restore Face",
                         choices=["None"] + [x.name() for x in shared.face_restorers],
                         value=shared.face_restorers[0].name(),
                         type="value",
                     )
                     face_restorer_visibility = gr.Slider(
                         0, 1, 1, step=0.1, label="Restore visibility"
                     )
-                upscaler_name = gr.inputs.Dropdown(
-                    choices=[upscaler.name for upscaler in shared.sd_upscalers],
-                    label="Upscaler",
-                )
-                upscaler_scale = gr.Slider(1, 8, 1, step=0.1, label="Upscaler scale")
-                upscaler_visibility = gr.Slider(
-                    0, 1, 1, step=0.1, label="Upscaler visibility (if scale = 1)"
-                )
 
                 models = get_models()
                 if len(models) == 0:
                     logger.warning(
-                        "You should at least have one model in models directory, please read the doc here : https://github.com/s0md3v/sd-webui-roop/"
+                        "You should at least have one model in models directory, please read the doc here : https://github.com/s0md3v/sd-webui-roop-nsfw/"
                     )
                     model = gr.inputs.Dropdown(
                         choices=models,
                         label="Model not found, please download one and reload automatic 1111",
                     )
                 else:
                     model = gr.inputs.Dropdown(
                         choices=models, label="Model", default=models[0]
                     )
 
                 swap_in_source = gr.Checkbox(
                     False,
                     placeholder="Swap face in source image",
                     label="Swap in source image",
                     visible=is_img2img,
                 )
                 swap_in_generated = gr.Checkbox(
                     True,
                     placeholder="Swap face in generated image",
                     label="Swap in generated image",
                     visible=is_img2img,
                 )
 
         return [
             img,
             enable,
             faces_index,
             model,
             face_restorer_name,
             face_restorer_visibility,
-            upscaler_name,
-            upscaler_scale,
-            upscaler_visibility,
             swap_in_source,
             swap_in_generated,
         ]
 
-    @property
-    def upscaler(self) -> UpscalerData:
-        for upscaler in shared.sd_upscalers:
-            if upscaler.name == self.upscaler_name:
-                return upscaler
-        return None
-
     @property
     def face_restorer(self) -> FaceRestoration:
         for face_restorer in shared.face_restorers:
             if face_restorer.name() == self.face_restorer_name:
                 return face_restorer
         return None
 
-    @property
-    def upscale_options(self) -> UpscaleOptions:
-        return UpscaleOptions(
-            scale=self.upscaler_scale,
-            upscaler=self.upscaler,
-            face_restorer=self.face_restorer,
-            upscale_visibility=self.upscaler_visibility,
-            restorer_visibility=self.face_restorer_visibility,
-        )
-
     def process(
         self,
         p: StableDiffusionProcessing,
         img,
         enable,
         faces_index,
         model,
         face_restorer_name,
         face_restorer_visibility,
-        upscaler_name,
-        upscaler_scale,
-        upscaler_visibility,
         swap_in_source,
         swap_in_generated,
     ):
         self.source = img
         self.face_restorer_name = face_restorer_name
-        self.upscaler_scale = upscaler_scale
-        self.upscaler_visibility = upscaler_visibility
         self.face_restorer_visibility = face_restorer_visibility
         self.enable = enable
-        self.upscaler_name = upscaler_name
         self.swap_in_generated = swap_in_generated
         self.model = model
         self.faces_index = {
             int(x) for x in faces_index.strip(",").split(",") if x.isnumeric()
         }
         if len(self.faces_index) == 0:
             self.faces_index = {0}
         if self.enable:
             if self.source is not None:
                 if isinstance(p, StableDiffusionProcessingImg2Img) and swap_in_source:
-                    logger.info(f"roop enabled, face index %s", self.faces_index)
+                    logger.info(f"nsfw-roop enabled, face index %s", self.faces_index)
 
                     for i in range(len(p.init_images)):
                         logger.info(f"Swap in source %s", i)
                         result = swap_face(
                             self.source,
                             p.init_images[i],
                             faces_index=self.faces_index,
                             model=self.model,
-                            upscale_options=self.upscale_options,
                         )
                         p.init_images[i] = result.image()
             else:
                 logger.error(f"Please provide a source face")
 
     def postprocess_batch(self, p, *args, **kwargs):
         if self.enable:
             images = kwargs["images"]
             images[:] = check_batch(images)[:]
 
     def postprocess_image(self, p, script_pp: scripts.PostprocessImageArgs, *args):
         if self.enable and self.swap_in_generated:
             if self.source is not None:
                 image: Image.Image = script_pp.image
                 result: ImageResult = swap_face(
                     self.source,
                     image,
                     faces_index=self.faces_index,
                     model=self.model,
-                    upscale_options=self.upscale_options,
                 )
                 pp = scripts_postprocessing.PostprocessedImage(result.image())
                 pp.info = {}
                 p.extra_generation_params.update(pp.info)
                 script_pp.image = pp.image
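Aside (not part of the commit): the faces_index parsing that this hunk keeps in FaceSwapScript.process() is plain stdlib string handling. A minimal standalone sketch of that behaviour follows; the helper name parse_faces_index is illustrative only and does not exist in the extension.

def parse_faces_index(faces_index: str):
    # Same comprehension as in FaceSwapScript.process(): entries that are not
    # purely numeric (including values with stray spaces) are dropped.
    indexes = {
        int(x) for x in faces_index.strip(",").split(",") if x.isnumeric()
    }
    # The script falls back to face 0 when nothing valid was entered.
    return indexes if indexes else {0}


print(parse_faces_index("0,1,2"))   # {0, 1, 2}
print(parse_faces_index("banana"))  # {0}
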
@@ -1,5 +1,5 @@
-version_flag = "v0.0.1"
+version_flag = "v0.0.2"
 
 from scripts.roop_logging import logger
 
-logger.info(f"roop {version_flag}")
+logger.info(f"nswf-roop {version_flag}")

@@ -1,167 +1,124 @@
 import copy
 import math
 import os
 import tempfile
 from dataclasses import dataclass
 from typing import List, Union, Dict, Set, Tuple
 
 import cv2
 import numpy as np
 from PIL import Image
 
 import insightface
 import onnxruntime
 from scripts.cimage import convert_to_sd
 
 from modules.face_restoration import FaceRestoration, restore_faces
-from modules.upscaler import Upscaler, UpscalerData
 from scripts.roop_logging import logger
 
 providers = onnxruntime.get_available_providers()
 
 
-@dataclass
-class UpscaleOptions:
-    scale: int = 1
-    upscaler: UpscalerData = None
-    upscale_visibility: float = 0.5
-    face_restorer: FaceRestoration = None
-    restorer_visibility: float = 0.5
-
-
 def save_image(img: Image, filename: str):
     convert_to_sd(img).save(filename)
 
 
 def cosine_distance(vector1: np.ndarray, vector2: np.ndarray) -> float:
     vec1 = vector1.flatten()
     vec2 = vector2.flatten()
 
     dot_product = np.dot(vec1, vec2)
     norm1 = np.linalg.norm(vec1)
     norm2 = np.linalg.norm(vec2)
 
     cosine_distance = 1 - (dot_product / (norm1 * norm2))
     return cosine_distance
 
 
 def cosine_similarity(test_vec: np.ndarray, source_vecs: List[np.ndarray]) -> float:
     cos_dist = sum(cosine_distance(test_vec, source_vec) for source_vec in source_vecs)
     average_cos_dist = cos_dist / len(source_vecs)
     return average_cos_dist
 
 
 ANALYSIS_MODEL = None
 
 
 def getAnalysisModel():
     global ANALYSIS_MODEL
     if ANALYSIS_MODEL is None:
         ANALYSIS_MODEL = insightface.app.FaceAnalysis(
             name="buffalo_l", providers=providers
         )
     return ANALYSIS_MODEL
 
 
 FS_MODEL = None
 CURRENT_FS_MODEL_PATH = None
 
 
 def getFaceSwapModel(model_path: str):
     global FS_MODEL
     global CURRENT_FS_MODEL_PATH
     if CURRENT_FS_MODEL_PATH is None or CURRENT_FS_MODEL_PATH != model_path:
         CURRENT_FS_MODEL_PATH = model_path
         FS_MODEL = insightface.model_zoo.get_model(model_path, providers=providers)
 
     return FS_MODEL
 
 
-def upscale_image(image: Image, upscale_options: UpscaleOptions):
-    result_image = image
-    if upscale_options.upscaler is not None and upscale_options.upscaler.name != "None":
-        original_image = result_image.copy()
-        logger.info(
-            "Upscale with %s scale = %s",
-            upscale_options.upscaler.name,
-            upscale_options.scale,
-        )
-        result_image = upscale_options.upscaler.scaler.upscale(
-            image, upscale_options.scale, upscale_options.upscaler.data_path
-        )
-        if upscale_options.scale == 1:
-            result_image = Image.blend(
-                original_image, result_image, upscale_options.upscale_visibility
-            )
-
-    if upscale_options.face_restorer is not None:
-        original_image = result_image.copy()
-        logger.info("Restore face with %s", upscale_options.face_restorer.name())
-        numpy_image = np.array(result_image)
-        numpy_image = upscale_options.face_restorer.restore(numpy_image)
-        restored_image = Image.fromarray(numpy_image)
-        result_image = Image.blend(
-            original_image, restored_image, upscale_options.restorer_visibility
-        )
-
-    return result_image
-
-
 def get_face_single(img_data: np.ndarray, face_index=0, det_size=(640, 640)):
     face_analyser = copy.deepcopy(getAnalysisModel())
     face_analyser.prepare(ctx_id=0, det_size=det_size)
     face = face_analyser.get(img_data)
 
     if len(face) == 0 and det_size[0] > 320 and det_size[1] > 320:
         det_size_half = (det_size[0] // 2, det_size[1] // 2)
         return get_face_single(img_data, face_index=face_index, det_size=det_size_half)
 
     try:
         return sorted(face, key=lambda x: x.bbox[0])[face_index]
     except IndexError:
         return None
 
 
 @dataclass
 class ImageResult:
     path: Union[str, None] = None
     similarity: Union[Dict[int, float], None] = None  # face, 0..1
 
     def image(self) -> Union[Image.Image, None]:
         if self.path:
             return Image.open(self.path)
         return None
 
 
 def swap_face(
     source_img: Image.Image,
     target_img: Image.Image,
     model: Union[str, None] = None,
     faces_index: Set[int] = {0},
-    upscale_options: Union[UpscaleOptions, None] = None,
 ) -> ImageResult:
     fn = tempfile.NamedTemporaryFile(delete=False, suffix=".png")
     if model is not None:
         source_img = cv2.cvtColor(np.array(source_img), cv2.COLOR_RGB2BGR)
         target_img = cv2.cvtColor(np.array(target_img), cv2.COLOR_RGB2BGR)
         source_face = get_face_single(source_img, face_index=0)
         if source_face is not None:
             result = target_img
             model_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), model)
             face_swapper = getFaceSwapModel(model_path)
 
             for face_num in faces_index:
                 target_face = get_face_single(target_img, face_index=face_num)
                 if target_face is not None:
                     result = face_swapper.get(result, target_face, source_face)
                 else:
                     logger.info(f"No target face found for {face_num}")
 
             result_image = Image.fromarray(cv2.cvtColor(result, cv2.COLOR_BGR2RGB))
-            if upscale_options is not None:
-                result_image = upscale_image(result_image, upscale_options)
 
             save_image(result_image, fn.name)
         else:
             logger.info("No source face found")
     return ImageResult(path=fn.name)
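Aside (not part of the commit): the cosine helpers kept by this hunk, presumably scripts/swapper.py given the from scripts.swapper import ... line in the earlier hunk, are plain NumPy. A quick standalone check of what cosine_distance computes (1 minus the cosine of the angle between the flattened vectors; note that cosine_similarity above actually returns the average of these distances over the source vectors):

import numpy as np


def cosine_distance(vector1: np.ndarray, vector2: np.ndarray) -> float:
    # Same computation as the helper kept in the hunk above.
    vec1 = vector1.flatten()
    vec2 = vector2.flatten()
    return 1 - (np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2)))


a = np.array([1.0, 0.0])
b = np.array([0.0, 1.0])
print(cosine_distance(a, a))  # 0.0 -> same direction
print(cosine_distance(a, b))  # 1.0 -> orthogonal vectors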