diff --git a/OpenCV Projects/AI Anime Avatar Generator Model/AnimeGANv2.py b/OpenCV Projects/AI Anime Avatar Generator Model/AnimeGANv2.py new file mode 100644 index 000000000..288b4b755 --- /dev/null +++ b/OpenCV Projects/AI Anime Avatar Generator Model/AnimeGANv2.py @@ -0,0 +1,315 @@ +from tools.ops import * +from tools.utils import * +from glob import glob +import time +import numpy as np +from net import generator +from net.discriminator import D_net +from tools.data_loader import ImageGenerator +from tools.vgg19 import Vgg19 + +class AnimeGANv2(object) : + def __init__(self, sess, args): + self.model_name = 'AnimeGANv2' + self.sess = sess + self.checkpoint_dir = args.checkpoint_dir + self.log_dir = args.log_dir + self.dataset_name = args.dataset + + self.epoch = args.epoch + self.init_epoch = args.init_epoch # args.epoch // 20 + + self.gan_type = args.gan_type + self.batch_size = args.batch_size + self.save_freq = args.save_freq + + self.init_lr = args.init_lr + self.d_lr = args.d_lr + self.g_lr = args.g_lr + + """ Weight """ + self.g_adv_weight = args.g_adv_weight + self.d_adv_weight = args.d_adv_weight + self.con_weight = args.con_weight + self.sty_weight = args.sty_weight + self.color_weight = args.color_weight + self.tv_weight = args.tv_weight + + self.training_rate = args.training_rate + self.ld = args.ld + + self.img_size = args.img_size + self.img_ch = args.img_ch + + """ Discriminator """ + self.n_dis = args.n_dis + self.ch = args.ch + self.sn = args.sn + + self.sample_dir = os.path.join(args.sample_dir, self.model_dir) + check_folder(self.sample_dir) + + self.real = tf.placeholder(tf.float32, [self.batch_size, self.img_size[0], self.img_size[1], self.img_ch], name='real_A') + self.anime = tf.placeholder(tf.float32, [self.batch_size, self.img_size[0], self.img_size[1], self.img_ch], name='anime_A') + self.anime_smooth = tf.placeholder(tf.float32, [self.batch_size, self.img_size[0], self.img_size[1], self.img_ch], name='anime_smooth_A') + self.test_real = tf.placeholder(tf.float32, [1, None, None, self.img_ch], name='test_input') + + self.anime_gray = tf.placeholder(tf.float32, [self.batch_size, self.img_size[0], self.img_size[1], self.img_ch],name='anime_B') + + + self.real_image_generator = ImageGenerator('./dataset/train_photo', self.img_size, self.batch_size) + self.anime_image_generator = ImageGenerator('./dataset/{}'.format(self.dataset_name + '/style'), self.img_size, self.batch_size) + self.anime_smooth_generator = ImageGenerator('./dataset/{}'.format(self.dataset_name + '/smooth'), self.img_size, self.batch_size) + self.dataset_num = max(self.real_image_generator.num_images, self.anime_image_generator.num_images) + + self.vgg = Vgg19() + + print() + print("##### Information #####") + print("# gan type : ", self.gan_type) + print("# dataset : ", self.dataset_name) + print("# max dataset number : ", self.dataset_num) + print("# batch_size : ", self.batch_size) + print("# epoch : ", self.epoch) + print("# init_epoch : ", self.init_epoch) + print("# training image size [H, W] : ", self.img_size) + print("# g_adv_weight,d_adv_weight,con_weight,sty_weight,color_weight,tv_weight : ", self.g_adv_weight,self.d_adv_weight,self.con_weight,self.sty_weight,self.color_weight,self.tv_weight) + print("# init_lr,g_lr,d_lr : ", self.init_lr,self.g_lr,self.d_lr) + print(f"# training_rate G -- D: {self.training_rate} : 1" ) + print() + + ################################################################################## + # Generator + 
################################################################################## + + def generator(self, x_init, reuse=False, scope="generator"): + with tf.variable_scope(scope, reuse=reuse): + G = generator.G_net(x_init) + return G.fake + + ################################################################################## + # Discriminator + ################################################################################## + + def discriminator(self, x_init, reuse=False, scope="discriminator"): + D = D_net(x_init, self.ch, self.n_dis, self.sn, reuse=reuse, scope=scope) + return D + + ################################################################################## + # Model + ################################################################################## + def gradient_panalty(self, real, fake, scope="discriminator"): + if self.gan_type.__contains__('dragan') : + eps = tf.random_uniform(shape=tf.shape(real), minval=0., maxval=1.) + _, x_var = tf.nn.moments(real, axes=[0, 1, 2, 3]) + x_std = tf.sqrt(x_var) # magnitude of noise decides the size of local region + + fake = real + 0.5 * x_std * eps + + alpha = tf.random_uniform(shape=[self.batch_size, 1, 1, 1], minval=0., maxval=1.) + interpolated = real + alpha * (fake - real) + + logit, _= self.discriminator(interpolated, reuse=True, scope=scope) + + grad = tf.gradients(logit, interpolated)[0] # gradient of D(interpolated) + grad_norm = tf.norm(flatten(grad), axis=1) # l2 norm + + GP = 0 + # WGAN - LP + if self.gan_type.__contains__('lp'): + GP = self.ld * tf.reduce_mean(tf.square(tf.maximum(0.0, grad_norm - 1.))) + + elif self.gan_type.__contains__('gp') or self.gan_type == 'dragan' : + GP = self.ld * tf.reduce_mean(tf.square(grad_norm - 1.)) + + return GP + + def build_model(self): + + """ Define Generator, Discriminator """ + self.generated = self.generator(self.real) + self.test_generated = self.generator(self.test_real, reuse=True) + + + anime_logit = self.discriminator(self.anime) + anime_gray_logit = self.discriminator(self.anime_gray, reuse=True) + + generated_logit = self.discriminator(self.generated, reuse=True) + smooth_logit = self.discriminator(self.anime_smooth, reuse=True) + + """ Define Loss """ + if self.gan_type.__contains__('gp') or self.gan_type.__contains__('lp') or self.gan_type.__contains__('dragan') : + GP = self.gradient_panalty(real=self.anime, fake=self.generated) + else : + GP = 0.0 + + # init pharse + init_c_loss = con_loss(self.vgg, self.real, self.generated) + init_loss = self.con_weight * init_c_loss + + self.init_loss = init_loss + + # gan + c_loss, s_loss = con_sty_loss(self.vgg, self.real, self.anime_gray, self.generated) + tv_loss = self.tv_weight * total_variation_loss(self.generated) + t_loss = self.con_weight * c_loss + self.sty_weight * s_loss + color_loss(self.real,self.generated) * self.color_weight + tv_loss + + g_loss = self.g_adv_weight * generator_loss(self.gan_type, generated_logit) + d_loss = self.d_adv_weight * discriminator_loss(self.gan_type, anime_logit, anime_gray_logit, generated_logit, smooth_logit) + GP + + self.Generator_loss = t_loss + g_loss + self.Discriminator_loss = d_loss + + """ Training """ + t_vars = tf.trainable_variables() + G_vars = [var for var in t_vars if 'generator' in var.name] + D_vars = [var for var in t_vars if 'discriminator' in var.name] + + self.init_optim = tf.train.AdamOptimizer(self.init_lr, beta1=0.5, beta2=0.999).minimize(self.init_loss, var_list=G_vars) + self.G_optim = tf.train.AdamOptimizer(self.g_lr , beta1=0.5, 
beta2=0.999).minimize(self.Generator_loss, var_list=G_vars) + self.D_optim = tf.train.AdamOptimizer(self.d_lr , beta1=0.5, beta2=0.999).minimize(self.Discriminator_loss, var_list=D_vars) + + """" Summary """ + self.G_loss = tf.summary.scalar("Generator_loss", self.Generator_loss) + self.D_loss = tf.summary.scalar("Discriminator_loss", self.Discriminator_loss) + + self.G_gan = tf.summary.scalar("G_gan", g_loss) + self.G_vgg = tf.summary.scalar("G_vgg", t_loss) + self.G_init_loss = tf.summary.scalar("G_init", init_loss) + + self.V_loss_merge = tf.summary.merge([self.G_init_loss]) + self.G_loss_merge = tf.summary.merge([self.G_loss, self.G_gan, self.G_vgg, self.G_init_loss]) + self.D_loss_merge = tf.summary.merge([self.D_loss]) + + def train(self): + # initialize all variables + self.sess.run(tf.global_variables_initializer()) + + # saver to save model + self.saver = tf.train.Saver(max_to_keep=self.epoch) + + # summary writer + self.writer = tf.summary.FileWriter(self.log_dir + '/' + self.model_dir, self.sess.graph) + + """ Input Image""" + real_img_op, anime_img_op, anime_smooth_op = self.real_image_generator.load_images(), self.anime_image_generator.load_images(), self.anime_smooth_generator.load_images() + + + # restore check-point if it exits + could_load, checkpoint_counter = self.load(self.checkpoint_dir) + if could_load: + start_epoch = checkpoint_counter + 1 + + print(" [*] Load SUCCESS") + else: + start_epoch = 0 + + print(" [!] Load failed...") + + # loop for epoch + init_mean_loss = [] + mean_loss = [] + # training times , G : D = self.training_rate : 1 + j = self.training_rate + for epoch in range(start_epoch, self.epoch): + for idx in range(int(self.dataset_num / self.batch_size)): + anime, anime_smooth, real = self.sess.run([anime_img_op, anime_smooth_op, real_img_op]) + train_feed_dict = { + self.real:real[0], + self.anime:anime[0], + self.anime_gray:anime[1], + self.anime_smooth:anime_smooth[1] + } + + if epoch < self.init_epoch : + # Init G + start_time = time.time() + + real_images, generator_images, _, v_loss, summary_str = self.sess.run([self.real, self.generated, + self.init_optim, + self.init_loss, self.V_loss_merge], feed_dict = train_feed_dict) + self.writer.add_summary(summary_str, epoch) + init_mean_loss.append(v_loss) + + print("Epoch: %3d Step: %5d / %5d time: %f s init_v_loss: %.8f mean_v_loss: %.8f" % (epoch, idx,int(self.dataset_num / self.batch_size), time.time() - start_time, v_loss, np.mean(init_mean_loss))) + if (idx+1)%200 ==0: + init_mean_loss.clear() + else : + start_time = time.time() + + if j == self.training_rate: + # Update D + _, d_loss, summary_str = self.sess.run([self.D_optim, self.Discriminator_loss, self.D_loss_merge], + feed_dict=train_feed_dict) + self.writer.add_summary(summary_str, epoch) + + # Update G + real_images, generator_images, _, g_loss, summary_str = self.sess.run([self.real, self.generated,self.G_optim, + self.Generator_loss, self.G_loss_merge], feed_dict = train_feed_dict) + self.writer.add_summary(summary_str, epoch) + + mean_loss.append([d_loss, g_loss]) + if j == self.training_rate: + + print( + "Epoch: %3d Step: %5d / %5d time: %f s d_loss: %.8f, g_loss: %.8f -- mean_d_loss: %.8f, mean_g_loss: %.8f" % ( + epoch, idx, int(self.dataset_num / self.batch_size), time.time() - start_time, d_loss, g_loss, np.mean(mean_loss, axis=0)[0], + np.mean(mean_loss, axis=0)[1])) + else: + print( + "Epoch: %3d Step: %5d / %5d time: %f s , g_loss: %.8f -- mean_g_loss: %.8f" % ( + epoch, idx, int(self.dataset_num / self.batch_size), time.time() 
- start_time, g_loss, np.mean(mean_loss, axis=0)[1])) + + if (idx + 1) % 200 == 0: + mean_loss.clear() + + j = j - 1 + if j < 1: + j = self.training_rate + + + if (epoch + 1) >= self.init_epoch and np.mod(epoch + 1, self.save_freq) == 0: + self.save(self.checkpoint_dir, epoch) + + if epoch >= self.init_epoch -1: + """ Result Image """ + val_files = glob('./dataset/{}/*.*'.format('val')) + save_path = './{}/{:03d}/'.format(self.sample_dir, epoch) + check_folder(save_path) + for i, sample_file in enumerate(val_files): + print('val: '+ str(i) + sample_file) + sample_image = np.asarray(load_test_data(sample_file, self.img_size)) + test_real,test_generated = self.sess.run([self.test_real,self.test_generated],feed_dict = {self.test_real:sample_image} ) + save_images(test_real, save_path+'{:03d}_a.jpg'.format(i), None) + save_images(test_generated, save_path+'{:03d}_b.jpg'.format(i), None) + + @property + def model_dir(self): + return "{}_{}_{}_{}_{}_{}_{}_{}_{}".format(self.model_name, self.dataset_name, + self.gan_type, + int(self.g_adv_weight), int(self.d_adv_weight), + int(self.con_weight), int(self.sty_weight), + int(self.color_weight), int(self.tv_weight)) + + + def save(self, checkpoint_dir, step): + checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir) + if not os.path.exists(checkpoint_dir): + os.makedirs(checkpoint_dir) + self.saver.save(self.sess, os.path.join(checkpoint_dir, self.model_name + '.model'), global_step=step) + + def load(self, checkpoint_dir): + print(" [*] Reading checkpoints...") + checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir) + + ckpt = tf.train.get_checkpoint_state(checkpoint_dir) # checkpoint file information + + if ckpt and ckpt.model_checkpoint_path: + ckpt_name = os.path.basename(ckpt.model_checkpoint_path) # first line + self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name)) + counter = int(ckpt_name.split('-')[-1]) + print(" [*] Success to read {}".format(os.path.join(checkpoint_dir, ckpt_name))) + return True, counter + else: + print(" [*] Failed to find a checkpoint") + return False, 0 diff --git a/OpenCV Projects/AI Anime Avatar Generator Model/LICENSE.md b/OpenCV Projects/AI Anime Avatar Generator Model/LICENSE.md new file mode 100644 index 000000000..8882e076d --- /dev/null +++ b/OpenCV Projects/AI Anime Avatar Generator Model/LICENSE.md @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 RAMESWAR BISOYI + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file
diff --git a/OpenCV Projects/AI Anime Avatar Generator Model/README.md b/OpenCV Projects/AI Anime Avatar Generator Model/README.md
new file mode 100644
index 000000000..823139a98
--- /dev/null
+++ b/OpenCV Projects/AI Anime Avatar Generator Model/README.md
@@ -0,0 +1,99 @@
+# 🤖 AI Anime Avatar Generator using OpenCV and Matplotlib
+
+This repository contains a Python script to transform any given image into a smooth, anime-like avatar with soft edges, vibrant colors, and smooth shading transitions. The effect is inspired by traditional anime art styles. The project uses OpenCV and Matplotlib for image processing and display.
+
+## 🛠️ Features
+- Load an image from a file and display it in RGB format.
+- Apply a smoother, more anime-like effect with soft edges and vibrant colors.
+- Use Gaussian blurring, adaptive thresholding, and bilateral filtering to create the effect.
+- Boost saturation and brightness to mimic anime-style vibrancy.
+- Display the original image and the cartoonized image side by side.
+
+## 🎯 Prerequisites
+
+Make sure you have Python 3.x installed. You'll also need the following Python libraries:
+
+```bash
+pip install numpy opencv-python matplotlib
+```
+## 🚀 How to Use
+- Clone the repository:
+
+```bash
+git clone https://github.com/yourusername/anime-avatar-generator.git
+cd anime-avatar-generator
+```
+- Replace the image path in the `generate_anime_avatar()` call with the path to your image file.
+
+
+- Run the Python script:
+```bash
+python model.py
+```
+Your anime-like avatar will be displayed side by side with the original image.
+
+## 📝 Code Overview
+- This project includes the following key functions:
+
+#### Load Image:
+This function loads an image from a file and converts it from OpenCV's BGR ordering to the RGB format that Matplotlib expects. The image is then passed on to the next function for further processing.
+
+#### Smooth Anime Effect:
+This function is the core of the anime avatar transformation. It applies several filters and transformations, including Gaussian blurring for smoother shading, adaptive thresholding for soft edges, and bilateral filtering to smooth colors while preserving edges, followed by a saturation and brightness boost in HSV space for anime-style vibrancy. Together these steps combine soft shading transitions and vibrant hues into the anime-like style.
+
+#### Display Images:
+The final step displays the original image and the cartoonized (anime-like) avatar side by side using Matplotlib, making it easy to compare the original with the modified version. A minimal usage sketch of these functions appears after the example images below.
+
+## 📸 Example
+- Here's an example of how an input image would be transformed into an anime-like avatar.
+
+#### Input Image:
+![Input image](<elon intro.jpg>)
+
+#### Output Image:
+![Output image](<output_elon.png>)
+
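+#### Usage Sketch:
+The functions described in the Code Overview can also be called individually. The snippet below is a minimal sketch, assuming `model.py` (this project's script) sits in the working directory; `your_photo.jpg` and `anime_avatar.png` are placeholder file names used only for illustration.
+
+```python
+import cv2
+from model import load_image, smooth_anime_effect, display_images
+
+# Load the photo; load_image converts OpenCV's BGR output to RGB
+original = load_image("your_photo.jpg")  # placeholder path, replace with your image
+
+# Apply the soft-edge, vibrant-color anime effect
+avatar = smooth_anime_effect(original)
+
+# Show the original and the stylized avatar side by side
+display_images(original, avatar)
+
+# Optionally save the avatar; convert back to BGR before writing with OpenCV
+cv2.imwrite("anime_avatar.png", cv2.cvtColor(avatar, cv2.COLOR_RGB2BGR))
+```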
+
+## 🤖 Technology Stack
+- This project is built using the following technologies:
+
+##### Python:
+The primary programming language for this project, known for its simplicity and a wide range of image processing libraries.
+##### OpenCV:
+A powerful open-source computer vision library that provides easy-to-use tools for image transformations and filters.
+##### NumPy:
+A fundamental library for scientific computing in Python, used for handling image arrays and numerical operations.
+##### Matplotlib:
+A popular Python library for visualizing data. Here, it is used to display the images before and after processing.
+## 💡 Inspiration
+- The concept of cartoonizing or applying anime-like effects to real-life images has been inspired by the popularity of anime and animated media across the world. People enjoy seeing themselves or their friends as animated characters, and with modern image processing libraries, it's possible to recreate similar effects programmatically.
+
+## 🧩 Possible Future Enhancements
+- Here are some ideas for extending the current functionality of the anime avatar generator:
+
+- Add support for multiple cartoon styles (e.g., Western comic style, watercolor style).
+- Implement a web interface using Flask or Streamlit for easy image uploads and processing.
+- Add more filters and effects for customization (e.g., background color changes, overlay effects).
+- Export the final anime avatar as a PNG or SVG file, making it easier to use on websites, profiles, or as avatars on social media.
+- Integrate the project with popular social media platforms for instant sharing of generated avatars.
+## ⚡ Performance Optimizations
+- Processing high-resolution images may take longer and consume significant memory. To optimize performance:
+
+- Use multithreading to speed up the filtering process.
+- Implement image resizing before processing to reduce the workload on your system (a minimal resizing sketch is included at the end of this README).
+- Consider implementing GPU acceleration with libraries like CuPy or CUDA.
+## 🧰 Troubleshooting
+
+#### Common Issues:
+
+##### No Module Named 'cv2' Error:
+- If you encounter this error, it means the OpenCV library is not installed correctly. Make sure to install it using `pip install opencv-python`.
+
+##### Image Not Displaying Properly:
+- Ensure the image path is correct and the image file is in a supported format (e.g., PNG, JPG).
+
+##### Slow Processing:
+- Large images may take longer to process because multiple filters are applied. Try reducing the image resolution before applying the anime effect.
+
+## 📜 License
+
+- This project is licensed under the MIT License. You are free to use, modify, and distribute this code as long as you include a copy of the license with your project. For more details, see the LICENSE file.
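+#### Resizing Sketch:
+As noted under Performance Optimizations, downscaling large photos before filtering is the simplest speed-up. The helper below is a minimal sketch and is not part of the existing script; `max_side` is an illustrative parameter.
+
+```python
+import cv2
+
+def resize_for_processing(image, max_side=1024):
+    """Downscale an RGB image so its longest side is at most max_side pixels."""
+    h, w = image.shape[:2]
+    scale = max_side / max(h, w)
+    if scale >= 1.0:
+        return image  # already small enough, leave it untouched
+    new_size = (int(w * scale), int(h * scale))  # cv2.resize expects (width, height)
+    return cv2.resize(image, new_size, interpolation=cv2.INTER_AREA)
+
+# Example: shrink the photo before applying the anime effect
+# original = resize_for_processing(load_image("your_photo.jpg"))
+# avatar = smooth_anime_effect(original)
+```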
+ diff --git a/OpenCV Projects/AI Anime Avatar Generator Model/Shinkai_53.pb b/OpenCV Projects/AI Anime Avatar Generator Model/Shinkai_53.pb new file mode 100644 index 000000000..6e4498e31 Binary files /dev/null and b/OpenCV Projects/AI Anime Avatar Generator Model/Shinkai_53.pb differ diff --git a/OpenCV Projects/AI Anime Avatar Generator Model/animegan2pb.py b/OpenCV Projects/AI Anime Avatar Generator Model/animegan2pb.py new file mode 100644 index 000000000..7628a17e8 --- /dev/null +++ b/OpenCV Projects/AI Anime Avatar Generator Model/animegan2pb.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- +# @Time : 2021/8/31 18:51 +# @Author : Xin Chen +# @File : animegan2pb.py +# @Software: PyCharm + +import os +import tensorflow as tf +from tensorflow.python.framework import graph_util + +def freeze_graph(model_folder, output_graph): + ''' + :param input_checkpoint: + :param output_graph: PB save dir + :return: + ''' + checkpoint = tf.train.get_checkpoint_state(model_folder) + print(checkpoint) + input_checkpoint = checkpoint.model_checkpoint_path + + # input node and output node from the network ( AnimeGANv2 generator) + # input_op = 'generator_input:0' + # output_op = 'generator/G_MODEL/out_layer/Tanh:0' + + output_node_names = "generator/G_MODEL/out_layer/Tanh" + saver = tf.train.import_meta_graph(input_checkpoint + '.meta', clear_devices=True) + graph = tf.get_default_graph() + input_graph_def = graph.as_graph_def() + + with tf.Session() as sess: + saver.restore(sess, input_checkpoint) + output_graph_def = graph_util.convert_variables_to_constants( + sess=sess, + input_graph_def=input_graph_def, # :sess.graph_def + output_node_names=output_node_names.split(",")) + + with tf.gfile.GFile(output_graph, "wb") as f: + f.write(output_graph_def.SerializeToString()) + print("%d ops in the final graph." % len(output_graph_def.node)) + + for op in graph.get_operations(): + print(op.name, op.values()) + +if __name__ == '__main__': + model_folder = "/media/ada/0009B35A000DC852/a2/checkpoint/generator_Shinkai_weight" + pb_save_path = "Shinkai_53.pb" + freeze_graph(model_folder, pb_save_path) + + + """ pb model 2 onnx command""" + cmd = "python -m tf2onnx.convert --input Shinkai_53.pb --inputs generator_input:0 --outputs generator/G_MODEL/out_layer/Tanh:0 --output Shinkai_53.onnx" + res = os.system(cmd) + print(res) diff --git a/OpenCV Projects/AI Anime Avatar Generator Model/elon intro.jpg b/OpenCV Projects/AI Anime Avatar Generator Model/elon intro.jpg new file mode 100644 index 000000000..3c4626a75 Binary files /dev/null and b/OpenCV Projects/AI Anime Avatar Generator Model/elon intro.jpg differ diff --git a/OpenCV Projects/AI Anime Avatar Generator Model/model.py b/OpenCV Projects/AI Anime Avatar Generator Model/model.py new file mode 100644 index 000000000..4c607349a --- /dev/null +++ b/OpenCV Projects/AI Anime Avatar Generator Model/model.py @@ -0,0 +1,97 @@ +import cv2 +import numpy as np +import matplotlib.pyplot as plt + +# Step 1: Load and Display the Image +def load_image(image_path): + """ + Load an image from file and convert it to RGB format. + + Parameters: + image_path (str): Path to the image file. + + Returns: + np.array: Loaded image in RGB format. + """ + image = cv2.imread(image_path) + return cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + +# Step 2: Smooth and Enhance Cartoon Effect +def smooth_anime_effect(image): + """ + Apply a smoother, more anime-like effect with soft edges, vibrant colors, + and smooth shading transitions. + + Parameters: + image (np.array): Input image in RGB format. 
+
+    Returns:
+    np.array: Smoothed anime-like cartoon image.
+    """
+    # Convert to grayscale
+    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
+
+    # Use a larger blur to smooth the gray image
+    gray_blur = cv2.GaussianBlur(gray, (9, 9), 0)
+
+    # Detect edges using adaptive thresholding for softer edges
+    edges = cv2.adaptiveThreshold(
+        gray_blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 9, 2
+    )
+
+    # Create a smooth, vibrant version of the original image
+    smooth_color = cv2.bilateralFilter(image, d=10, sigmaColor=300, sigmaSpace=300)
+
+    # Boost contrast and vibrancy to mimic the anime style
+    hsv = cv2.cvtColor(smooth_color, cv2.COLOR_RGB2HSV)
+    hsv[..., 1] = np.clip(hsv[..., 1] * 1.4, 0, 255)  # Increase saturation
+    hsv[..., 2] = np.clip(hsv[..., 2] * 1.1, 0, 255)  # Slight brightness boost
+    vibrant_image = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
+
+    # Combine the vibrant color image with softened edges
+    cartoon = cv2.bitwise_and(vibrant_image, vibrant_image, mask=edges)
+
+    return cartoon
+
+# Step 3: Display Original and Smoothed Cartoon Images Side by Side
+def display_images(original, cartoon):
+    """
+    Display the original and anime-like cartoon images side by side.
+
+    Parameters:
+    original (np.array): Original image.
+    cartoon (np.array): Cartoonized image.
+    """
+    fig, axes = plt.subplots(1, 2, figsize=(12, 6))
+
+    # Display Original Image
+    axes[0].imshow(original)
+    axes[0].set_title('Original Image')
+    axes[0].axis('off')
+
+    # Display Anime-Like Cartoon Image
+    axes[1].imshow(cartoon)
+    axes[1].set_title('AI Anime Avatar Image')
+    axes[1].axis('off')
+
+    plt.show()
+
+# Step 4: Generate the Anime Avatar
+def generate_anime_avatar(image_path):
+    """
+    Generate a smooth anime-like avatar using soft edges and vibrant colors.
+
+    Parameters:
+    image_path (str): Path to the input image file.
+    """
+    # Load the original image
+    original_image = load_image(image_path)
+
+    # Apply smooth anime effect
+    cartoon_image = smooth_anime_effect(original_image)
+
+    # Display the original and anime-like cartoon images
+    display_images(original_image, cartoon_image)
+
+# Usage (replace with your image path); the guard keeps the demo from running when model.py is imported
+if __name__ == "__main__":
+    generate_anime_avatar("elon intro.jpg")
diff --git a/OpenCV Projects/AI Anime Avatar Generator Model/output_elon.png b/OpenCV Projects/AI Anime Avatar Generator Model/output_elon.png
new file mode 100644
index 000000000..2d80bb6ee
Binary files /dev/null and b/OpenCV Projects/AI Anime Avatar Generator Model/output_elon.png differ
diff --git a/OpenCV Projects/AI Anime Avatar Generator Model/requirements.txt b/OpenCV Projects/AI Anime Avatar Generator Model/requirements.txt
new file mode 100644
index 000000000..02ffcc1b6
--- /dev/null
+++ b/OpenCV Projects/AI Anime Avatar Generator Model/requirements.txt
@@ -0,0 +1,3 @@
+opencv-python==4.8.0.76
+matplotlib==3.7.2
+numpy==1.24.4