不要谈什么天分,运气,
你需要的是一个截稿日,
以及一个不交稿就能打爆你狗头的人,
然后你就会被自己的才华吓到。——查理·布洛克
reinhard算法:Color Transfer between Images,作者Erik Reinhard
welsh算法:Transferring Color to Greyscale Images,作者Tomihisa Welsh
人像图换肤色,风景图颜色迁移
def color_trans_reinhard(in_img, ref_img, in_mask_lists=None, ref_mask_lists=None):
    """Reinhard color transfer ("Color Transfer between Images", Erik Reinhard).

    Shifts each Lab channel of `in_img` so its mean/std match those of
    `ref_img`, optionally restricted to paired region masks (e.g. skin masks).

    Args:
        in_img: input BGR image, uint8, shape (H, W, 3).
        ref_img: reference BGR image, uint8.
        in_mask_lists: list of uint8 masks (values 0/255, shape (H, W)) for the
            input image, or None / [None] to use the whole image.
        ref_mask_lists: reference-image masks, paired 1:1 with `in_mask_lists`.

    Returns:
        BGR uint8 image with the reference statistics applied; when masks are
        given, pixels outside every input mask are copied back unchanged.
    """
    # Normalize defaults; avoids a shared mutable default list while keeping
    # explicit `[None]` calls backward compatible.
    if in_mask_lists is None:
        in_mask_lists = [None]
    if ref_mask_lists is None:
        ref_mask_lists = [None]
    masks_given = in_mask_lists[0] is not None and ref_mask_lists[0] is not None

    ref_img_lab = cv2.cvtColor(ref_img, cv2.COLOR_BGR2LAB)
    in_img_lab = cv2.cvtColor(in_img, cv2.COLOR_BGR2LAB)

    in_avg = np.ones(in_img.shape, np.float32)
    in_std = np.ones(in_img.shape, np.float32)
    ref_avg = np.ones(in_img.shape, np.float32)
    ref_std = np.ones(in_img.shape, np.float32)
    mask_all = np.zeros(in_img.shape, np.float32)

    for in_mask, ref_mask in zip(in_mask_lists, ref_mask_lists):
        # A missing mask means "whole image"; build it explicitly so the
        # per-region copyto logic works on the default path too (the original
        # crashed in np.expand_dims(None, 2) when no masks were passed).
        if in_mask is None:
            in_mask = np.full(in_img.shape[:2], 255, np.uint8)
        if ref_mask is None:
            ref_mask = np.full(ref_img.shape[:2], 255, np.uint8)

        region = np.expand_dims(in_mask, 2) != 0
        in_avg_tmp, in_std_tmp = cv2.meanStdDev(in_img_lab, mask=in_mask)
        # numpy.copyto(destination, source, where=...)
        np.copyto(in_avg, in_avg_tmp.reshape(1, 1, -1), where=region)
        np.copyto(in_std, in_std_tmp.reshape(1, 1, -1), where=region)

        ref_avg_tmp, ref_std_tmp = cv2.meanStdDev(ref_img_lab, mask=ref_mask)
        # Reference statistics are written at the *input* mask's locations.
        np.copyto(ref_avg, ref_avg_tmp.reshape(1, 1, -1), where=region)
        np.copyto(ref_std, ref_std_tmp.reshape(1, 1, -1), where=region)

        mask_all[in_mask != 0] = 1

    in_std[in_std == 0] = 1  # guard against division by zero in flat regions
    transfered_lab = (in_img_lab - in_avg) / in_std * ref_std + ref_avg
    transfered_lab = np.clip(transfered_lab, 0, 255)
    out_img = cv2.cvtColor(transfered_lab.astype(np.uint8), cv2.COLOR_LAB2BGR)

    # With explicit masks, keep the original pixels outside every region.
    if masks_given:
        np.copyto(out_img, in_img, where=mask_all == 0)
    return out_img


"""
#img1 = cv2.imread("imgs/1.png")
#img2 = cv2.imread("imgs/2.png")
#img1 = cv2.imread("welsh22/1.png", 1)
#img2 = cv2.imread("welsh22/2.png", 1)
img1 = cv2.imread("welsh22/gray.jpg", 1)
img2 = cv2.imread("welsh22/consult.jpg", 1)
cv2.imwrite("out.jpg", color_trans_reinhard(img1, img2, [np.ones(img1.shape[:-1], np.uint8) * 255], [np.ones(img2.shape[:-1], np.uint8) * 255]))
"""
# Demo: skin-color transfer between two portraits using binarized parsing masks.
# NOTE(review): the image-path string literals were mangled in the paste
# ('"ab.jpeg?s=#34;)' etc.); reconstructed as plain filenames — confirm paths.
img1 = cv2.imread("ab.jpeg")
img2 = cv2.imread("hsy.jpeg")
# Threshold the parsing maps into strict 0/255 masks.
mask1 = cv2.imread("ab_parsing.jpg", 0)
mask1[mask1 < 128] = 0
mask1[mask1 >= 128] = 255
mask2 = cv2.imread("hsy_parsing.jpg", 0)
mask2[mask2 < 128] = 0
mask2[mask2 >= 128] = 255
cv2.imwrite("out.jpg", color_trans_reinhard(img1, img2, [mask1], [mask2]))
改进点：
def get_domain_std(img_l, pixel, height, width, window_size):
    """Std-dev of `img_l` in a window centered at `pixel`, clamped to the image.

    Kept for reference; the vectorized cv2.blur-based local std below replaces
    this per-pixel call.
    """
    window_left = max(pixel[1] - window_size, 0)
    window_right = min(pixel[1] + window_size + 1, width)
    window_top = max(pixel[0] - window_size, 0)
    window_bottom = min(pixel[0] + window_size + 1, height)
    window_slice = img_l[window_top:window_bottom, window_left:window_right]
    return np.std(window_slice)


def _local_std(img_l, window_size):
    """Box-filtered local standard deviation of a single-channel image.

    Computed in float32: with uint8 input, `l - mean` wraps around before
    squaring, which corrupted the texture term in the original code.
    """
    img_f = img_l.astype(np.float32)
    mean = cv2.blur(img_f, (window_size, window_size))
    return np.sqrt(cv2.blur((img_f - mean) ** 2, (window_size, window_size)))


def get_weight_pixel(ref_img_l, ref_img_a, ref_img_b, ref_img_height, ref_img_width,
                     segment, window_size, ratio, ref_mask_lists=None):
    """Sample up to `segment` random reference pixels; build the weight -> (a, b) table.

    weight = L * ratio + local_std(L) * (1 - ratio), truncated to int; the first
    sampled pixel wins for each distinct weight value.

    Returns:
        (weights, a_values, b_values) as 1-D numpy arrays.
    """
    if ref_mask_lists is None:
        ref_mask_lists = [None]
    weight_list = []
    pixel_a_list = []
    pixel_b_list = []
    seen_weights = set()  # O(1) duplicate check instead of scanning the list

    # Union of the masks restricts sampling. The original OR-ed the masks into
    # an all-ones array, so no pixel could ever test as 0 and the masks were
    # silently ignored; start from zeros instead.
    if ref_mask_lists[0] is not None:
        ref_img_mask = np.zeros((ref_img_height, ref_img_width), np.uint8)
        for m in ref_mask_lists:
            ref_img_mask = np.bitwise_or(ref_img_mask, m)
    else:
        ref_img_mask = np.full((ref_img_height, ref_img_width), 255, np.uint8)

    ref_img_l_std = _local_std(ref_img_l, window_size)

    for _ in range(segment):
        y = np.random.randint(ref_img_height)
        x = np.random.randint(ref_img_width)
        if ref_img_mask[y, x] == 0:
            continue  # sample fell outside every mask region
        pixel_light = ref_img_l[y, x]
        pixel_std = ref_img_l_std[y, x]
        weight_value = int(pixel_light * ratio + pixel_std * (1 - ratio))
        if weight_value not in seen_weights:
            seen_weights.add(weight_value)
            weight_list.append(weight_value)
            pixel_a_list.append(ref_img_a[y, x])
            pixel_b_list.append(ref_img_b[y, x])
    return np.array(weight_list), np.array(pixel_a_list), np.array(pixel_b_list)


def color_trans_welsh(in_img, ref_img, in_mask_lists=None, ref_mask_lists=None):
    """Welsh color transfer ("Transferring Color to Greyscale Images", T. Welsh).

    For each input pixel, finds the sampled reference pixel whose
    luminance/texture weight is closest, and takes its a/b chroma.

    Args:
        in_img: input (grayscale-like) BGR image, uint8.
        ref_img: colored reference BGR image, uint8.
        in_mask_lists / ref_mask_lists: optional paired lists of 0/255 uint8
            masks; None / [None] processes the whole image.

    Returns:
        Colorized BGR uint8 image; with masks, pixels outside every input
        mask keep their original values.
    """
    if in_mask_lists is None:
        in_mask_lists = [None]
    if ref_mask_lists is None:
        ref_mask_lists = [None]
    start = time.time()

    # Reference image.
    ref_img_height, ref_img_width, _ = ref_img.shape
    window_size = 5    # window size for the local std-dev
    segment = 10000    # number of random sample points
    ratio = 0.5        # mixing coefficient between luminance and texture

    ref_img_lab = cv2.cvtColor(ref_img, cv2.COLOR_BGR2Lab)
    ref_img_l, ref_img_a, ref_img_b = cv2.split(ref_img_lab)
    # Reference weight table.
    ref_img_weight_array, ref_img_pixel_a_array, ref_img_pixel_b_array = get_weight_pixel(
        ref_img_l, ref_img_a, ref_img_b, ref_img_height, ref_img_width,
        segment, window_size, ratio, ref_mask_lists)
    ref_img_max_pixel, ref_img_min_pixel = np.max(ref_img_l), np.min(ref_img_l)

    # Input image: use only its luminance channel.
    in_img_lab = cv2.cvtColor(in_img, cv2.COLOR_BGR2LAB)
    in_img_l, in_img_a, in_img_b = cv2.split(in_img_lab)
    in_img_max_pixel, in_img_min_pixel = np.max(in_img_l), np.min(in_img_l)

    # Map input luminance into the reference range; guard the flat-image case
    # (max == min) that would divide by zero.
    in_range = int(in_img_max_pixel) - int(in_img_min_pixel)
    if in_range == 0:
        in_range = 1
    pixel_ratio = (int(ref_img_max_pixel) - int(ref_img_min_pixel)) / in_range
    in_img_l = ref_img_min_pixel + (in_img_l - in_img_min_pixel) * pixel_ratio
    in_img_l = in_img_l.astype(np.uint8)

    in_img_l_std = _local_std(in_img_l, window_size)
    in_img_weight_pixel = ratio * in_img_l + (1 - ratio) * in_img_l_std

    # For every input pixel: index of the reference sample with the closest weight.
    nearest_pixel_index = np.argmin(
        np.abs(ref_img_weight_array.reshape(1, 1, -1)
               - np.expand_dims(in_img_weight_pixel, 2)),
        axis=2).astype(np.float32)
    # cv2.remap performs the gather: pick a/b of the matched reference sample.
    zeros_map = np.zeros_like(nearest_pixel_index, np.float32)
    in_img_a = cv2.remap(ref_img_pixel_a_array.reshape(1, -1), nearest_pixel_index,
                         zeros_map, interpolation=cv2.INTER_LINEAR)
    in_img_b = cv2.remap(ref_img_pixel_b_array.reshape(1, -1), nearest_pixel_index,
                         zeros_map, interpolation=cv2.INTER_LINEAR)

    merge_img = cv2.merge([in_img_l, in_img_a, in_img_b])
    bgr_img = cv2.cvtColor(merge_img, cv2.COLOR_LAB2BGR)

    # With explicit masks, restore the original pixels outside every region.
    if in_mask_lists[0] is not None and ref_mask_lists[0] is not None:
        mask_all = np.zeros(in_img.shape[:-1], np.int32)
        for m in in_mask_lists:
            mask_all = np.bitwise_or(m, mask_all)
        mask_all = cv2.merge([mask_all, mask_all, mask_all])
        np.copyto(bgr_img, in_img, where=mask_all == 0)

    end = time.time()
    print("time", end - start)
    return bgr_img


if __name__ == '__main__':
    # Colorize a grayscale image from a colored reference.
    #ref_img = cv2.imread("consult.jpg")
    #ref_img = cv2.imread("2.png")
    ref_img = cv2.imread("../imgs/2.png")
    # OpenCV reads 3 channels by default, so no channel expansion is needed.
    #in_img = cv2.imread("gray.jpg")
    #in_img = cv2.imread("1.png")
    in_img = cv2.imread("../imgs/1.png")
    bgr_img = color_trans_welsh(in_img, ref_img)
    cv2.imwrite("out_ren.jpg", bgr_img)
    """
    ref_img = cv2.imread("../hsy.jpeg")
    ref_mask = cv2.imread("../hsy_parsing.jpg", 0)
    ref_mask[ref_mask < 128] = 0
    ref_mask[ref_mask >= 128] = 255
    in_img = cv2.imread("../ab.jpeg")
    in_mask = cv2.imread("../ab_parsing.jpg", 0)
    in_mask[in_mask < 128] = 0
    in_mask[in_mask >= 128] = 255
    bgr_img = color_trans_welsh(in_img, ref_img, in_mask_lists=[in_mask], ref_mask_lists=[ref_mask])
    cv2.imwrite("bgr.jpg", bgr_img)
    """
从左到右&#xff0c;分别为原图&#xff0c;参考图&#xff0c;reinhard效果&#xff0c;welsh效果
从左到右&#xff0c;分别为原图&#xff0c;原图皮肤mask&#xff0c;参考图&#xff0c;参考图皮肤mask&#xff0c;reinhard效果&#xff0c;welsh效果