Contents
1. Project background
2. Scope of application
3. How to use
When it comes to mosaic censorship, plenty of people have long been frustrated by it. Although software for removing mosaics has reportedly been in development for the past few years, no finished product had ever appeared. Recently, however, one programmer finally completed such a tool after sustained effort.
The programmer, known as "deeppomf", used a deep neural network to build DeepCreamPy, a program that removes the censorship and reconstructs the original image. To make the software work better, the author collected more than 100,000 uncensored source images within just a few months, though he never looked closely at 95% of them because doing so would simply take too much time. After the software was uploaded and shared, it was downloaded more than 500 times within a week. Its limitations are still significant, however, and it can only handle fairly simple restorations.

Judging from the actual results, the retouched areas are still clearly visible in the restored images, but for manga with relatively simple line work the tool is more than adequate.
DeepCreamPy only works on light censorship; if the mosaic is too large or too heavy, decensoring may fail. It also does not work on photos of real people. If you insist on trying it anyway, here is what forcing it looks like:
Furthermore, the current version of DeepCreamPy cannot process images fully automatically: the censored regions must first be pre-processed by hand in Photoshop.
Step 1: Install the program
1. If you are a 64-bit Windows user, congratulations: you can simply download the prebuilt exe.
Download link:
https://github.com/deeppomf/DeepCreamPy/releases/latest
2. Otherwise you need to build it yourself. Building the code requires the following components:
Python 3.6
TensorFlow 1.10
Keras 2.2.4
Pillow
h5py
Pay attention to the software versions: TensorFlow on Windows is not compatible with Python 2, nor with Python 3.7.
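If you are unsure whether your environment matches, a quick sanity check like the sketch below can help. This is only an illustrative script; it assumes reasonably recent releases of each package (older Pillow builds expose PIL.PILLOW_VERSION instead of PIL.__version__):

# Quick sanity check for the dependency versions listed above.
import sys
import tensorflow as tf
import keras
import PIL
import h5py

print("Python    :", sys.version.split()[0])    # expected 3.6.x
print("TensorFlow:", tf.__version__)            # expected 1.10.x
print("Keras     :", keras.__version__)         # expected 2.2.4
print("Pillow    :", PIL.__version__)
print("h5py      :", h5py.__version__)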
The code of decensor.py is as follows:
import numpy as np
from PIL import Image
import os
from copy import deepcopy

import config
from libs.pconv_hybrid_model import PConvUnet
from libs.utils import *


class Decensor:

    def __init__(self):
        self.args = config.get_args()
        self.is_mosaic = self.args.is_mosaic
        self.mask_color = [self.args.mask_color_red/255.0, self.args.mask_color_green/255.0, self.args.mask_color_blue/255.0]

        if not os.path.exists(self.args.decensor_output_path):
            os.makedirs(self.args.decensor_output_path)

        self.load_model()

    def get_mask(self, colored):
        mask = np.ones(colored.shape, np.uint8)
        #mask out the pixels that exactly match the configured mask color (pure green by default)
        i, j = np.where(np.all(colored[0] == self.mask_color, axis=-1))
        mask[0, i, j] = 0
        return mask

    def load_model(self):
        self.model = PConvUnet()
        self.model.load(
            r"./models/model.h5",
            train_bn=False,
            lr=0.00005
        )

    def decensor_all_images_in_folder(self):
        #load model once at beginning and reuse same model
        #self.load_model()
        color_dir = self.args.decensor_input_path
        file_names = os.listdir(color_dir)

        #convert all images into np arrays and put them in a list
        for file_name in file_names:
            color_file_path = os.path.join(color_dir, file_name)
            color_bn, color_ext = os.path.splitext(file_name)
            if os.path.isfile(color_file_path) and color_ext.casefold() == ".png":
                print("--------------------------------------------------------------------------")
                print("Decensoring the image {}".format(color_file_path))
                colored_img = Image.open(color_file_path)
                #if we are doing a mosaic decensor
                if self.is_mosaic:
                    #get the original file that hasn't been colored
                    ori_dir = self.args.decensor_input_original_path
                    #since the original image might not be a png, test multiple file formats
                    valid_formats = {".png", ".jpg", ".jpeg"}
                    for test_file_name in os.listdir(ori_dir):
                        test_bn, test_ext = os.path.splitext(test_file_name)
                        if (test_bn == color_bn) and (test_ext.casefold() in valid_formats):
                            ori_file_path = os.path.join(ori_dir, test_file_name)
                            ori_img = Image.open(ori_file_path)
                            # colored_img.show()
                            self.decensor_image(ori_img, colored_img, file_name)
                            break
                    else: #for...else, i.e. if the loop finished without encountering break
                        print("Corresponding original, uncolored image not found in {}.".format(ori_dir))
                        print("Check if it exists and is in the PNG or JPG format.")
                else:
                    self.decensor_image(colored_img, colored_img, file_name)
        print("--------------------------------------------------------------------------")

    #decensors one image at a time
    #TODO: decensor all cropped parts of the same image in a batch (then i need input for colored an array of those images and make additional changes)
    def decensor_image(self, ori, colored, file_name):
        width, height = ori.size
        #save the alpha channel if the image has an alpha channel
        has_alpha = False
        if (ori.mode == "RGBA"):
            has_alpha = True
            alpha_channel = np.asarray(ori)[:,:,3]
            alpha_channel = np.expand_dims(alpha_channel, axis=-1)
            ori = ori.convert('RGB')

        ori_array = image_to_array(ori)
        ori_array = np.expand_dims(ori_array, axis=0)

        if self.is_mosaic:
            #if mosaic decensor, mask is empty
            # mask = np.ones(ori_array.shape, np.uint8)
            # print(mask.shape)
            colored = colored.convert('RGB')
            color_array = image_to_array(colored)
            color_array = np.expand_dims(color_array, axis=0)
            mask = self.get_mask(color_array)
            # mask_reshaped = mask[0,:,:,:] * 255.0
            # mask_img = Image.fromarray(mask_reshaped.astype('uint8'))
            # mask_img.show()
        else:
            mask = self.get_mask(ori_array)

        #colored image is only used for finding the regions
        regions = find_regions(colored.convert('RGB'))
        print("Found {region_count} censored regions in this image!".format(region_count=len(regions)))

        if len(regions) == 0 and not self.is_mosaic:
            print("No green regions detected!")
            return

        output_img_array = ori_array[0].copy()

        for region_counter, region in enumerate(regions, 1):
            bounding_box = expand_bounding(ori, region)
            crop_img = ori.crop(bounding_box)
            # crop_img.show()
            #convert mask back to image
            mask_reshaped = mask[0,:,:,:] * 255.0
            mask_img = Image.fromarray(mask_reshaped.astype('uint8'))
            #resize the cropped images
            crop_img = crop_img.resize((512, 512))
            crop_img_array = image_to_array(crop_img)
            crop_img_array = np.expand_dims(crop_img_array, axis=0)
            #resize the mask images
            mask_img = mask_img.crop(bounding_box)
            mask_img = mask_img.resize((512, 512))
            # mask_img.show()
            #convert mask_img back to array
            mask_array = image_to_array(mask_img)
            #the mask has been upscaled so there will be values not equal to 0 or 1
            mask_array[mask_array > 0] = 1
            if self.is_mosaic:
                #in mosaic mode, un-mask every other masked pixel in a checkerboard pattern
                a, b = np.where(np.all(mask_array == 0, axis=-1))
                print(a, b)
                coords = [coord for coord in zip(a, b) if ((coord[0] + coord[1]) % 2 == 0)]
                a, b = zip(*coords)
                mask_array[a, b] = 1
                # mask_array = mask_array * 255.0
                # img = Image.fromarray(mask_array.astype('uint8'))
                # img.show()
                # return
            mask_array = np.expand_dims(mask_array, axis=0)

            # Run predictions for this batch of images
            pred_img_array = self.model.predict([crop_img_array, mask_array, mask_array])

            pred_img_array = pred_img_array * 255.0
            pred_img_array = np.squeeze(pred_img_array, axis=0)

            #scale prediction image back to original size
            bounding_width = bounding_box[2] - bounding_box[0]
            bounding_height = bounding_box[3] - bounding_box[1]
            #convert np array to image
            # print(bounding_width, bounding_height)
            # print(pred_img_array.shape)
            pred_img = Image.fromarray(pred_img_array.astype('uint8'))
            # pred_img.show()
            pred_img = pred_img.resize((bounding_width, bounding_height), resample=Image.BICUBIC)
            pred_img_array = image_to_array(pred_img)
            # print(pred_img_array.shape)
            pred_img_array = np.expand_dims(pred_img_array, axis=0)

            # copy the decensored regions into the output image
            for i in range(len(ori_array)):
                for col in range(bounding_width):
                    for row in range(bounding_height):
                        bounding_width_index = col + bounding_box[0]
                        bounding_height_index = row + bounding_box[1]
                        if (bounding_width_index, bounding_height_index) in region:
                            output_img_array[bounding_height_index][bounding_width_index] = pred_img_array[i,:,:,:][row][col]
            print("{region_counter} out of {region_count} regions decensored.".format(region_counter=region_counter, region_count=len(regions)))

        output_img_array = output_img_array * 255.0

        #restore the alpha channel if the image had one
        if has_alpha:
            output_img_array = np.concatenate((output_img_array, alpha_channel), axis=2)

        output_img = Image.fromarray(output_img_array.astype('uint8'))

        #save the decensored image
        #file_name, _ = os.path.splitext(file_name)
        save_path = os.path.join(self.args.decensor_output_path, file_name)
        output_img.save(save_path)

        print("Decensored image saved to {save_path}!".format(save_path=save_path))
        return


if __name__ == '__main__':
    decensor = Decensor()
    decensor.decensor_all_images_in_folder()
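One detail in decensor_image() that is easy to miss is the mosaic branch: after the mask is built, every other masked pixel is flipped back to 1 in a checkerboard pattern, presumably so the inpainting model still sees some of the mosaic's own colors as hints. The toy snippet below is a standalone illustration of that indexing on a dummy 4x4 patch, not part of DeepCreamPy:

import numpy as np

# Pretend an entire 4x4 RGB patch is masked (all zeros).
mask = np.zeros((4, 4, 3), dtype=np.uint8)

# Coordinates of fully masked pixels, exactly as in decensor_image().
a, b = np.where(np.all(mask == 0, axis=-1))

# Keep only pixels whose coordinates sum to an even number...
coords = [c for c in zip(a, b) if (c[0] + c[1]) % 2 == 0]
a, b = zip(*coords)

# ...and un-mask them, producing a checkerboard of 0s and 1s.
mask[a, b] = 1
print(mask[:, :, 0])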
Note: running the demo requires downloading the model, which may need a VPN or proxy to reach. To save everyone the trouble, I have already downloaded it:
https://download.csdn.net/download/m0_38106923/10798221
The code above loads the weights from ./models/model.h5, so put the downloaded model file into the models folder.
Step 2: Manually pre-process the black bars or mosaics
First, open Photoshop or another image editor. For images where a solid bar covers the sensitive areas, pre-process the image with pure green (color code #00FF00), replacing the black bars with green bars.
It is strongly recommended to use the Pencil tool rather than the Brush; if you do use the Brush, make sure anti-aliasing is turned off. Alternatively, select the censored region with the Magic Wand and fill it with the Paint Bucket. I am also sharing a preprocessing tool written in Python; the code is rather long, so it is bundled together with the model file above, and you can download it from there.
Finally, save the processed file in PNG format into the software's "decensor_input" folder. If the sensitive area is covered by a mosaic rather than a black bar, you also need to put the original, uncolored image into the "decensor_input_original" folder and make sure its file name matches that of the pre-processed image in "decensor_input".
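If you suspect your mask is not pure green (for example because anti-aliasing blended the edges), a small Pillow/NumPy script like the sketch below can snap near-green pixels back to exact #00FF00 before you run the tool. This is only an illustrative helper, not the author's bundled tool; the file name and the tolerance of 60 are assumptions you should adjust:

import numpy as np
from PIL import Image

# Hypothetical input file; change the path to your own pre-processed image.
path = "decensor_input/sample.png"

img = np.array(Image.open(path).convert("RGB")).astype(int)
target = np.array([0, 255, 0])                      # pure green, #00FF00

# Pixels whose three channels are all within the tolerance of pure green.
close = np.all(np.abs(img - target) < 60, axis=-1)
img[close] = target                                 # snap them to exact green

Image.fromarray(img.astype("uint8")).save(path)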
Step 3: Run the decensoring software
1. 64-bit Windows users who downloaded the exe can simply double-click it.
2. Users who built the project themselves need to run one of the following two commands.
For images censored with black bars, run:
$ python decensor.py
For images censored with mosaics, run:
$ python decensor.py --is_mosaic=True
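The --is_mosaic flag, the input/output paths, and the mask color used throughout decensor.py are all read in config.get_args(), which is not reproduced in this article. As a rough, assumed stand-in, an argparse-based version consistent with the attributes the code accesses might look like the following; the argument names match the attributes above, but the defaults are my own guesses rather than the project's actual config.py:

import argparse

def get_args():
    parser = argparse.ArgumentParser()
    # Folders used by Decensor (defaults are assumptions for illustration).
    parser.add_argument("--decensor_input_path", default="./decensor_input/")
    parser.add_argument("--decensor_input_original_path", default="./decensor_input_original/")
    parser.add_argument("--decensor_output_path", default="./decensor_output/")
    # Mosaic mode switch and the pure-green mask color (#00FF00).
    parser.add_argument("--is_mosaic", default=False, type=bool)
    parser.add_argument("--mask_color_red", default=0, type=int)
    parser.add_argument("--mask_color_green", default=255, type=int)
    parser.add_argument("--mask_color_blue", default=0, type=int)
    return parser.parse_args()

Note one quirk of argparse: with type=bool, any non-empty string is treated as True, so --is_mosaic=True enables mosaic mode, but --is_mosaic=False would not turn it off; simply omit the flag for black-bar images.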
Notes
If your processed image comes out looking like this:
then you almost certainly pre-processed it incorrectly. Avoid the following two mistakes:
In the first image, the censored region was not completely filled in; in the second, anti-aliasing was enabled, so the edges of the region were not filled with pure green. Turn anti-aliasing off!