Basic image processing with Python + OpenCV

  1. import
import cv2
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
  2. read
img = cv2.imread(imagename, cv2.IMREAD_COLOR)
# cv2.IMREAD_COLOR - load in BGR color mode
# cv2.IMREAD_GRAYSCALE - load in grayscale mode
# cv2.IMREAD_UNCHANGED - load the image as-is (including the alpha channel)
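Note that cv2.imread does not raise when the file is missing or unreadable; it silently returns None, so it is worth checking right after loading:
if img is None:  # cv2.imread returns None instead of raising an error
    raise FileNotFoundError(f"could not read {imagename}")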
  3. create a new image
new_img = np.zeros(img.shape, dtype="uint8")  # same shape as img: [img.shape[0], img.shape[1], ...]
new_img = np.zeros((color_goal, *img.shape), dtype="uint8")  # stacks color_goal layers: [color_goal, img.shape[0], img.shape[1], ...]
img_like = np.zeros_like(img)  # same shape and dtype as img
  4. basics: convert color space, resize, rotate
# convert color space
img = cv2.cvtColor(semantic_mask, cv2.COLOR_BGR2RGB)
img_rgba = cv2.cvtColor(img, cv2.COLOR_BGRA2RGBA)
h, w, _ = img.shape
# resize
img_resize = cv2.resize(img, (w, h), interpolation=cv2.INTER_CUBIC)
img_resize = cv2.resize(img, tar_img.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)  # note: when using another image's size, use .shape[:2][::-1] because cv2.resize expects (width, height)
# rotate
img_rot_right = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE) 
img_rot_left = cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE) 
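cv2.rotate only supports 90-degree steps; for an arbitrary angle a common approach is getRotationMatrix2D plus warpAffine. A minimal sketch, reusing h and w from above (the 30-degree angle is just an example):
M = cv2.getRotationMatrix2D((w / 2, h / 2), 30, 1.0)  # center, angle in degrees (counterclockwise), scale
img_rot = cv2.warpAffine(img, M, (w, h))              # keeps the original canvas size, so corners may be cropped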
  5. show
plt.figure()
plt.imshow(img)

img_gray = cv2.cvtColor(semantic_mask, cv2.COLOR_RGB2GRAY)  # convert to a grayscale image
plt.figure()
plt.imshow(img_gray,cmap='gray')
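cv2.imread returns BGR while plt.imshow expects RGB, so convert before displaying a color image or the red and blue channels will look swapped:
plt.figure()
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))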
  6. inRange
color_lbound = np.array([2, 100, 200]) - 20
color_hbound = np.array([2, 100, 200]) + 20
mask = cv2.inRange(semantic_mask, color_lbound, color_hbound)
image[mask>0] = new_color
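The ±20 bounds above can fall outside [0, 255]; inRange still works because pixel values never leave that range, but clipping keeps the bounds explicit. A small variant of the snippet above:
base_color = np.array([2, 100, 200])
color_lbound = np.clip(base_color - 20, 0, 255)
color_hbound = np.clip(base_color + 20, 0, 255)
mask = cv2.inRange(semantic_mask, color_lbound, color_hbound)  # 255 inside the color range, 0 elsewhere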
  7. threshold and bitwise operations
ret, mask = cv2.threshold(img_gray, 1, 200, cv2.THRESH_BINARY)  # pixels > 1 become 200, the rest 0
mask_inv = cv2.bitwise_not(mask)

img_bg = cv2.bitwise_and(bg, bg, mask=mask_inv)  # keep bg only where mask is 0
img_fg = cv2.bitwise_and(fg, fg, mask=mask)      # keep fg only where mask is nonzero
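A common next step (e.g. pasting a logo) is to add the two masked halves back together; a minimal sketch, assuming bg and fg have the same size:
combined = cv2.add(img_bg, img_fg)  # the two regions do not overlap, so saturated addition simply merges them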
  8. merge background and foreground (weighted)
weighted_merge = cv2.addWeighted(bg_img, 0.5, fg_img, 0.5, 0)
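cv2.addWeighted(src1, alpha, src2, beta, gamma) computes src1*alpha + src2*beta + gamma with saturation, and both images must have the same size and number of channels. The call above is roughly equivalent to this NumPy sketch:
weighted_merge = np.clip(0.5 * bg_img + 0.5 * fg_img + 0, 0, 255).astype(np.uint8)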
  9. merge with a transparent (RGBA) foreground

def add_transparent_image(background, foreground, x_offset=None, y_offset=None):
    bg_h, bg_w, bg_channels = background.shape
    fg_h, fg_w, fg_channels = foreground.shape

    assert bg_channels == 3, f'background image should have exactly 3 channels (RGB). found:{bg_channels}'
    assert fg_channels == 4, f'foreground image should have exactly 4 channels (RGBA). found:{fg_channels}'

    # center by default
    if x_offset is None: x_offset = (bg_w - fg_w) // 2
    if y_offset is None: y_offset = (bg_h - fg_h) // 2

    w = min(fg_w, bg_w, fg_w + x_offset, bg_w - x_offset)
    h = min(fg_h, bg_h, fg_h + y_offset, bg_h - y_offset)

    if w < 1 or h < 1: return background  # no overlap, nothing to compose

    # clip foreground and background images to the overlapping regions
    bg_x = max(0, x_offset)
    bg_y = max(0, y_offset)
    fg_x = max(0, x_offset * -1)
    fg_y = max(0, y_offset * -1)
    foreground = foreground[fg_y:fg_y + h, fg_x:fg_x + w]
    background_subsection = background[bg_y:bg_y + h, bg_x:bg_x + w]

    # separate alpha and color channels from the foreground image
    foreground_colors = foreground[:, :, :3]
    alpha_channel = foreground[:, :, 3] / 255  # 0-255 => 0.0-1.0

    # construct an alpha_mask that matches the image shape
    alpha_mask = np.dstack((alpha_channel, alpha_channel, alpha_channel))

    # combine the background with the overlay image weighted by alpha
    composite = background_subsection * (1 - alpha_mask) + foreground_colors * alpha_mask

    # overwrite the section of the background image that has been updated
    background[bg_y:bg_y + h, bg_x:bg_x + w] = composite

    return background


bg_img = cv2.resize(bg_img, result_paint_merge_save.shape[:2][::-1])
bg_img = np.array(bg_img).astype(np.uint8)
result_paint_merge_save = np.array(result_paint_merge_save).astype(np.uint8)
result = add_transparent_image(bg_img, result_paint_merge_save)
cv2.imwrite(str(jsonfolder / "masked_merge_bg.png"), result)
  10. add an alpha channel
alpha = np.zeros([img.shape[0], img.shape[1], 1], dtype=np.uint8)
alpha[:] = 255  # fully opaque
img_w_alpha = np.concatenate([img, alpha], axis=-1)  # keep uint8 so the result can be written with cv2.imwrite
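Assuming img is a BGR image, the same result can be had in one call:
img_w_alpha = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)  # appends a fully opaque (255) alpha channel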
  11. split an image by a full semantic mask
img_mask = cv2.imread(uploaded_mask_file)
if ifrotate:
    img_mask = cv2.rotate(img_mask, cv2.ROTATE_90_CLOCKWISE)
img_mask = cv2.resize(img_mask, img.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)
unique_mask_colors, unique_mask_colors_count = np.unique(img_mask.reshape(-1, img_mask.shape[-1]), axis=0, return_counts=True)

for current_mask_c in unique_mask_colors:
    mask = cv2.inRange(img_mask, current_mask_c, current_mask_c)  # 255 where the pixel equals this color
    mask_bool = mask > 0
    if np.sum(mask_bool) == 0:
        print(str(current_mask_c) + ": no such mask")
        continue
    img_wmask = cv2.bitwise_and(img, img, mask=mask)  # keep the image only inside this mask
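    # (assumption: to actually keep each region, write it to its own file;
    #  the naming scheme below is just a hypothetical example)
    out_name = "region_" + "_".join(str(v) for v in current_mask_c) + ".png"
    cv2.imwrite(out_name, img_wmask)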