使用仿射变换处理图片
在目标检测和后续图像处理的流程中,warpAffine 是 OpenCV 提供的强大工具,常用于仿射变换操作。对于 YOLO 等目标检测模型,warpAffine 不仅适合于实现图片的 Letterbox(加黑边缩放) 操作,也可以用于在识别目标后,裁剪出检测区域并进行规范化变换、旋转等操作。
通过调整仿射变换矩阵,我们可以在统一的 API 下完成多个常见的图像处理需求。这种方式特别适合于与 CUDA 加速实现的 Letterbox 代码无缝结合,从而实现不同的功能。
仿射变换矩阵设置
裁剪出来后resize
先将图片的裁剪位置的起点移动到图片的 $(0,0)$ 点。对应的仿射变换矩阵为 $P=\begin{bmatrix}1&0&-s_x\\0&1&-s_y\\0&0&1\end{bmatrix}$,其中 $(s_x, s_y)$ 为裁剪框左上角在原图中的坐标。
对图片做 scale 变换。对应的仿射变换矩阵为
$S=\begin{bmatrix}scale_x&0&0\\0&scale_y&0\\0&0&1\end{bmatrix}$,其中 $scale_x = dst_w/crop_w$,$scale_y = dst_h/crop_h$,$dst_h$ 和 $dst_w$ 为目标图片的大小。最终的仿射变换矩阵为 $M = SP$。
直接裁剪
先将图片的裁剪位置的起点移动到图片的 $(0,0)$ 点。对应的仿射变换矩阵为
$M=\begin{bmatrix}1&0&-s_x\\0&1&-s_y\\0&0&1\end{bmatrix}$,其中 $(s_x, s_y)$ 为裁剪框左上角在原图中的坐标,设置目标图片大小的宽高为框的宽高。$M$ 就是最终的仿射变换矩阵。
旋转
旋转的一般仿射变换矩阵(图像坐标系 y 轴向下,顺时针旋转 $\theta$):$R(\theta)=\begin{bmatrix}\cos\theta&\sin\theta&0\\-\sin\theta&\cos\theta&0\\0&0&1\end{bmatrix}$
任意原点顺时针旋转仿射变换矩阵计算
- 中心点平移到原点
设原图的中心点为 $(c_x, c_y)$,其中:$c_x = w/2$,$c_y = h/2$。
对应的仿射变换矩阵为:$T_1=\begin{bmatrix}1&0&-c_x\\0&1&-c_y\\0&0&1\end{bmatrix}$
- 旋转
顺时针旋转 $\theta$ 角度,相当于逆时针旋转 $-\theta$。
对应的仿射变换矩阵为:$R=\begin{bmatrix}\cos\theta&\sin\theta&0\\-\sin\theta&\cos\theta&0\\0&0&1\end{bmatrix}$
- 旋转后向图片中心移动
图片旋转后,宽高会变化,新宽度 $w'$ 和高度 $h'$ 为:$w' = |w\cos\theta| + |h\sin\theta|$,$h' = |w\sin\theta| + |h\cos\theta|$
旋转后图片的新中心点为 $(c_x', c_y')$,其中:$c_x' = w'/2$,$c_y' = h'/2$。
对应的仿射变换矩阵为:$T_2=\begin{bmatrix}1&0&c_x'\\0&1&c_y'\\0&0&1\end{bmatrix}$
- 最终变换矩阵计算
综合变换矩阵为:$M = T_2\,R\,T_1$
展开为:$M=\begin{bmatrix}\cos\theta&\sin\theta&c_x'-c_x\cos\theta-c_y\sin\theta\\-\sin\theta&\cos\theta&c_y'+c_x\sin\theta-c_y\cos\theta\\0&0&1\end{bmatrix}$
使用透视变换处理图片
- 封装了透视变换的矩阵,可以根据4个及以上的点计算单应性矩阵
- 增加函数应用于透视变换
python 程序
对程序做了二次封装
现在支持
letter_box
裁剪图片
裁剪后resize图片到指定大小
裁剪后 按照短边resize图片,保持宽高比
resize图片到指定大小
按照短边resize图片,保持宽高比
对图片做镜像处理,支持水平、垂直镜像
对图片做任意角度顺时针旋转,不丢失部分画面
计算单应性矩阵做透视变换
归一化
对目前程序做优化,都使用
cv2.warpPerspective
函数做图像变换
import cv2
import numpy as np
from enum import Enum
from typing import List, Tuple, Union
from pydantic import BaseModel, field_validator
class NormType(Enum):
    """Pixel-normalization mode used by ImageTransform.normilization."""
    NONE = 0       # no normalization applied
    MeanStd = 1    # (pixel * alpha - mean) / std
    AlphaBeta = 2  # pixel * alpha + beta
class ChannelType(Enum):
    """Channel-order handling applied before normalization."""
    NONE = 0    # keep the channel order as-is
    SwapRB = 1  # reverse the channel axis (image[..., ::-1]), i.e. BGR <-> RGB
class Norm(BaseModel):
    """Normalization parameters for image preprocessing.

    Supports mean/std normalization ((x * alpha - mean) / std) or
    alpha/beta scaling (x * alpha + beta), plus an optional R/B
    channel swap (see ChannelType).
    """
    mean: List[float] = [0.0, 0.0, 0.0]  # per-channel mean (MeanStd mode)
    std: List[float] = [1.0, 1.0, 1.0]   # per-channel std (MeanStd mode)
    alpha: float = 1 / 255.0             # scale factor (both modes)
    beta: float = 0.0                    # offset (AlphaBeta mode)
    norm_type: NormType = NormType.NONE
    channel_type: ChannelType = ChannelType.NONE

    @field_validator("mean", "std", mode="before")
    @classmethod
    def validate_length(cls, value, info):
        """Ensure mean and std have exactly 3 elements."""
        if len(value) != 3:
            # pydantic v2 passes a ValidationInfo as the second argument;
            # it exposes the field's name via .field_name (the previous
            # ``field.alias`` raised AttributeError instead of ValueError).
            raise ValueError(f"{info.field_name} must have exactly 3 elements.")
        return value

    @staticmethod
    def mean_std(mean: List[float], std: List[float], channel_type: ChannelType = ChannelType.NONE) -> "Norm":
        """Build a Norm configured for mean/std normalization."""
        return Norm(mean=mean, std=std, norm_type=NormType.MeanStd, channel_type=channel_type)

    @staticmethod
    def alpha_beta(alpha: float, beta: float, channel_type: ChannelType = ChannelType.NONE) -> "Norm":
        """Build a Norm configured for alpha/beta scaling."""
        return Norm(alpha=alpha, beta=beta, norm_type=NormType.AlphaBeta, channel_type=channel_type)

    @staticmethod
    def none() -> "Norm":
        """Build a no-op Norm (no normalization, no channel swap)."""
        return Norm()

    def __repr__(self):
        return (
            f"Norm(mean={self.mean}, std={self.std}, alpha={self.alpha}, beta={self.beta}, "
            f"norm_type={self.norm_type}, channel_type={self.channel_type})"
        )
class WarpMatrix:
    """An affine (2x3) or perspective (3x3) transform plus its output size.

    ``matrix`` maps source-image coordinates to target-image coordinates
    (y = A x); ``target`` is the (width, height) of the output image.
    """

    def __init__(self, matrix: np.ndarray, target: Tuple[int, int]) -> None:
        self.matrix = matrix  # 2x3 affine or 3x3 perspective matrix
        self.target = target  # output image size (width, height)

    def __repr__(self):
        matrix_str = np.array2string(self.matrix, formatter={'float_kind': lambda x: f"{x:.2f}"})
        return f"WarpMatrix(matrix={matrix_str}, target={self.target})"

    @staticmethod
    def _as_homogeneous(matrix: np.ndarray) -> np.ndarray:
        """Promote a 2x3 affine matrix to 3x3 homogeneous form (3x3 passes through)."""
        if matrix.shape[0] == 3:
            return matrix
        return np.vstack([matrix, [0, 0, 1]])

    def __matmul__(self, other: "WarpMatrix"):
        """Compose two transforms: ``self @ other`` applies ``other`` first.

        Both operands are promoted to 3x3 homogeneous form before the
        product (previously a 3x3 operand was wrongly stacked to 4x3);
        the result is reduced back to 2x3 when it stays affine.
        The output size comes from ``self`` (the transform applied last).
        """
        if not isinstance(other, WarpMatrix):
            raise ValueError("Type error")
        matrix = self._as_homogeneous(self.matrix) @ self._as_homogeneous(other.matrix)
        # Drop the homogeneous row again when the product is still affine.
        if np.array_equal(matrix[-1], [0, 0, 1]):
            matrix = matrix[:-1]
        return WarpMatrix(matrix=matrix, target=self.target)

    def invert(self) -> np.ndarray:
        """Return the 3x3 inverse of this transform."""
        return np.linalg.inv(self._as_homogeneous(self.matrix))

    @staticmethod
    def perspective_matrix(src: List[Tuple[int, int]], dst: List[Tuple[int, int]], target: Tuple[int, int]) -> "WarpMatrix":
        """Compute a perspective (homography) transform from point pairs.

        Args:
            src: 4 or more points on the source image [(x1, y1), ...].
            dst: matching points on the target image [(u1, v1), ...].
            target: output image size (width, height).

        Returns:
            WarpMatrix: 3x3 homography matrix with the given target size.
        """
        src_pts = np.array(src, dtype=np.float32)
        dst_pts = np.array(dst, dtype=np.float32)
        # findHomography accepts 4+ correspondences (least-squares fit),
        # unlike cv2.getPerspectiveTransform which requires exactly 4.
        matrix, _ = cv2.findHomography(src_pts, dst_pts)
        return WarpMatrix(matrix=matrix, target=target)

    @staticmethod
    def letter_box_matrix(src: Tuple[int, int], target: Tuple[int, int]) -> "WarpMatrix":
        """Affine matrix for a letterbox resize (scale keeping aspect, center with padding).

        Conceptually A = T P S with
          S = [[s, 0, 0], [0, s, 0], [0, 0, 1]]             uniform scale
          P = [[1, 0, -s*w/2], [0, 1, -s*h/2], [0, 0, 1]]   move scaled center to origin
          T = [[1, 0, tw/2], [0, 1, th/2], [0, 0, 1]]       move to target center
        which collapses to the uniform scale plus centering offsets below.
        """
        scale = min(target[0] / src[0], target[1] / src[1])
        offset_x = (target[0] - scale * src[0]) * 0.5
        offset_y = (target[1] - scale * src[1]) * 0.5
        matrix = np.array([
            [scale, 0, offset_x],
            [0, scale, offset_y],
        ], dtype=np.float32)
        return WarpMatrix(matrix=matrix, target=target)

    @staticmethod
    def resize_matrix(src: Tuple[int, int], target: Union[Tuple[int, int], int]) -> "WarpMatrix":
        """Affine matrix for a plain resize.

        Args:
            src: source image size (width, height).
            target: tuple -> resize to exactly (width, height);
                    int  -> resize so the shorter edge equals target (keeps aspect ratio).
        """
        if isinstance(target, tuple):
            scale_x = target[0] / src[0]
            scale_y = target[1] / src[1]
            matrix = np.array([
                [scale_x, 0, 0],
                [0, scale_y, 0]
            ], dtype=np.float32)
            new_target = target
        elif isinstance(target, int):
            scale = target / min(src)
            new_width = int(src[0] * scale)
            new_height = int(src[1] * scale)
            matrix = np.array([
                [scale, 0, 0],
                [0, scale, 0]
            ], dtype=np.float32)
            new_target = (new_width, new_height)
        else:
            raise ValueError("Target must be either a tuple (width, height) or an integer for the shorter edge.")
        return WarpMatrix(matrix=matrix, target=new_target)

    @staticmethod
    def crop_resize_matrix(
        start_point: Tuple[int, int],
        end_point: Tuple[int, int],
        target: Union[Tuple[int, int], int, None] = None
    ) -> "WarpMatrix":
        """Affine matrix that crops a box and optionally resizes it.

        Args:
            start_point (Tuple[int, int]): Top-left (x, y) of the crop box.
            end_point (Tuple[int, int]): Bottom-right (x, y) of the crop box.
            target (Union[Tuple[int, int], int, None]):
                tuple -> resize the crop to exactly (width, height);
                int   -> resize so the crop's shorter edge equals target (keeps aspect);
                None  -> plain crop, no resize.

        Returns:
            WarpMatrix: 2x3 matrix and the resulting target size.

        Conceptually A = S P with
          P = [[1, 0, -sx], [0, 1, -sy], [0, 0, 1]]          move crop origin to (0, 0)
          S = [[scale_x, 0, 0], [0, scale_y, 0], [0, 0, 1]]  scale to the target size
        so A = [[scale_x, 0, -scale_x*sx], [0, scale_y, -scale_y*sy]].
        """
        crop_width = end_point[0] - start_point[0]
        crop_height = end_point[1] - start_point[1]
        if target is None:
            target_width, target_height = crop_width, crop_height
        elif isinstance(target, tuple):
            target_width, target_height = target
        elif isinstance(target, int):
            scale = target / min(crop_width, crop_height)
            target_width = int(crop_width * scale)
            target_height = int(crop_height * scale)
        else:
            raise ValueError("Target must be a tuple (width, height), an integer, or None.")
        scale_x = target_width / crop_width
        scale_y = target_height / crop_height
        matrix = np.array([
            [scale_x, 0, -start_point[0] * scale_x],
            [0, scale_y, -start_point[1] * scale_y]
        ], dtype=np.float32)
        return WarpMatrix(matrix=matrix, target=(target_width, target_height))

    @staticmethod
    def flip_matrix(horizontal: bool, vertical: bool, src: Tuple[int, int]) -> "WarpMatrix":
        """Affine matrix for mirroring (horizontal and/or vertical).

        Negates the flipped axis and shifts it by the image size so the
        result stays inside the original image bounds.
        """
        scale_x = -1 if horizontal else 1
        scale_y = -1 if vertical else 1
        translate_x = src[0] if horizontal else 0
        translate_y = src[1] if vertical else 0
        matrix = np.array([
            [scale_x, 0, translate_x],
            [0, scale_y, translate_y]
        ], dtype=np.float32)
        return WarpMatrix(matrix=matrix, target=src)

    @staticmethod
    def rotate_matrix(src: Tuple[int, int], angle: float) -> "WarpMatrix":
        """Affine matrix for a clockwise rotation that keeps the whole image visible.

        Args:
            src: source image size (width, height).
            angle: clockwise rotation angle in degrees.

        Returns:
            WarpMatrix: 2x3 rotation matrix with the enlarged target size.
        """
        theta = np.deg2rad(angle)
        cos_theta = np.cos(theta)
        sin_theta = np.sin(theta)
        width, height = src
        center_x, center_y = width / 2, height / 2
        # The output bounding box grows so no source pixel is clipped.
        new_width = int(abs(width * cos_theta) + abs(height * sin_theta))
        new_height = int(abs(width * sin_theta) + abs(height * cos_theta))
        new_center_x, new_center_y = new_width / 2, new_height / 2
        # M = T2 R T1: move center to origin, rotate, move to the new center.
        i2d0 = cos_theta
        i2d1 = sin_theta
        i2d2 = new_center_x - cos_theta * center_x - sin_theta * center_y
        i2d3 = -sin_theta
        i2d4 = cos_theta
        i2d5 = new_center_y + sin_theta * center_x - cos_theta * center_y
        matrix = np.array([
            [i2d0, i2d1, i2d2],
            [i2d3, i2d4, i2d5]], dtype=np.float32)
        return WarpMatrix(matrix=matrix, target=(new_width, new_height))
class ImageTransform:
    """Apply normalization and warp transforms to images."""

    @staticmethod
    def normilization(image: np.ndarray, norm: Norm) -> np.ndarray:
        """Convert to float32 and normalize according to ``norm``.

        Optionally reverses the channel axis (SwapRB), then applies
        either mean/std normalization or alpha/beta scaling.
        """
        out = image.astype(np.float32)
        if norm.channel_type == ChannelType.SwapRB:
            out = out[..., ::-1]  # reverse channel order (BGR <-> RGB)
        mean = np.array(norm.mean, dtype=np.float32)
        std = np.array(norm.std, dtype=np.float32)
        if norm.norm_type == NormType.MeanStd:
            out = (out * norm.alpha - mean) / std
        elif norm.norm_type == NormType.AlphaBeta:
            out = out * norm.alpha + norm.beta
        return out

    @staticmethod
    def WarpTransform(
        image: np.ndarray,
        warp_matrix: WarpMatrix,
        flags=cv2.INTER_LINEAR,
        borderMode=cv2.BORDER_CONSTANT,
        borderValue=(114, 114, 114)
    ) -> np.ndarray:
        """Warp ``image`` with the given WarpMatrix.

        Dispatches on the matrix shape: a 2-row matrix goes through
        cv2.warpAffine, anything else through cv2.warpPerspective.
        """
        if image is None or warp_matrix is None:
            raise ValueError("Input image and warp matrix cannot be None.")
        warp_fn = cv2.warpAffine if warp_matrix.matrix.shape[0] == 2 else cv2.warpPerspective
        return warp_fn(
            image,
            warp_matrix.matrix,
            warp_matrix.target,
            flags=flags,
            borderMode=borderMode,
            borderValue=borderValue,
        )
if __name__ == "__main__":
    image_path = "test.jpg"
    image = cv2.imread(image_path)
    # cv2.imread returns None (instead of raising) when the file is
    # missing or unreadable; fail with a clear error rather than at .shape.
    if image is None:
        raise FileNotFoundError(f"Could not read image: {image_path}")
    h, w, _ = image.shape
    print("shape : ", image.shape)
    # Rotate 60 degrees clockwise without clipping, then letterbox into 640x640.
    rotate_matrix = WarpMatrix.rotate_matrix((w, h), 60)
    letter_box_matrix = WarpMatrix.letter_box_matrix(rotate_matrix.target, (640, 640))
    # Compose: the left operand (letterbox) is applied after the rotation.
    matrix = letter_box_matrix @ rotate_matrix
    transform_image = ImageTransform.WarpTransform(image, matrix)
    cv2.imwrite("transform.jpg", transform_image)
    print(letter_box_matrix)