Tags: DIP
[TOC]
简书著作权归作者所有,任何形式的转载都请联系作者获得授权并注明出处。
双边滤波器(bilateral filter)
双边滤波器也是以高斯函数为基础的滤波器,高斯函数的曲线是一种钟形曲线, 高斯滤波器只考虑了像素之间的空间关系,对于距离更近的像素给予更大的权重,而对于距离更远的像素给予更小的权重。而双边滤波器和高斯滤波器相比加入了像素之间的相关性,两个像素点之间的像素值越接近则权重越大。
具体到数学公式:
对于像素(i,j), 卷积矩阵模板中像素(k,l)的权重计算公式为:
$$w(i,j,k,l)=\exp\left(-\frac{(i-k)^2+(j-l)^2}{2\sigma_d^2}-\frac{\|I(i,j)-I(k,l)\|^2}{2\sigma_r^2}\right)$$
像素(i,j)新的像素值计算公式为:
$$I'(i,j)=\frac{\sum_{k,l} I(k,l)\,w(i,j,k,l)}{\sum_{k,l} w(i,j,k,l)}$$
其中权重公式的前半部分计算空间关系,后半部分负责计算像素值关系。
看下面图的话,我们可以看出二者是如何一起起作用的,双边滤波器是如何起到保边的作用的,从指数函数的计算性质我们也可以得出,他们的作用效果在下图(权重)中实际上是相乘的,因此得到一个半钟形权重曲线,从而最后在像素计算的时候起到保边的效果。
下面给出我自己实现的将空间相似性(高斯滤波器)和像素相似性分开的滤波器,将二者结合起来就是双边滤波器了。
#include "opencv2/photo.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/core.hpp"
#include <iostream>
#include <algorithm>
#include <stdlib.h>
//using namespace std;
using namespace cv;
Mat img;          // global source image, loaded in main()
int kernelSize=5; // filter window width (odd); main() also passes 5 explicitly
// Euclidean distance between pixel (x, y) and pixel (i, j).
float distance(int x, int y, int i, int j) {
    const float dx = float(x - i);
    const float dy = float(y - j);
    // dx*dx is cheaper and clearer than pow(dx, 2) for integer exponents.
    return float(std::sqrt(dx * dx + dy * dy));
}
// 1-D Gaussian weight of value x with standard deviation sigma.
// NOTE: the 2*pi*sigma^2 normalization is the 2-D constant, but since
// pixelWeight divides by the sum of weights it cancels and never changes
// the filtered result.
double gaussian(float x, double sigma) {
    constexpr double kPi = 3.1415926535897932384626433832795; // same value as OpenCV's CV_PI
    const double s2 = sigma * sigma;
    return std::exp(-(double(x) * double(x)) / (2 * s2)) / (2 * kPi * s2);
}
void myBilateralFilter(pixelWeight(Mat source, Mat filteredImage, int x, int y, int kernelSize, double sigmaI, double sigmaS, int method = 1)
{
double iFiltered = 0;
double wP = 0;
double w = 0;
int neighbor_x = 0;
int neighbor_y = 0;
int half = kernelSize / 2;
if(method == 1){
for(int i = 0; i < kernelSize; i++) {
for(int j = 0; j < kernelSize; j++) {
neighbor_x = x - (half - i);
neighbor_y = y - (half - j);
double gi = gaussian(source.at<uchar>(neighbor_x, neighbor_y) - source.at<uchar>(x, y), sigmaI); //只考虑了像素相似性
//double gs = gaussian(distance(x, y, neighbor_x, neighbor_y), sigmaS);
w = gi;
iFiltered = iFiltered + source.at<uchar>(neighbor_x, neighbor_y) * w;
wP = wP + w;
}
}
}
if(method == 2){
for(int i = 0; i < kernelSize; i++) {
for(int j = 0; j < kernelSize; j++) {
neighbor_x = x - (half - i);
neighbor_y = y - (half - j);
double gs = gaussian(distance(x, y, neighbor_x, neighbor_y), sigmaS); //只考虑空间相似性,高斯滤波器
w = gs;
iFiltered = iFiltered + source.at<uchar>(neighbor_x, neighbor_y) * w;
wP = wP + w;
}
}
}
iFiltered = iFiltered / wP;
filteredImage.at<double>(x, y) = iFiltered;
}
void mySilateralFilter(Mat src, Mat dst, int kernelSize, double sigmaI, double sigmaS, int method){
int width = src.cols;
int height = src.rows;
for(int i = 2; i < height - 2; i++) {
for(int j = 2; j < width - 2; j++) {
pixelWeight(src, dst, i, j, kernelSize, sigmaI, sigmaS, method);
}
}
}
int main(){
img = imread("/Users/yuhua.cheng/Desktop/FaceBeauty/data/origin2.png", -1);
if(img.empty()){
std::cout << "Unable to load source image!" << std::endl;
return -1;
}
cv::cvtColor(img, img, cv::COLOR_BGRA2GRAY);
std::cout << "Fine!" << std::endl;
imshow("img", img);
Mat myDst = Mat::zeros(img.rows, img.cols, CV_64F);
Mat opencvDst;
cv::bilateralFilter(img, opencvDst, 5, 12.0,16.0);
myBilateralFilter(img, myDst, 5, 12.0, 16.0, 2);
cv::imwrite("OpencvBilateral.jpg", opencvDst);
cv::imwrite("myBilateral2.jpg", myDst);
return 0;
}
表面模糊算法(Surface Blur Filter)
表面模糊算法是photoshop中用到的保边算法。
参数
- 模糊半径:r
模糊半径为r, 则矩阵大小为(2r+1)*(2r+1)
- 模糊阈值:T
r,T 变大则模糊区域越大,去除噪声的效果越好。
卷积矩阵计算
$w_{ij}$ 是卷积矩阵元素的值,$x_{ij}$ 是对应的图像像素值,$x_c$ 是矩阵模板中心对应的图像像素值, T是模糊阈值:
$$w_{ij}=1-\frac{|x_{ij}-x_c|}{2.5T}$$
这样看的话,卷积矩阵中心元素肯定是1。一般而言还会对 $w_{ij}$ 进行预处理:小于0时截断为0。
每个像素经过卷积运算之后得到的值为:
$$x'=\frac{\sum_{i,j} w_{ij}\,x_{ij}}{\sum_{i,j} w_{ij}}$$
下面同样给出我自己实现的算法(没有优化速度比较慢,而且只能处理单通道图像):
// Photoshop-style surface blur: edge-preserving smoothing for a single-channel
// 8-bit image (assumed CV_8U — unoptimized reference implementation).
//   radius:    window radius, so the window is (2*radius+1) x (2*radius+1)
//   threshold: blur threshold T; larger T smooths more aggressively
Mat surfaceBlur(const Mat& src, int radius, int threshold){
    Mat result = src.clone();
    const int height = src.rows;
    const int width = src.cols;
    const int newHeight = height + 2 * radius;
    const int newWidth = width + 2 * radius;
    // Zero-padded working copy so the window never reads out of bounds.
    Mat temp = Mat::zeros(newHeight, newWidth, CV_8U);
    Mat inner = temp.rowRange(radius, newHeight - radius).colRange(radius, newWidth - radius);
    src.copyTo(inner);
    for(int m = radius; m < newHeight - radius; m++){
        for(int n = radius; n < newWidth - radius; n++){
            float tempPixel = 0; // weighted sum of window pixels
            float WP = 0;        // sum of weights (>= 1: the center weight is 1)
            const int center = temp.at<uchar>(m, n);
            for(int i = -radius; i < radius + 1; i++){
                for(int j = -radius; j < radius + 1; j++){
                    // w = 1 - |x - x_center| / (2.5 * T), clamped at 0.
                    float Wij = 1 - abs(temp.at<uchar>(m + i, n + j) - center) / (2.5f * threshold);
                    if(Wij < 0){
                        Wij = 0;
                    }
                    tempPixel += Wij * temp.at<uchar>(m + i, n + j);
                    WP += Wij;
                }
            }
            // Write into `result`, NOT into `temp`: the original stored the
            // filtered value back into `temp`, so later pixels saw
            // already-filtered neighbors instead of the source data.
            result.at<uchar>(m - radius, n - radius) = uchar(tempPixel / WP);
        }
    }
    return result;
}
局部均方差(Local Mean Square Error Filter)
对于一幅图像,在(2n+1, 2m+1) 窗口内,其局部均值为:
$$m_{ij}=\frac{1}{(2n+1)(2m+1)}\sum_{(k,l)\in W_{ij}} x_{kl}$$
局部均方差为:
$$\sigma_{ij}^2=\frac{1}{(2n+1)(2m+1)}\sum_{(k,l)\in W_{ij}} (x_{kl}-m_{ij})^2$$
计算得到均值和均方差之后,新的像素值计算公式为:
$$x'_{ij}=(1-k)\,m_{ij}+k\,x_{ij}$$
其中:
$$k=\frac{\sigma_{ij}^2}{\sigma_{ij}^2+\varepsilon}$$
$\varepsilon$ 为用户输入参数
参考这篇文章, 方差简单计算公式: $\sigma^2=E[x^2]-(E[x])^2$
这一步实际就是均方差矩阵和均值矩阵平方的差值。
参数选择最主要就是在半径选择和参数 $\varepsilon$ 选择上,ImageShop给出经验参数:1)半径:max(width, height)*0.02, 2) $\varepsilon$: 10 + DenoiseLevel * DenoiseLevel * 5, DenoiseLevel为磨皮参数程度,范围为0-10,越大磨皮越厉害。
下面贴出我自己的简单实现:
// Local Mean Square Error Filter
#include <iostream>
#include <opencv2/core/core.hpp>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/photo/photo.hpp"
using namespace std;
int value1 = 20;  // trackbar: smoothing strength (epsilon = value1/10000)
int value2 = 21;  // trackbar: box-filter window size
cv::Mat img;      // global source image shared with the trackbar callback
// Local mean-square-error (edge-preserving) filter for a 3-channel 8-bit image.
//   value1: smoothing strength; epsilon = value1/10000
//   value2: box-filter window size used for the local statistics
// Shows the result in the "result" window and also returns it (CV_8UC3).
cv::Mat EMPFilter(cv::Mat& img, int value1, int value2){
    int radius = value2;
    // Divide by a floating-point literal: integer division (value1/10000)
    // would truncate epsilon to 0.
    float epsilon = value1 / 10000.0;
    cv::Mat result = img.clone();
    cv::Mat_<cv::Vec3f> tempImg;
    img.convertTo(tempImg, CV_32FC3, 1.0 / 255); // rescale to float 0.0 - 1.0
    // Local mean and local variance via box filters: var = E[x^2] - (E[x])^2.
    cv::Mat_<cv::Vec3f> meanIP;
    cv::boxFilter(tempImg, meanIP, -1, cv::Size(radius, radius), cv::Point(-1, -1), true);
    cv::Mat_<cv::Vec3f> corrIP;
    cv::boxFilter(tempImg.mul(tempImg), corrIP, -1, cv::Size(radius, radius), cv::Point(-1, -1), true);
    cv::Mat_<cv::Vec3f> varIP = corrIP - meanIP.mul(meanIP);
    for(int row = 0; row < img.rows; row++){
        float* tempImgPtr = tempImg.ptr<float>(row);
        float* meanPtr = meanIP.ptr<float>(row);
        float* varPtr = varIP.ptr<float>(row); // was misleadingly named corrPtr
        for(int col = 0; col < img.cols; col++){
            // Per channel: x' = mean + var * (x - mean) / (var + epsilon),
            // i.e. x' = (1-k)*mean + k*x with k = var/(var+epsilon).
            tempImgPtr[0] = meanPtr[0] + (varPtr[0] * (tempImgPtr[0] - meanPtr[0]) / (varPtr[0] + epsilon));
            tempImgPtr[1] = meanPtr[1] + (varPtr[1] * (tempImgPtr[1] - meanPtr[1]) / (varPtr[1] + epsilon));
            tempImgPtr[2] = meanPtr[2] + (varPtr[2] * (tempImgPtr[2] - meanPtr[2]) / (varPtr[2] + epsilon));
            tempImgPtr += 3;
            meanPtr += 3;
            varPtr += 3;
        }
    }
    tempImg.convertTo(result, CV_8UC3, 255);
    cv::imshow("result", result);
    cv::waitKey(0);
    return result;
}
void onTrackbar(int, void *){
cv::Mat result = EMPFilter(img, value1, value2);
}
int main(){
img = cv::imread("/Users/yuhua.cheng/Documents/dataset/Face/liuming_org.jpg", -1);
if(img.empty()){
cout << "Unable to load image!" << endl;
return -1;
}
if(img.type() == 24){
cv::cvtColor(img, img, cv::COLOR_BGRA2BGR);
}
int height = img.rows;
int width = img.cols;
if(height > width){
cv::resize(img, img, cv::Size(500, 800));
}
else{
cv::resize(img, img, cv::Size(1000, 800));
}
cv::imshow("origin img", img);
cv::Mat bilateral;
cv::bilateralFilter(img, bilateral, 10, 50.5, 50.5);
cv::imshow("bilateral", bilateral);
cv::namedWindow("result", 1);
cv::createTrackbar("value1", "result", &value1, 50, onTrackbar);
cv::createTrackbar("value2", "result", &value2, 40, onTrackbar);
onTrackbar(0, 0);
return 0;
}
贴几张效果图:
如果可以在现有基础上加上人脸关键点识别或者肤色识别的部分,只处理皮肤区域可以得到更好的效果,不会造成全局模糊。
实际上我们知道使用积分图可以快速实现均值滤波, 从而快速计算均值矩阵和均方差矩阵。
这也正是这篇专利:https://patents.google.com/patent/CN102509266B/en实现的。
opencv内置cv::integral函数,可以用来计算积分图。积分图计算的时候需要注意的是选择合适的数据类型防止数据溢出。
下面看下我自己实现的基于积分图的非局部均方差滤波方法:
导向滤波器(guided filter)
导向滤波器是He KaiMing在Guided Image Filtering中提出的滤波算法,不仅仅可以用来去噪,还可以用于图像去雾,图像Matting等。
算法思路可以参考这篇文章的讲解。
opencv contrib模块的 ximgproc中有guidedFilter()函数可以直接使用。
也可以参考这篇文章的实现。下面是我参考它修改了一点的代码:
cv::Mat guidedFilter(cv::Mat I, cv::Mat p, int r, double eps)
{
    /*
    % GUIDEDFILTER O(N) time implementation of guided filter.
    %
    % - guidance image: I (should be a gray-scale/single channel image)
    % - filtering input image: p (should be a gray-scale/single channel image)
    % - local window radius: r
    % - regularization parameter: eps
    */
    // Normalize both images to double precision in [0, 1].
    cv::Mat guide, input;
    I.convertTo(guide, CV_64FC1, 1.0 / 255);
    p.convertTo(input, CV_64FC1, 1.0 / 255);
    // r is a radius, while OpenCV filters expect the full window size.
    const int win = 2 * r + 1;
    // Local mean of a matrix over the (win x win) window.
    auto boxMean = [win](const cv::Mat& m) {
        cv::Mat out;
        cv::boxFilter(m, out, CV_64FC1, cv::Size(win, win));
        return out;
    };
    cv::Mat mean_I = boxMean(guide);
    cv::Mat mean_p = boxMean(input);
    // Covariance of (I, p) in each local patch.
    cv::Mat cov_Ip = boxMean(guide.mul(input)) - mean_I.mul(mean_p);
    // Variance of I in each local patch.
    cv::Mat var_I = boxMean(guide.mul(guide)) - mean_I.mul(mean_I);
    // Per-patch linear coefficients: q = a * I + b.
    cv::Mat a = cov_Ip / (var_I + eps);
    cv::Mat b = mean_p - a.mul(mean_I);
    // Average the coefficients over all windows covering each pixel.
    cv::Mat mean_a = boxMean(a);
    cv::Mat mean_b = boxMean(b);
    return mean_a.mul(guide) + mean_b;
}
Reference
- https://blog.csdn.net/eejieyang/article/details/52333112
- https://blog.csdn.net/baimafujinji/article/details/74750283
- https://blog.csdn.net/kuweicai/article/details/78385871
- https://blog.csdn.net/Trent1985/article/details/80802144
- 这位作者的文章在研究图像处理方面给我很多帮助,很多都参考了他的思路自己复现一遍:http://www.cnblogs.com/Imageshop/p/4679065.html