OpenCV 保边滤波器 各向异性扩散滤波(Anisotropic Filter)
各向异性扩散滤波(Anisotropic filter),可以看作双边滤波的一种简化,具有与双边滤波类似的在保留图像边缘的同时减少噪声的作用,同时算法实现速度更快。
首先,我们来了解一下各向异性扩散。
各向异性扩散也叫P-M扩散,各向异性扩散(Anisotropic diffusion)的算法可以详见论文:
Scale-space and edge detection using anisotropic diffusion
具体定义如下:
假设灰度图I(x,y),它的各向异性扩散方程表示如下:
其中,Δ是Laplacian算子,▽是梯度算子,c(x,y,t)是扩散系数,控制扩散速率;Anisotropic filter中扩散系数选取的是图像梯度的函数,因此可以在扩散的同时保留图像的边缘细节信息;K是热传导系数,用来控制对边缘的灵敏度;
对于Anisotropic Filter,我们可以通俗地理解:将整张图像看作是一个热量场,每个像素当作热流,热流的流动取决于当前像素与周围像素的关系。如果邻域像素是边缘像素,那么它的流动扩散系数会比较小,也就是热流不向邻域像素扩散了,或者扩散减弱了;如果不是边缘像素,那么扩散系数沿着流动的方向起作用,流过的地方也就变得平滑。如此一来,就在保留边缘的同时,平滑了噪声区域;
假设图像为I,滤波公式如下:
其中,t代表迭代次数;
▽表示梯度算子(此处指四邻域方向上的一阶差分),四个方向的梯度公式如下:
c表示扩散系数,四个方向上的扩散系数计算如下:
注意:
在这里热传导系数K越大,图像越平滑;
λ越大,图像越平滑;
迭代次数t越多,图像滤波效果越明显;
实现代码
/* MIN2/MAX2: function-like min/max macros.
 * NOTE: each argument may be evaluated more than once — do not pass
 * expressions with side effects (e.g. MIN2(i++, j)). */
#define MIN2(a, b) ((a) < (b) ? (a) : (b))
#define MAX2(a, b) ((a) > (b) ? (a) : (b))
/* CLIP3(x, a, b): clamp x into the inclusive range [a, b]. */
#define CLIP3(x, a, b) MIN2(MAX2(a,x), b)
// Forward declaration: AnisotropicFilter is defined after main() in this
// file; without a prior declaration the call below fails to compile in C++.
void AnisotropicFilter(unsigned char* srcData, int width, int height, int channel, int iter,
    float k, float lambda, int offset);

// Demo driver: load an image, run anisotropic diffusion in place on its
// pixel buffer, and display the before/after images.
int main()
{
    Mat srcImage = imread("1.jpg");
    if (srcImage.empty())
    {
        // Bail out instead of handing a null data pointer to the filter.
        return -1;
    }
    namedWindow("原图", WINDOW_NORMAL);
    imshow("原图", srcImage);
    // Filter in place on the Mat's own interleaved pixel buffer.
    // iter=7, K=10, lambda=0.23, offset=3 (neighbor distance in pixels).
    unsigned char *strData = srcImage.data;
    AnisotropicFilter(strData, srcImage.cols, srcImage.rows, srcImage.channels(), 7, 10, 0.23, 3);
    // Wrap the filtered buffer in a new Mat header (no copy); step 0 means
    // AUTO_STEP, i.e. rows are assumed contiguous.
    Mat grayImage = Mat(srcImage.rows, srcImage.cols, srcImage.type(), strData, 0);
    namedWindow("修改图", WINDOW_NORMAL);
    imshow("修改图", grayImage);
    waitKey(0);
    return 0;
}
//width 为图像的cols
//height 为图像的rows
//stride 为图像每行的数据cols*channel()
void AnisotropicFilter(unsigned char* srcData, int width, int height, int channel, int iter,
float k, float lambda, int offset)
{
int i, j, pos1, pos2, pos3, pos4, n, pos_src;
int NI, SI, EI, WI;
float cN, cS, cE, cW;
int stride = width * channel;
unsigned char* grayData = (unsigned char*)malloc(sizeof(unsigned char) * stride * height);
unsigned char* pSrc = srcData;
float MAP[512];
float kk = 1.0f / (k * k);
for (i = -255; i <= 255; i++)
{
MAP[i + 255] = exp(-i * i * kk) * lambda * i;
}
int r, g, b;
for (n = 0; n < iter; n++)
{
//cout << n << endl;
memcpy(grayData, srcData, sizeof(unsigned char) * height * stride);
pSrc = srcData;
for (j = 0; j < height; j++)
{
//cout << "j : "<<j << endl;
for (i = 0; i < width; i++)
{
//cout << "j : " << j << " i : " << i << endl;
pos_src = CLIP3((i * channel), 0, width*channel - 1) + j * stride;
pos1 = CLIP3((i * channel), 0, width * channel - 1) + CLIP3((j - offset), 0, height - 1) * stride;
pos2 = CLIP3((i * channel), 0, width * channel - 1) + CLIP3((j + offset), 0, height - 1) * stride;
pos3 = (CLIP3((i - offset) * channel, 0, width * channel - 1)) + j * stride;
pos4 = (CLIP3((i + offset) * channel, 0, width * channel - 1)) + j * stride;
//cout << pos_src << " , " << pos1 << " , " << pos2 << " , " << pos3 << " , " << pos4 << endl;
b = grayData[pos_src];
NI = grayData[pos1] - b;
SI = grayData[pos2] - b;
EI = grayData[pos3] - b;
WI = grayData[pos4] - b;
//cout << b << " , " << NI << " , " << SI << " , " << EI << " , " << WI << endl;
cN = MAP[NI + 255];// opt:exp(-NI*NI / (k * k));
cS = MAP[SI + 255];
cE = MAP[EI + 255];
cW = MAP[WI + 255];
int temp = CLIP3((b + (cN + cS + cE + cW)), 0, 255);
/*cout << temp << endl;
cout << pSrc[0] << endl;*/
pSrc[0] = (int)(CLIP3((b + (cN + cS + cE + cW)), 0, 255));
//cout << pSrc[0] << endl;
pos_src = pos_src + 1;
pos1 = pos1 + 1;
pos2 = pos2 + 1;
pos3 = pos3 + 1;
pos4 = pos4 + 1;
g = grayData[pos_src];
NI = grayData[pos1] - g;
SI = grayData[pos2] - g;
EI = grayData[pos3] - g;
WI = grayData[pos4] - g;
cN = MAP[NI + 255];
cS = MAP[SI + 255];
cE = MAP[EI + 255];
cW = MAP[WI + 255];
pSrc[1] = (int)(CLIP3((g + (cN + cS + cE + cW)), 0, 255));
pos_src = pos_src + 1;
pos1 = pos1 + 1;
pos2 = pos2 + 1;
pos3 = pos3 + 1;
pos4 = pos4 + 1;
r = grayData[pos_src];
NI = grayData[pos1] - r;
SI = grayData[pos2] - r;
EI = grayData[pos3] - r;
WI = grayData[pos4] - r;
cN = MAP[NI + 255];
cS = MAP[SI + 255];
cE = MAP[EI + 255];
cW = MAP[WI + 255];
pSrc[2] = (int)(CLIP3((r + (cN + cS + cE + cW)), 0, 255));
pSrc += channel;
}
}
}
free(grayData);
}
参考:https://blog.csdn.net/trent1985/article/details/80625552
更多推荐
所有评论(0)