作者:SufiaLi | 来源:互联网 | 2023-08-13 10:48
光流是图像亮度的运动信息描述。光流法计算最初是由Horn和Schunck于1981年提出的,创造性地将二维速度场与灰度相联系,引入光流约束方程,得到光流计算的基本算法。光流计算基于物体移动的光学特性提出了2个假设:
①运动物体的灰度在很短的间隔时间内保持不变;
②给定邻域内的速度向量场变化是缓慢的。
算法原理
假设图像上一个像素点(x,y),在t时刻的亮度为E(x,y,t),同时用u(x,y)和v(x,y)来表示该点光流在水平和垂直方向上的移动分量:
u=dx/dt
v=dy/dt
在经过一段时间间隔Δt后该点对应点亮度为E(x+Δx,y+Δy,t+Δt),当Δt很小趋近于0时,我们可以认为该点亮度不变,所以可以有:
E(x,y,t)=E(x+Δx,y+Δy,t+Δt)
当该点的亮度有变化时,将移动后点的亮度由Taylor公式展开,可得:
}
// Release the image buffers backing the previous-frame pyramid.
// NOTE(review): this deletes layer 0 as well, which get_next_frame()
// aliases from an external buffer — confirm ownership with the caller.
void LucasKanadeTracker::discard_pre_frame()
{
	for (int layer = 0; layer != max_pyramid_layer; ++layer)
		delete[] pre_pyr[layer];
}
// Promote the current frame's pyramid to be the "previous" pyramid by
// aliasing the per-level pointers (no pixel data is copied).
void LucasKanadeTracker::get_pre_frame()
{
	int layer = max_pyramid_layer;
	while (layer-- > 0)
		pre_pyr[layer] = next_pyr[layer];
}
// Accept the next grayscale frame and build its image pyramid.
// Level 0 aliases the caller-supplied buffer; coarser levels are
// generated from it by build_pyramid().
void LucasKanadeTracker::get_next_frame(BYTE*&gray)
{
	next_pyr[0] = gray;
	build_pyramid(next_pyr);
}
// Downsample a grayscale image by 2 in each dimension using a 3x3
// Gaussian-like kernel (weights: 1/4 centre, 1/8 edge-neighbours,
// 1/16 corner-neighbours).  The rightmost column and bottom row of the
// destination, where the kernel cannot be centred, are filled by
// replicating their inner neighbours.
//
// src_gray_data : source image, src_h x src_w, row-major
// dst           : allocated here with new[]; the caller owns it
// dst_h, dst_w  : set to src_h/2 and src_w/2
//
// Fix vs. original: removed the unused locals `ii`/`hh` that read
// height[1]/width[1] regardless of which pyramid level was being built,
// and made the double->BYTE narrowing an explicit cast.
void LucasKanadeTracker::pyramid_down(BYTE*&src_gray_data,
	const int src_h, const int src_w, BYTE*& dst, int&dst_h, int&dst_w)
{
	dst_h = src_h / 2;
	dst_w = src_w / 2;
	assert(dst_w > 3 && dst_h > 3);
	dst = new BYTE[dst_h*dst_w];
	for (int i = 0; i < dst_h - 1; i++)
		for (int j = 0; j < dst_w - 1; j++)
		{
			// Kernel centre in the source image.
			int srcY = 2 * i + 1;
			int srcX = 2 * j + 1;
			double re = src_gray_data[srcY*src_w + srcX] * 0.25;
			re += src_gray_data[(srcY - 1)*src_w + srcX] * 0.125;
			re += src_gray_data[(srcY + 1)*src_w + srcX] * 0.125;
			re += src_gray_data[srcY*src_w + srcX - 1] * 0.125;
			re += src_gray_data[srcY*src_w + srcX + 1] * 0.125;
			re += src_gray_data[(srcY - 1)*src_w + srcX + 1] * 0.0625;
			re += src_gray_data[(srcY - 1)*src_w + srcX - 1] * 0.0625;
			re += src_gray_data[(srcY + 1)*src_w + srcX - 1] * 0.0625;
			re += src_gray_data[(srcY + 1)*src_w + srcX + 1] * 0.0625;
			dst[i*dst_w + j] = static_cast<BYTE>(re);
		}
	// Pad the right column and bottom row by replication.
	for (int i = 0; i < dst_h; i++)
		dst[i*dst_w + dst_w - 1] = dst[i*dst_w + dst_w - 2];
	for (int i = 0; i < dst_w; i++)
		dst[(dst_h - 1)*dst_w + i] = dst[(dst_h - 2)*dst_w + i];
}
// Bilinearly interpolate the image intensity at a sub-pixel location.
// src is row-major with row stride w.  No bounds checking is performed:
// the caller must guarantee 0 <= point.x < w-1 and 0 <= point.y < h-1.
double LucasKanadeTracker::get_subpixel(BYTE*&src, int h, int w, const DBPoint& point)
{
	const int x0 = floor(point.x);
	const int y0 = floor(point.y);
	const double ax = point.x - x0;   // horizontal blend factor
	const double ay = point.y - y0;   // vertical blend factor
	const double p00 = src[y0*w + x0];
	const double p10 = src[y0*w + x0 + 1];
	const double p01 = src[(y0 + 1)*w + x0];
	const double p11 = src[(y0 + 1)*w + x0 + 1];
	return (1.0 - ax) * (1.0 - ay) * p00
		+ ax * (1.0 - ay) * p10
		+ (1.0 - ax) * ay * p01
		+ ax * ay * p11;
}
// Choose the number of pyramid levels from the image size and the
// tracking window: capped at 5 for large images, otherwise one level per
// halving while the shrunken image still spans twice the window size.
// NOTE(review): the uncapped branch can produce 0 layers for small
// frames, and jumps from 3 to 5 at the cap boundary — looks like a
// possible off-by-one; confirm against callers before changing.
void LucasKanadeTracker::get_max_pyramid_layer()
{
	const int windowsize = 2 * window_radius + 1;
	const int min_dim = (original_imgH > original_imgW)
		? original_imgW : original_imgH;
	// Large enough for the full pyramid: 1<<4 halvings fit.
	if (min_dim > ((1 << 4) * 2 * windowsize))
	{
		max_pyramid_layer = 5;
		return;
	}
	int layer = 0;
	// Integer halving matches the original double(temp)/2 truncation
	// for non-negative sizes.
	int remaining = min_dim / 2;
	while (remaining > 2 * windowsize)
	{
		++layer;
		remaining /= 2;
	}
	max_pyramid_layer = layer;
}
// Generate levels 1..max_pyramid_layer-1 of the pyramid from level 0,
// halving the resolution at each step and recording the per-level
// dimensions in height[]/width[].
void LucasKanadeTracker::build_pyramid(BYTE**&pyramid)
{
	for (int level = 1; level < max_pyramid_layer; ++level)
		pyramid_down(pyramid[level - 1], height[level - 1],
			width[level - 1], pyramid[level], height[level], width[level]);
}
// Track all feature points from the previous frame into the current one,
// then adopt the tracked positions (endin) as the new start positions
// (target) for the next frame.
void LucasKanadeTracker::run_single_frame()
{
	char *state = NULL;
	lucaskanade(pre_pyr, next_pyr, target, endin, numofpoint, state);
	for (int idx = 0; idx < numofpoint; ++idx)
	{
		target[idx].x = endin[idx].x;
		target[idx].y = endin[idx].y;
	}
}
// Pyramidal Lucas-Kanade optical flow (Bouguet's coarse-to-fine scheme).
// For each point, the flow is solved at the coarsest level first and the
// result `g` is propagated (doubled) down to the next finer level, where
// it serves as the initial guess for the iterative refinement.
//
// frame_pre/frame_cur : previous/current image pyramids
// start               : input point positions (finest-level coordinates)
// finish              : output tracked positions (start + flow)
// point_nums          : number of points
// state               : unused (kept for interface compatibility)
//
// Fixes vs. original:
//  - the iteration condition had been garbled to "while (iteration0.00001)";
//    reconstructed from the otherwise-unused max_iter / opticalflow_residual
//    as the standard "more iterations allowed AND residual above tolerance"
//  - "delete[] derivativeXs, derivativeYs;" used the comma operator and
//    leaked derivativeYs; both buffers are now freed
//  - fabs() instead of abs() on doubles (C abs(int) would truncate)
void LucasKanadeTracker::lucaskanade(BYTE**&frame_pre, BYTE**&frame_cur,
	DBPoint*& start, DBPoint*& finish, unsigned int point_nums, char*state)
{
	// Scratch buffers for the spatial derivatives inside one window.
	const int window_area = (2 * window_radius + 1)*(2 * window_radius + 1);
	double*derivativeXs = new double[window_area];
	double*derivativeYs = new double[window_area];
	for (unsigned int i = 0; i < point_nums; i++)
	{
		double g[2] = { 0 };               // flow guess propagated across levels
		double finalopticalflow[2] = { 0 };
		memset(derivativeXs, 0, sizeof(double)*window_area);
		memset(derivativeYs, 0, sizeof(double)*window_area);
		for (int j = max_pyramid_layer - 1; j >= 0; j--)
		{
			// Point position at pyramid level j.
			DBPoint curpoint;
			curpoint.x = start[i].x / pow(2.0, j);
			curpoint.y = start[i].y / pow(2.0, j);
			double Xleft = curpoint.x - window_radius;
			double Xright = curpoint.x + window_radius;
			double Yleft = curpoint.y - window_radius;
			double Yright = curpoint.y + window_radius;
			// Spatial gradient matrix G = [Ixx Ixy; Ixy Iyy], row-major.
			double gradient[4] = { 0 };
			int cnt = 0;
			for (double xx = Xleft; xx < Xright + 0.01; xx += 1.0)
				for (double yy = Yleft; yy < Yright + 0.01; yy += 1.0)
				{
					assert(xx < 1000 && yy < 1000 && xx >= 0 && yy >= 0);
					// Central differences on the previous frame.
					double derivativeX = get_subpixel(frame_pre[j],
						height[j], width[j], DBPoint(xx + 1.0, yy)) -
						get_subpixel(frame_pre[j], height[j],
						width[j], DBPoint(xx - 1.0, yy));
					derivativeX /= 2.0;
					double t1 = get_subpixel
						(frame_pre[j], height[j], width[j], DBPoint(xx, yy + 1.0));
					double t2 = get_subpixel(frame_pre[j], height[j],
						width[j], DBPoint(xx, yy - 1.0));
					double derivativeY = (t1 - t2) / 2.0;
					derivativeXs[cnt] = derivativeX;
					derivativeYs[cnt++] = derivativeY;
					gradient[0] += derivativeX * derivativeX;
					gradient[1] += derivativeX * derivativeY;
					gradient[2] += derivativeX * derivativeY;
					gradient[3] += derivativeY * derivativeY;
				}
			double gradient_inv[4] = { 0 };
			ContraryMatrix(gradient, gradient_inv, 2);
			double opticalflow[2] = { 0 };
			int max_iter = 50;
			double opticalflow_residual = 1;
			int iteration = 0;
			// Newton-style refinement: iterate until the flow update is
			// negligible or the iteration budget is exhausted.
			while (iteration < max_iter && opticalflow_residual > 0.00001)
			{
				iteration++;
				// Image mismatch vector b = sum(dI * [Ix; Iy]).
				double mismatch[2] = { 0 };
				cnt = 0;
				for (double xx = Xleft; xx < Xright + 0.001; xx += 1.0)
					for (double yy = Yleft; yy < Yright + 0.001; yy += 1.0)
					{
						assert(xx < 1000 && yy < 1000 && xx >= 0 && yy >= 0);
						double nextX = xx + g[0] + opticalflow[0];
						double nextY = yy + g[1] + opticalflow[1];
						assert(nextX < 1000 && nextY < 1000 && nextX >= 0 && nextY >= 0);
						double pixelDifference = (get_subpixel(frame_pre[j],
							height[j], width[j], DBPoint(xx, yy))
							- get_subpixel(frame_cur[j], height[j],
							width[j], DBPoint(nextX, nextY)));
						mismatch[0] += pixelDifference*derivativeXs[cnt];
						mismatch[1] += pixelDifference*derivativeYs[cnt++];
					}
				// Solve G * d = b  via  d = G^{-1} b.
				double temp_of[2];
				matrixMul(gradient_inv, 2, 2, mismatch, 2, 1, temp_of);
				opticalflow[0] += temp_of[0];
				opticalflow[1] += temp_of[1];
				opticalflow_residual = fabs(temp_of[0]) + fabs(temp_of[1]);
			}
			if (j == 0)
			{
				// Finest level: this is the residual flow on top of g.
				finalopticalflow[0] = opticalflow[0];
				finalopticalflow[1] = opticalflow[1];
			}
			else
			{
				// Propagate the accumulated guess to the next finer level.
				g[0] = 2 * (g[0] + opticalflow[0]);
				g[1] = 2 * (g[1] + opticalflow[1]);
			}
		}
		finalopticalflow[0] += g[0];
		finalopticalflow[1] += g[1];
		finish[i].x = start[i].x + finalopticalflow[0];
		finish[i].y = start[i].y + finalopticalflow[1];
	}
	delete[] derivativeXs;
	delete[] derivativeYs;
}
// Invert the dim x dim matrix pMatrix into _pMatrix using Gauss-Jordan
// elimination on an augmented [A | I] matrix.  No pivoting is performed:
// a (near-)zero diagonal entry asserts and terminates via exit(0), so
// callers must supply a well-conditioned matrix.
void LucasKanadeTracker::ContraryMatrix(double *pMatrix, double * _pMatrix, int dim)
{
	const int cols = 2 * dim;
	double *aug = new double[dim * cols];
	// Build the augmented matrix [A | I].
	for (int r = 0; r < dim; r++)
	{
		for (int c = 0; c < dim; c++)
			aug[r*cols + c] = pMatrix[r*dim + c];
		for (int c = dim; c < cols; c++)
			aug[r*cols + c] = (c - dim == r) ? 1.0 : 0.0;
	}
	for (int p = 0; p < dim; p++)
	{
		const double pivot = aug[p*cols + p];
		if (fabs(pivot) < 1E-300)
		{
			// Singular (or unpivotable) matrix: abort, as the original did.
			_ASSERTE(-1);
			exit(0);
		}
		// Zero out column p in every other row.
		for (int r = 0; r < dim; r++)
		{
			if (r == p)
				continue;
			const double factor = aug[r*cols + p] / pivot;
			for (int c = 0; c < cols; c++)
				aug[r*cols + c] -= factor * aug[p*cols + c];
		}
		// Normalise the pivot row.
		for (int c = 0; c < cols; c++)
			aug[p*cols + c] /= pivot;
	}
	// The right half of the augmented matrix now holds A^{-1}.
	for (int r = 0; r < dim; r++)
		for (int c = 0; c < dim; c++)
			_pMatrix[r*dim + c] = aug[r*cols + c + dim];
	delete[] aug;
}
// Multiply src1 (height1 x width1) by src2 (height2 x width2) into dst
// (height1 x width2), all row-major.  Returns false without touching dst
// when a pointer is null or the inner dimensions do not match.
bool LucasKanadeTracker::matrixMul(double *src1, int height1, int width1, double *src2, int height2, int width2, double *dst)
{
	if (src1 == NULL || src2 == NULL || dst == NULL || height2 != width1)
		return false;
	for (int row = 0; row < height1; row++)
	{
		const double *lhs_row = src1 + row * width1;
		double *out_row = dst + row * width2;
		for (int col = 0; col < width2; col++)
		{
			double acc = 0;
			for (int k = 0; k < width1; k++)
				acc += lhs_row[k] * src2[k * width2 + col];
			out_row[col] = acc;
		}
	}
	return true;
}
下面是两帧图像间特征点的跟踪
![](https://www.#.com/go/aHR0cDovL2ltZy5ibG9nLmNzZG4ubmV0LzIwMTUxMDIxMjI0MDQxNTE3P3dhdGVybWFyay8yL3RleHQvYUhSMGNEb3ZMMkpzYjJjdVkzTmtiaTV1WlhRdi9mb250LzVhNkw1TDJUL2ZvbnRzaXplLzQwMC9maWxsL0kwSkJRa0ZDTUE9PS9kaXNzb2x2ZS83MC9ncmF2aXR5L0NlbnRlcg==)
参考文献:Pyramidal Implementation of the Affine Lucas Kanade Feature Tracker: Description of the algorithm
An Iterative Image Registration Technique with an Application to Stereo Vision
Detection and Tracking of Point Features
Good Features to Track
Derivation of Kanade-Lucas-Tomasi Tracking Equation
Lucas–Kanade光流算法
Kanade-Lucas-Tomasi(KLT)目标跟踪
光流法与KLT
跟踪算法合集
Opencv学习笔记(九)光流法
OpenCV光流跟踪程序学习