Links to related discussions of workarounds: https://github.com/opencv/opencv/issues/4704
https://stackoverflow.com/questions/16669779/opencv-camera-orientation-issue
People abroad have hit the same problem (http://answers.opencv.org/question/20325/how-can-i-change-orientation-without-ruin-camera-settings/), but nobody has posted a proper fix yet.
In my own testing, two approaches work reasonably well.
The first approach requires modifying OpenCV's library files.
Replace the entire deliverAndDrawFrame(CvCameraViewFrame frame) method in CameraBridgeViewBase.java with:
protected void deliverAndDrawFrame(CvCameraViewFrame frame) {
Mat modified;
if (mListener != null) {
modified = mListener.onCameraFrame(frame);
} else {
modified = frame.rgba();
}
boolean bmpValid = true;
if (modified != null) {
try {
Utils.matToBitmap(modified, mCacheBitmap);
} catch(Exception e) {
Log.e(TAG, "Mat type: " + modified);
Log.e(TAG, "Bitmap type: " + mCacheBitmap.getWidth() + "*" + mCacheBitmap.getHeight());
Log.e(TAG, "Utils.matToBitmap() throws an exception: " + e.getMessage());
bmpValid = false;
}
}
if (mFpsMeter != null) {
mFpsMeter.measure();
}
}
In JavaCameraView.java, modify the initializeCamera(int width, int height) method and add two new methods, private void setDisplayOrientation(Camera camera, int angle) and private String getOrientation(); full-screen portrait display is then achieved through mCamera.setPreviewDisplay(getHolder());.
protected boolean initializeCamera(int width, int height) {
Log.d(TAG, "Initialize java camera");
boolean result = true;
synchronized (this) {
mCamera = null;
if (mCameraIndex == CAMERA_ID_ANY) {
Log.d(TAG, "Trying to open camera with old open()");
try {
mCamera = Camera.open();
}
catch (Exception e){
Log.e(TAG, "Camera is not available (in use or does not exist): " + e.getLocalizedMessage());
}
if(mCamera == null && Build.VERSION.SDK_INT >= Build.VERSION_CODES.GINGERBREAD) {
boolean connected = false;
for (int camIdx = 0; camIdx < Camera.getNumberOfCameras(); ++camIdx) {
try {
mCamera = Camera.open(camIdx);
connected = true;
} catch (RuntimeException e) {
Log.e(TAG, "Camera #" + camIdx + " failed to open: " + e.getLocalizedMessage());
}
if (connected) break;
}
}
} else {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.GINGERBREAD) {
int localCameraIndex = mCameraIndex;
if (mCameraIndex == CAMERA_ID_BACK) {
Log.i(TAG, "Trying to open back camera");
Camera.CameraInfo cameraInfo = new Camera.CameraInfo();
for (int camIdx = 0; camIdx < Camera.getNumberOfCameras(); ++camIdx) {
Camera.getCameraInfo(camIdx, cameraInfo);
if (cameraInfo.facing == Camera.CameraInfo.CAMERA_FACING_BACK) {
localCameraIndex = camIdx;
break;
}
}
} else if (mCameraIndex == CAMERA_ID_FRONT) {
Log.i(TAG, "Trying to open front camera");
Camera.CameraInfo cameraInfo = new Camera.CameraInfo();
for (int camIdx = 0; camIdx < Camera.getNumberOfCameras(); ++camIdx) {
Camera.getCameraInfo(camIdx, cameraInfo);
if (cameraInfo.facing == Camera.CameraInfo.CAMERA_FACING_FRONT) {
localCameraIndex = camIdx;
break;
}
}
}
if (localCameraIndex == CAMERA_ID_BACK) {
Log.e(TAG, "Back camera not found!");
} else if (localCameraIndex == CAMERA_ID_FRONT) {
Log.e(TAG, "Front camera not found!");
} else {
Log.d(TAG, "Trying to open camera with new open(" + Integer.valueOf(localCameraIndex) + ")");
try {
mCamera = Camera.open(localCameraIndex);
} catch (RuntimeException e) {
Log.e(TAG, "Camera #" + localCameraIndex + "failed to open: " + e.getLocalizedMessage());
}
}
}
}
if (mCamera == null)
return false;
/* Now set camera parameters */
try {
Camera.Parameters params = mCamera.getParameters();
Log.d(TAG, "getSupportedPreviewSizes()");
List<android.hardware.Camera.Size> sizes = params.getSupportedPreviewSizes();
if (sizes != null) {
/* Image format NV21 causes issues in the Android emulators */
if (Build.FINGERPRINT.startsWith("generic")
|| Build.FINGERPRINT.startsWith("unknown")
|| Build.MODEL.contains("google_sdk")
|| Build.MODEL.contains("Emulator")
|| Build.MODEL.contains("Android SDK built for x86")
|| Build.MANUFACTURER.contains("Genymotion")
|| (Build.BRAND.startsWith("generic") && Build.DEVICE.startsWith("generic"))
|| "google_sdk".equals(Build.PRODUCT))
params.setPreviewFormat(ImageFormat.YV12); // "generic" or "android" = android emulator
else
params.setPreviewFormat(ImageFormat.NV21);
mPreviewFormat = params.getPreviewFormat();
// changes from the stock OpenCV code start here
if (!Build.MODEL.equals("GT-I9100")) params.setRecordingHint(true);
params.setPreviewSize(1920, 1080);
mCamera.setParameters(params);
mFrameWidth = 1920;
mFrameHeight = 1080;
if (mFpsMeter != null) {
mFpsMeter.setResolution(mFrameWidth, mFrameHeight);
}
int size = mFrameWidth * mFrameHeight;
size = size * ImageFormat.getBitsPerPixel(params.getPreviewFormat()) / 8;
mBuffer = new byte[size];
mCamera.addCallbackBuffer(mBuffer);
mCamera.setPreviewCallbackWithBuffer(this);
mFrameChain = new Mat[2];
mFrameChain[0] = new Mat(mFrameHeight + (mFrameHeight/2), mFrameWidth, CvType.CV_8UC1);
mFrameChain[1] = new Mat(mFrameHeight + (mFrameHeight/2), mFrameWidth, CvType.CV_8UC1);
AllocateCache();
mCameraFrame = new JavaCameraFrame[2];
mCameraFrame[0] = new JavaCameraFrame(mFrameChain[0], mFrameWidth, mFrameHeight);
mCameraFrame[1] = new JavaCameraFrame(mFrameChain[1], mFrameWidth, mFrameHeight);
//different
mSurfaceTexture = new SurfaceTexture(MAGIC_TEXTURE_ID);
mCamera.setPreviewTexture(mSurfaceTexture);
// main modification: rotate the camera preview to match the current screen orientation
if (getOrientation().equals("portrait")) {
setDisplayOrientation(mCamera, 90);
} else if (getOrientation().equals("reverse landscape")){
setDisplayOrientation(mCamera, 180);
} else if (getOrientation().equals("reverse portrait")) {
setDisplayOrientation(mCamera, 270);
}
mCamera.setPreviewDisplay(getHolder());
//end
mCamera.startPreview();
}
else
result = false;
} catch (Exception e) {
result = false;
e.printStackTrace();
}
}
return result;
}
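One thing to note: the preview size is hard-coded to 1920x1080 above, and not every device supports that resolution. A safer variant picks the largest size from getSupportedPreviewSizes() that still fits; the chooseBestSize helper below is my own sketch, not part of the original modification, and its result would replace the 1920/1080 literals passed to setPreviewSize() and assigned to mFrameWidth/mFrameHeight:
// Hypothetical helper: pick a supported preview size instead of hard-coding 1920x1080.
private static android.hardware.Camera.Size chooseBestSize(List<android.hardware.Camera.Size> sizes, int maxWidth, int maxHeight) {
android.hardware.Camera.Size best = null;
for (android.hardware.Camera.Size s : sizes) {
if (s.width <= maxWidth && s.height <= maxHeight
&& (best == null || s.width * s.height > best.width * best.height)) {
best = s; // keep the largest size that still fits
}
}
return best != null ? best : sizes.get(0); // fall back to the first supported size
}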
// add the following two helper methods
private void setDisplayOrientation(Camera camera, int angle){
Method downPolymorphic;
try {
downPolymorphic = camera.getClass().getMethod("setDisplayOrientation", int.class);
if (downPolymorphic != null) {
downPolymorphic.invoke(camera, angle);
}
}
catch (Exception e) {
e.printStackTrace();
}
}
private String getOrientation(){
int orientation = Surface.ROTATION_0;
WindowManager wm = (WindowManager) getContext().getSystemService(Context.WINDOW_SERVICE);
if (wm != null) {
Display display = wm.getDefaultDisplay();
orientation = display.getOrientation();
}
if (orientation == Surface.ROTATION_0) {
return "portrait";
}else if (orientation == Surface.ROTATION_90) {
return "landscape";
} else if (orientation == Surface.ROTATION_180) {
return "reverse portrait";
} else return "reverse landscape";
}
//end
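As a side note, on API level 8 and above the same two helpers can be written without reflection and without the deprecated Display.getOrientation(). A minimal sketch, assuming the same JavaCameraView context and imports as above (the method names here are mine, not part of the original change):
// Sketch: Camera.setDisplayOrientation(int) can be called directly on API >= 8,
// and Display.getRotation() replaces the deprecated Display.getOrientation().
private void setDisplayOrientationDirect(Camera camera, int angle) {
camera.setDisplayOrientation(angle);
}
private int getDisplayRotation() {
WindowManager wm = (WindowManager) getContext().getSystemService(Context.WINDOW_SERVICE);
return wm != null ? wm.getDefaultDisplay().getRotation() : Surface.ROTATION_0;
}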
This approach displays beautifully, but it has one unacceptable drawback:
our onCameraFrame() no longer has any visible effect. Because we call mCamera.setPreviewDisplay(getHolder()), the camera draws straight onto the surface, so whatever OpenCV does to each frame never gets shown. How to fix that properly, I honestly don't know; I hope someone more experienced will share a solution.
The second approach only requires modifying a single OpenCV library file, CameraBridgeViewBase.java. The preview looks quite good, but it still has issues: in my tests the image is zoomed in when the device is in landscape, while portrait looks close to perfect. The FPS does drop noticeably, though still within an acceptable range.
protected void deliverAndDrawFrame(CvCameraViewFrame frame) {
Mat modified;
if (mListener != null) {
modified = mListener.onCameraFrame(frame);
} else {
modified = frame.rgba();
}
boolean bmpValid = true;
if (modified != null) {
try {
Utils.matToBitmap(modified, mCacheBitmap);
} catch(Exception e) {
Log.e(TAG, "Mat type: " + modified);
Log.e(TAG, "Bitmap type: " + mCacheBitmap.getWidth() + "*" + mCacheBitmap.getHeight());
Log.e(TAG, "Utils.matToBitmap() throws an exception: " + e.getMessage());
bmpValid = false;
}
}
if (bmpValid && mCacheBitmap != null) {
Canvas canvas = getHolder().lockCanvas();
if (canvas != null) {
canvas.drawColor(0, android.graphics.PorterDuff.Mode.CLEAR);
/*
// the original drawing code
if (BuildConfig.DEBUG)
Log.d(TAG, "mStretch value: " + mScale);
if (mScale != 0) {
canvas.drawBitmap(mCacheBitmap, new Rect(0,0,mCacheBitmap.getWidth(), mCacheBitmap.getHeight()),
new Rect((int)((canvas.getWidth() - mScale*mCacheBitmap.getWidth()) / 2),
(int)((canvas.getHeight() - mScale*mCacheBitmap.getHeight()) / 2),
(int)((canvas.getWidth() - mScale*mCacheBitmap.getWidth()) / 2 + mScale*mCacheBitmap.getWidth()),
(int)((canvas.getHeight() - mScale*mCacheBitmap.getHeight()) / 2 + mScale*mCacheBitmap.getHeight())), null);
} else {
canvas.drawBitmap(mCacheBitmap, new Rect(0,0,mCacheBitmap.getWidth(), mCacheBitmap.getHeight()),
new Rect((canvas.getWidth() - mCacheBitmap.getWidth()) / 2,
(canvas.getHeight() - mCacheBitmap.getHeight()) / 2,
(canvas.getWidth() - mCacheBitmap.getWidth()) / 2 + mCacheBitmap.getWidth(),
(canvas.getHeight() - mCacheBitmap.getHeight()) / 2 + mCacheBitmap.getHeight()), null);
}
*/ // method 4
Matrix matrix = new Matrix(); // rotate the cached bitmap with minimal extra processing
//matrix.preTranslate((canvas.getWidth() - mCacheBitmap.getWidth()) / 2,(canvas.getHeight() - mCacheBitmap.getHeight()) / 2);
//matrix.postRotate(90f,(canvas.getWidth()) / 2,(canvas.getHeight()) / 2);
//float scale = (float) canvas.getWidth() / (float) mCacheBitmap.getHeight();
//matrix.postScale(scale, scale, canvas.getWidth()/2 , canvas.getHeight()/2 );
//canvas.drawBitmap(mCacheBitmap, matrix, new Paint());
//end
if (getDisplay().getRotation() == Surface.ROTATION_0) {
matrix.preTranslate((canvas.getWidth() - mCacheBitmap.getWidth()) / 2,(canvas.getHeight() - mCacheBitmap.getHeight()) / 2);
matrix.postRotate(90f,(canvas.getWidth()) / 2,(canvas.getHeight()) / 2);
float scale = (float) canvas.getWidth() / (float) mCacheBitmap.getHeight();
matrix.postScale(scale, scale, canvas.getWidth()/2 , canvas.getHeight()/2 );
canvas.drawBitmap(mCacheBitmap, matrix, new Paint());
} else if (getDisplay().getRotation() == Surface.ROTATION_90) {
float scale = (float) canvas.getWidth() / (float) mCacheBitmap.getHeight();
matrix.postScale(scale, scale, canvas.getWidth()/2 , canvas.getHeight()/2 );
canvas.drawBitmap(mCacheBitmap, matrix, new Paint());
} else if (getDisplay().getRotation() == Surface.ROTATION_180) {
matrix.preTranslate((canvas.getWidth() - mCacheBitmap.getWidth()) / 2,(canvas.getHeight() - mCacheBitmap.getHeight()) / 2);
matrix.postRotate(270f,(canvas.getWidth()) / 2,(canvas.getHeight()) / 2);
float scale = (float) canvas.getWidth() / (float) mCacheBitmap.getHeight();
matrix.postScale(scale, scale, canvas.getWidth()/2 , canvas.getHeight()/2 );
canvas.drawBitmap(mCacheBitmap, matrix, new Paint());
} else if (getDisplay().getRotation() == Surface.ROTATION_270) {
matrix.postRotate(180f,(canvas.getWidth()) / 2,(canvas.getHeight()) / 2);
float scale = (float) canvas.getWidth() / (float) mCacheBitmap.getHeight();
matrix.postScale(scale, scale, canvas.getWidth()/2 , canvas.getHeight()/2 );
canvas.drawBitmap(mCacheBitmap, matrix, new Paint());
}
if (mFpsMeter != null) {
mFpsMeter.measure();
mFpsMeter.draw(canvas, 20, 30);
}
getHolder().unlockCanvasAndPost(canvas);
}
}
}
The core change is actually quite small. In deliverAndDrawFrame(CvCameraViewFrame frame), after the lines
if (bmpValid && mCacheBitmap != null) {
Canvas canvas = getHolder().lockCanvas();
if (canvas != null) {
canvas.drawColor(0, android.graphics.PorterDuff.Mode.CLEAR);
replace the original drawing code that follows with
Matrix matrix = new Matrix(); // rotate the cached bitmap with minimal extra processing
matrix.preTranslate((canvas.getWidth() - mCacheBitmap.getWidth()) / 2,(canvas.getHeight() - mCacheBitmap.getHeight()) / 2);
matrix.postRotate(90f,(canvas.getWidth()) / 2,(canvas.getHeight()) / 2);
float scale = (float) canvas.getWidth() / (float) mCacheBitmap.getHeight();
matrix.postScale(scale, scale, canvas.getWidth()/2 , canvas.getHeight()/2 );
canvas.drawBitmap(mCacheBitmap, matrix, new Paint());
and that is the whole change. As a quick sanity check: on a 1080x1920 portrait canvas showing a 1920x1080 frame, scale = canvas.getWidth() / mCacheBitmap.getHeight() = 1080 / 1080 = 1, so the rotated frame exactly fills the screen width.
Being a bit of a perfectionist, I added an orientation check so that both portrait and landscape display correctly, and that is exactly how I found this approach's bug: in landscape the displayed image is zoomed in.
It looks like I still need to dig into whether that landscape zoom bug can be fixed.
However, if the activity is locked to portrait in AndroidManifest.xml,
the landscape bug never shows up. The FPS drop with this approach is quite noticeable, but the result is still acceptable.
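For reference, the portrait lock can be declared with android:screenOrientation="portrait" on the activity element in AndroidManifest.xml, or set programmatically. A minimal sketch of the programmatic variant, assuming this project's MainActivity and an import of android.content.pm.ActivityInfo (nothing here is taken from the original post):
// Sketch: lock the activity to portrait in code; equivalent to
// android:screenOrientation="portrait" on the activity in AndroidManifest.xml.
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setRequestedOrientation(ActivityInfo.SCREEN_ORIENTATION_PORTRAIT);
// ...the rest of the existing onCreate stays unchanged
}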
Screenshots of approach two:
Still, face detection only gives good results when the phone is held in landscape; in portrait it does detect faces, but very unstably.
All in all, I have decided to go with approach two as the basis for my next step.
There is not much time left for me.
Perfectionist that I am, while falling asleep I thought of a way to make portrait face detection work: since OpenCV only detects reliably on landscape-oriented frames, I can first rotate the portrait frame 90 degrees clockwise so that it looks like a landscape frame, run detection and draw the green boxes on that rotated image, and then rotate the result 90 degrees counterclockwise before returning it. That way face detection works in portrait as well.
So I changed the onCameraViewStarted() and onCameraFrame() methods in MainActivity.java as follows:
@Override
public void onCameraViewStarted(int width, int height){
rgbaImage = new Mat(width, height, CvType.CV_8UC4);
grayscaleImage = new Mat(height, width, CvType.CV_8UC4);
Matlin = new Mat(width, height, CvType.CV_8UC4);
gMatlin = new Mat(width, height, CvType.CV_8UC4);
absoluteFaceSize = (int)(height * 0.2);
}
@Override
public void onCameraViewStopped(){
}
@RequiresApi(api = Build.VERSION_CODES.JELLY_BEAN_MR1)
@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame InputFrame) {
grayscaleImage = InputFrame.gray();
rgbaImage = InputFrame.rgba();
int rotation = openCvCameraView.getDisplay().getRotation();
// flip so the front camera image also appears the right way round
if (camera_scene == CAMERA_FRONT) {
Core.flip(rgbaImage, rgbaImage, 1);
Core.flip(grayscaleImage, grayscaleImage, 1);
}
//MatOfRect faces = new MatOfRect();
if (rotation == Surface.ROTATION_0) {
MatOfRect faces = new MatOfRect();
Core.rotate(grayscaleImage, gMatlin, Core.ROTATE_90_CLOCKWISE);
Core.rotate(rgbaImage, Matlin, Core.ROTATE_90_CLOCKWISE);
if (cascadeClassifier != null) {
cascadeClassifier.detectMultiScale(gMatlin, faces, 1.1, 2, 2, new Size(absoluteFaceSize, absoluteFaceSize), new Size());
}
Rect[] faceArray = faces.toArray();
for (int i = 0; i < faceArray.length; i++)
Imgproc.rectangle(Matlin, faceArray[i].tl(), faceArray[i].br(), new Scalar(0, 255, 0, 255), 3);
Core.rotate(Matlin, rgbaImage, Core.ROTATE_90_COUNTERCLOCKWISE);
} else {
MatOfRect faces = new MatOfRect();
if (cascadeClassifier != null) {
cascadeClassifier.detectMultiScale(grayscaleImage, faces, 1.1, 2, 2, new Size(absoluteFaceSize, absoluteFaceSize), new Size());
}
Rect[] faceArray = faces.toArray();
for (int i = 0; i < faceArray.length; i++)
Imgproc.rectangle(rgbaImage, faceArray[i].tl(), faceArray[i].br(), new Scalar(0, 255, 0, 255), 3);
}
return rgbaImage;
}
Finally, with these changes the result looks like this:
Portrait, back camera
Portrait, front camera
Detection works quite well, although profiles (side faces) are not recognized. Another problem is that faces are no longer detected in landscape. I suspect this is because I locked the app to portrait, so int rotation = openCvCameraView.getDisplay().getRotation() always returns Surface.ROTATION_0 and the other branches never run.
To deal with this, I stop locking portrait and change AndroidManifest.xml accordingly,
and modify MainActivity.java to include the full chain of if branches. I tried using a switch statement instead, but the FPS dropped a lot and there was very noticeable stutter.
MatOfRect faces = new MatOfRect();
if (rotation == Surface.ROTATION_0) {
Core.rotate(grayscaleImage, gMatlin, Core.ROTATE_90_CLOCKWISE);
Core.rotate(rgbaImage, Matlin, Core.ROTATE_90_CLOCKWISE);
if (cascadeClassifier != null) {
cascadeClassifier.detectMultiScale(gMatlin, faces, 1.1, 2, 2, new Size(absoluteFaceSize, absoluteFaceSize), new Size());
}
Rect[] faceArray = faces.toArray();
for (int i = 0; i < faceArray.length; i++)
Imgproc.rectangle(Matlin, faceArray[i].tl(), faceArray[i].br(), new Scalar(0, 255, 0, 255), 3);
Core.rotate(Matlin, rgbaImage, Core.ROTATE_90_COUNTERCLOCKWISE);
} else if (rotation == Surface.ROTATION_90) {
if (cascadeClassifier != null) {
cascadeClassifier.detectMultiScale(grayscaleImage, faces, 1.1, 2, 2, new Size(absoluteFaceSize, absoluteFaceSize), new Size());
}
Rect[] faceArray = faces.toArray();
for (int i = 0; i < faceArray.length; i++)
Imgproc.rectangle(rgbaImage, faceArray[i].tl(), faceArray[i].br(), new Scalar(0, 255, 0, 255), 3);
} else if (rotation == Surface.ROTATION_180) {
Core.rotate(grayscaleImage, gMatlin, Core.ROTATE_90_COUNTERCLOCKWISE);
Core.rotate(rgbaImage, Matlin, Core.ROTATE_90_COUNTERCLOCKWISE);
if (cascadeClassifier != null) {
cascadeClassifier.detectMultiScale(gMatlin, faces, 1.1, 2, 2, new Size(absoluteFaceSize, absoluteFaceSize), new Size());
}
Rect[] faceArray = faces.toArray();
for (int i = 0; i < faceArray.length; i++)
Imgproc.rectangle(Matlin, faceArray[i].tl(), faceArray[i].br(), new Scalar(0, 255, 0, 255), 3);
Core.rotate(Matlin, rgbaImage, Core.ROTATE_90_CLOCKWISE);
} else if (rotation == Surface.ROTATION_270) {
Core.rotate(grayscaleImage, gMatlin, Core.ROTATE_180);
Core.rotate(rgbaImage, Matlin, Core.ROTATE_180);
if (cascadeClassifier != null) {
cascadeClassifier.detectMultiScale(gMatlin, faces, 1.1, 2, 2, new Size(absoluteFaceSize, absoluteFaceSize), new Size());
}
Rect[] faceArray = faces.toArray();
for (int i = 0; i < faceArray.length; i++)
Imgproc.rectangle(Matlin, faceArray[i].tl(), faceArray[i].br(), new Scalar(0, 255, 0, 255), 3);
Core.rotate(Matlin, rgbaImage, Core.ROTATE_180);
}
Then, in deliverAndDrawFrame() in CameraBridgeViewBase.java, after the lines
if (bmpValid && mCacheBitmap != null) {
Canvas canvas = getHolder().lockCanvas();
if (canvas != null) {
canvas.drawColor(0, android.graphics.PorterDuff.Mode.CLEAR);
replace the drawing code that follows with
Matrix matrix = new Matrix(); // rotate the cached bitmap with minimal extra processing
float portraitScale = (float) canvas.getWidth() / (float) mCacheBitmap.getHeight();
float landscapeScale = 1f;
if (getDisplay().getRotation() == Surface.ROTATION_0) {
matrix.preTranslate((canvas.getWidth() - mCacheBitmap.getWidth()) / 2,(canvas.getHeight() - mCacheBitmap.getHeight()) / 2);
matrix.postRotate(90f,(canvas.getWidth()) / 2,(canvas.getHeight()) / 2);
//float scale = (float) canvas.getWidth() / (float) mCacheBitmap.getHeight();
//matrix.postScale(scale, scale, canvas.getWidth()/2 , canvas.getHeight()/2 );
matrix.postScale(portraitScale, portraitScale, canvas.getWidth()/2 , canvas.getHeight()/2 );
canvas.drawBitmap(mCacheBitmap, matrix, new Paint());
} else if (getDisplay().getRotation() == Surface.ROTATION_90) {
matrix.preTranslate((canvas.getWidth() - mCacheBitmap.getWidth()) / 2,(canvas.getHeight() - mCacheBitmap.getHeight()) / 2);
//float scale = 1f;
//matrix.postScale(scale, scale, canvas.getWidth()/2 , canvas.getHeight()/2 );
matrix.postScale(landscapeScale, landscapeScale, canvas.getWidth()/2 , canvas.getHeight()/2 );
canvas.drawBitmap(mCacheBitmap, matrix, new Paint());
} else if (getDisplay().getRotation() == Surface.ROTATION_180) {
matrix.preTranslate((canvas.getWidth() - mCacheBitmap.getWidth()) / 2,(canvas.getHeight() - mCacheBitmap.getHeight()) / 2);
matrix.postRotate(270f,(canvas.getWidth()) / 2,(canvas.getHeight()) / 2);
//float scale = (float) canvas.getWidth() / (float) mCacheBitmap.getHeight();
//matrix.postScale(scale, scale, canvas.getWidth()/2 , canvas.getHeight()/2 );
matrix.postScale(portraitScale, portraitScale, canvas.getWidth()/2 , canvas.getHeight()/2 );
canvas.drawBitmap(mCacheBitmap, matrix, new Paint());
} else if (getDisplay().getRotation() == Surface.ROTATION_270) {
matrix.preTranslate((canvas.getWidth() - mCacheBitmap.getWidth()) / 2,(canvas.getHeight() - mCacheBitmap.getHeight()) / 2);
matrix.postRotate(180f,(canvas.getWidth()) / 2,(canvas.getHeight()) / 2);
//float scale = 1f;
//matrix.postScale(scale, scale, canvas.getWidth()/2 , canvas.getHeight()/2 );
matrix.postScale(landscapeScale, landscapeScale, canvas.getWidth()/2 , canvas.getHeight()/2 );
canvas.drawBitmap(mCacheBitmap, matrix, new Paint());
}
Setting the landscape scale factor to 1 fixes the earlier problem of the image being zoomed in incorrectly in landscape. I also tried rewriting this section as a switch statement, but again it felt very laggy; the cause is unclear, perhaps something about how the switch gets compiled.
With all of the above changes, face detection now works in both portrait and landscape, but when the device is rotated 270 degrees, i.e. upside-down landscape, there is very noticeable stutter.
Never mind; in practice it is fine to just lock the app to portrait.
What remains is to improve detection accuracy and to implement profile (side-face) detection and eye detection.
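As a starting point for the eye-detection part, one common pattern is to run a second cascade (for example haarcascade_eye.xml) only inside each detected face rectangle. Below is a minimal sketch, assuming an eyeClassifier loaded the same way as cascadeClassifier; the helper name, the eyeClassifier field and the size thresholds are my own assumptions, not code from this project:
// Hypothetical helper: detect eyes inside each detected face ROI and draw blue boxes.
// Assumes eyeClassifier was loaded from haarcascade_eye.xml just like cascadeClassifier.
private void drawEyes(Mat gray, Mat rgba, Rect[] faceArray) {
for (Rect face : faceArray) {
Mat faceGray = gray.submat(face); // restrict the search to the face region
MatOfRect eyes = new MatOfRect();
if (eyeClassifier != null) {
eyeClassifier.detectMultiScale(faceGray, eyes, 1.1, 2, 2,
new Size(face.width * 0.1, face.width * 0.1), new Size());
}
for (Rect eye : eyes.toArray()) {
// eye coordinates are relative to the face ROI, so offset them back into the full frame
Point tl = new Point(face.x + eye.x, face.y + eye.y);
Point br = new Point(face.x + eye.x + eye.width, face.y + eye.y + eye.height);
Imgproc.rectangle(rgba, tl, br, new Scalar(255, 0, 0, 255), 2);
}
}
}
It could be called right after the face loop in each rotation branch, passing the same gray and color Mats the faces were detected on (gMatlin/Matlin when the frame was rotated, grayscaleImage/rgbaImage otherwise).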