OpenCV  4.10.0
Open Source Computer Vision
How to run deep networks on Android device

Previous tutorial: Android Development with OpenCV
Next tutorial: Use OpenCL in Android camera preview based CV application

See also
Deep Neural Networks (dnn module)
Original author: Dmitry Kurtaev
Compatibility: OpenCV >= 4.9

Introduction

In this tutorial you will learn how to run deep learning networks on an Android device using the OpenCV deep learning module. The tutorial was written for Android Studio 2022.2.1.

Requirements

Create an empty Android Studio project and add OpenCV dependency

Use the Android Development with OpenCV tutorial to initialize your project and add OpenCV.

Make an app

Our sample will take a picture from the camera, forward it into the deep network, and receive a set of rectangles, class identifiers, and confidence values in the [0, 1] range.

  • First, we need to add the necessary widget for displaying processed frames. Modify app/src/main/res/layout/activity_main.xml:
    <?xml version="1.0" encoding="utf-8"?>
    <FrameLayout xmlns:android="http://schemas.android.com/apk/res/android"
        xmlns:app="http://schemas.android.com/apk/res-auto"
        xmlns:tools="http://schemas.android.com/tools"
        android:layout_width="match_parent"
        android:layout_height="match_parent"
        tools:context="org.opencv.samples.opencv_mobilenet.MainActivity">

        <org.opencv.android.JavaCameraView
            android:id="@+id/CameraView"
            android:layout_width="match_parent"
            android:layout_height="match_parent"
            android:visibility="visible" />

    </FrameLayout>
  • Modify /app/src/main/AndroidManifest.xml to enable full-screen mode, set the proper screen orientation, and allow use of the camera.
    <?xml version="1.0" encoding="utf-8"?>
    <manifest xmlns:android="http://schemas.android.com/apk/res/android">

        <application
            android:label="@string/app_name">

            <activity
                android:exported="true"
                android:name=".MainActivity"
                android:screenOrientation="landscape"> <!--Screen orientation-->

                <intent-filter>
                    <action android:name="android.intent.action.MAIN" />
                    <category android:name="android.intent.category.LAUNCHER" />
                </intent-filter>
            </activity>
        </application>

        <!--Allow the use of the camera-->
        <uses-permission android:name="android.permission.CAMERA"/>
        <uses-feature android:name="android.hardware.camera" android:required="false"/>
        <uses-feature android:name="android.hardware.camera.autofocus" android:required="false"/>
        <uses-feature android:name="android.hardware.camera.front" android:required="false"/>
        <uses-feature android:name="android.hardware.camera.front.autofocus" android:required="false"/>
    </manifest>
  • Replace the content of app/src/main/java/com/example/myapplication/MainActivity.java and set a custom package name if necessary:
package com.example.myapplication;
import android.content.Context;
import android.content.res.AssetManager;
import android.os.Bundle;
import android.util.Log;
import android.widget.Toast;
import org.opencv.android.CameraActivity;
import org.opencv.android.CameraBridgeViewBase;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewListener2;
import org.opencv.android.OpenCVLoader;
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.MatOfByte;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.dnn.Net;
import org.opencv.dnn.Dnn;
import org.opencv.imgproc.Imgproc;
import java.io.InputStream;
import java.io.IOException;
import java.util.Collections;
import java.util.List;
public class MainActivity extends CameraActivity implements CvCameraViewListener2 {
    @Override
    public void onResume() {
        super.onResume();
        if (mOpenCvCameraView != null)
            mOpenCvCameraView.enableView();
    }

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);

        if (OpenCVLoader.initLocal()) {
            Log.i(TAG, "OpenCV loaded successfully");
        } else {
            Log.e(TAG, "OpenCV initialization failed!");
            (Toast.makeText(this, "OpenCV initialization failed!", Toast.LENGTH_LONG)).show();
            return;
        }

        mModelBuffer = loadFileFromResource(R.raw.mobilenet_iter_73000);
        mConfigBuffer = loadFileFromResource(R.raw.deploy);
        if (mModelBuffer == null || mConfigBuffer == null) {
            Log.e(TAG, "Failed to load model from resources");
        } else
            Log.i(TAG, "Model files loaded successfully");

        net = Dnn.readNet("caffe", mModelBuffer, mConfigBuffer);
        Log.i(TAG, "Network loaded successfully");

        setContentView(R.layout.activity_main);

        // Set up camera listener.
        mOpenCvCameraView = (CameraBridgeViewBase)findViewById(R.id.CameraView);
        mOpenCvCameraView.setVisibility(CameraBridgeViewBase.VISIBLE);
        mOpenCvCameraView.setCvCameraViewListener(this);
    }

    @Override
    public void onPause()
    {
        super.onPause();
        if (mOpenCvCameraView != null)
            mOpenCvCameraView.disableView();
    }

    @Override
    protected List<? extends CameraBridgeViewBase> getCameraViewList() {
        return Collections.singletonList(mOpenCvCameraView);
    }

    public void onDestroy() {
        super.onDestroy();
        if (mOpenCvCameraView != null)
            mOpenCvCameraView.disableView();
        mModelBuffer.release();
        mConfigBuffer.release();
    }

    // Load a network.
    public void onCameraViewStarted(int width, int height) {
    }
    public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
        final int IN_WIDTH = 300;
        final int IN_HEIGHT = 300;
        final float WH_RATIO = (float)IN_WIDTH / IN_HEIGHT;
        final double IN_SCALE_FACTOR = 0.007843;
        final double MEAN_VAL = 127.5;
        final double THRESHOLD = 0.2;

        // Get a new frame
        Log.d(TAG, "Got a new frame!");
        Mat frame = inputFrame.rgba();
        Imgproc.cvtColor(frame, frame, Imgproc.COLOR_RGBA2RGB);

        // Forward image through network.
        Mat blob = Dnn.blobFromImage(frame, IN_SCALE_FACTOR,
                new Size(IN_WIDTH, IN_HEIGHT),
                new Scalar(MEAN_VAL, MEAN_VAL, MEAN_VAL), /*swapRB*/false, /*crop*/false);
        net.setInput(blob);
        Mat detections = net.forward();

        int cols = frame.cols();
        int rows = frame.rows();
        detections = detections.reshape(1, (int)detections.total() / 7);

        for (int i = 0; i < detections.rows(); ++i) {
            double confidence = detections.get(i, 2)[0];
            if (confidence > THRESHOLD) {
                int classId = (int)detections.get(i, 1)[0];
                int left   = (int)(detections.get(i, 3)[0] * cols);
                int top    = (int)(detections.get(i, 4)[0] * rows);
                int right  = (int)(detections.get(i, 5)[0] * cols);
                int bottom = (int)(detections.get(i, 6)[0] * rows);

                // Draw rectangle around detected object.
                Imgproc.rectangle(frame, new Point(left, top), new Point(right, bottom),
                        new Scalar(0, 255, 0));
                String label = classNames[classId] + ": " + confidence;
                int[] baseLine = new int[1];
                Size labelSize = Imgproc.getTextSize(label, Imgproc.FONT_HERSHEY_SIMPLEX, 0.5, 1, baseLine);

                // Draw background for label.
                Imgproc.rectangle(frame, new Point(left, top - labelSize.height),
                        new Point(left + labelSize.width, top + baseLine[0]),
                        new Scalar(255, 255, 255), Imgproc.FILLED);
                // Write class name and confidence.
                Imgproc.putText(frame, label, new Point(left, top),
                        Imgproc.FONT_HERSHEY_SIMPLEX, 0.5, new Scalar(0, 0, 0));
            }
        }
        return frame;
    }
    public void onCameraViewStopped() {}

    private MatOfByte loadFileFromResource(int id) {
        byte[] buffer;
        try {
            // Load the model file from application resources.
            InputStream is = getResources().openRawResource(id);
            int size = is.available();
            buffer = new byte[size];
            int bytesRead = is.read(buffer);
            is.close();
        } catch (IOException e) {
            e.printStackTrace();
            Log.e(TAG, "Failed to load model from resources! Exception thrown: " + e);
            (Toast.makeText(this, "Failed to load model from resources!", Toast.LENGTH_LONG)).show();
            return null;
        }

        return new MatOfByte(buffer);
    }
    private static final String TAG = "OpenCV-MobileNet";

    private static final String[] classNames = {"background",
            "aeroplane", "bicycle", "bird", "boat",
            "bottle", "bus", "car", "cat", "chair",
            "cow", "diningtable", "dog", "horse",
            "motorbike", "person", "pottedplant",
            "sheep", "sofa", "train", "tvmonitor"};

    private MatOfByte mConfigBuffer;
    private MatOfByte mModelBuffer;
    private Net net;
    private CameraBridgeViewBase mOpenCvCameraView;
}
  • Put the downloaded deploy.prototxt and mobilenet_iter_73000.caffemodel files into the app/src/main/res/raw folder. OpenCV DNN is mainly designed to load ML and DNN models from files. Modern Android does not allow that without extra permissions, but it does provide a Java API to load bytes from resources. The sample therefore uses an alternative DNN API that initializes a model from an in-memory buffer rather than a file. The following function reads a model file from the resources and converts it to a MatOfByte object (an analogue of std::vector<char> in the C++ world) suitable for the OpenCV Java API:
private MatOfByte loadFileFromResource(int id) {
    byte[] buffer;
    try {
        // Load the model file from application resources.
        InputStream is = getResources().openRawResource(id);
        int size = is.available();
        buffer = new byte[size];
        int bytesRead = is.read(buffer);
        is.close();
    } catch (IOException e) {
        e.printStackTrace();
        Log.e(TAG, "Failed to load model from resources! Exception thrown: " + e);
        (Toast.makeText(this, "Failed to load model from resources!", Toast.LENGTH_LONG)).show();
        return null;
    }

    return new MatOfByte(buffer);
}
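Note that InputStream.available() only returns an estimate and is not guaranteed to report the full size of the resource. If you ever hit truncated reads, a more defensive variant is to read the stream in chunks. The sketch below is illustrative only: the helper name loadFileFromResourceSafely is made up for this example, and it needs an extra import of java.io.ByteArrayOutputStream.

    private MatOfByte loadFileFromResourceSafely(int id) {
        // Read the raw resource in fixed-size chunks instead of relying on
        // InputStream.available() to report the total length.
        try (InputStream is = getResources().openRawResource(id);
             ByteArrayOutputStream os = new ByteArrayOutputStream()) {
            byte[] chunk = new byte[4096];
            int read;
            while ((read = is.read(chunk)) != -1)
                os.write(chunk, 0, read);
            return new MatOfByte(os.toByteArray());
        } catch (IOException e) {
            Log.e(TAG, "Failed to load model from resources! Exception thrown: " + e);
            return null;
        }
    }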

The network is then initialized with the following lines:

mModelBuffer = loadFileFromResource(R.raw.mobilenet_iter_73000);
mConfigBuffer = loadFileFromResource(R.raw.deploy);
if (mModelBuffer == null || mConfigBuffer == null) {
    Log.e(TAG, "Failed to load model from resources");
} else
    Log.i(TAG, "Model files loaded successfully");

net = Dnn.readNet("caffe", mModelBuffer, mConfigBuffer);
Log.i(TAG, "Network loaded successfully");
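Since the model here is a Caffe network, the same buffers could also be passed to the Caffe-specific reader instead of the generic Dnn.readNet. A minimal equivalent sketch (note the argument order: the deploy.prototxt buffer comes first, then the weights):

    // Equivalent initialization via the Caffe-specific reader.
    net = Dnn.readNetFromCaffe(mConfigBuffer, mModelBuffer);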

See also: Android resources documentation

  • Take a look at how the DNN model input is prepared and how the inference result is interpreted:
Mat blob = Dnn.blobFromImage(frame, IN_SCALE_FACTOR,
        new Size(IN_WIDTH, IN_HEIGHT),
        new Scalar(MEAN_VAL, MEAN_VAL, MEAN_VAL), /*swapRB*/false, /*crop*/false);
net.setInput(blob);
Mat detections = net.forward();

int cols = frame.cols();
int rows = frame.rows();
detections = detections.reshape(1, (int)detections.total() / 7);

for (int i = 0; i < detections.rows(); ++i) {
    double confidence = detections.get(i, 2)[0];
    if (confidence > THRESHOLD) {
        int classId = (int)detections.get(i, 1)[0];
        int left   = (int)(detections.get(i, 3)[0] * cols);
        int top    = (int)(detections.get(i, 4)[0] * rows);
        int right  = (int)(detections.get(i, 5)[0] * cols);
        int bottom = (int)(detections.get(i, 6)[0] * rows);

        // Draw rectangle around detected object.
        Imgproc.rectangle(frame, new Point(left, top), new Point(right, bottom),
                new Scalar(0, 255, 0));
        String label = classNames[classId] + ": " + confidence;
        int[] baseLine = new int[1];
        Size labelSize = Imgproc.getTextSize(label, Imgproc.FONT_HERSHEY_SIMPLEX, 0.5, 1, baseLine);

        // Draw background for label.
        Imgproc.rectangle(frame, new Point(left, top - labelSize.height),
                new Point(left + labelSize.width, top + baseLine[0]),
                new Scalar(255, 255, 255), Imgproc.FILLED);
        // Write class name and confidence.
        Imgproc.putText(frame, label, new Point(left, top),
                Imgproc.FONT_HERSHEY_SIMPLEX, 0.5, new Scalar(0, 0, 0));
    }
}

Dnn.blobFromImage converts the camera frame into a neural network input tensor; resizing and statistical normalization are applied. Each row of the network's output tensor describes one detected object in the following order: image id, class id, confidence in the range [0, 1], and the left, top, right, bottom bounding-box coordinates. All coordinates are in the [0, 1] range and must be scaled to the image size before rendering.
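Inference runs on the default OpenCV CPU backend. If you want to experiment with other computation targets after the network is loaded, the preferable backend and target can be set explicitly. The sketch below shows the safe defaults; OpenCL, for example, is only a hint, and OpenCV falls back to the CPU if it is not usable on the device:

    // Optional: pick the computation backend/target once, right after loading.
    net.setPreferableBackend(Dnn.DNN_BACKEND_OPENCV);
    net.setPreferableTarget(Dnn.DNN_TARGET_CPU);   // or Dnn.DNN_TARGET_OPENCL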

  • Launch the application and have fun!
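If you would like to see how long a forward pass takes on your device, a minimal timing sketch using OpenCV's tick counter can be wrapped around net.forward() inside onCameraFrame (the placement and log message are illustrative):

    // Measure a single forward pass with OpenCV's tick counter.
    long start = Core.getTickCount();
    Mat detections = net.forward();
    double ms = 1000.0 * (Core.getTickCount() - start) / Core.getTickFrequency();
    Log.d(TAG, "Inference time: " + ms + " ms");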