TA的每日心情 | 奋斗 2024-4-10 08:31 |
---|
签到天数: 300 天 连续签到: 1 天 [LV.8]以坛为家I
|
本帖最后由 day_day 于 2020-12-7 20:45 编辑
(一)PC端测试
由于之前设备端版本不兼容的原因,因此想要先在PC端测试一下。PC端linux部署测试需要训练服务器版本,还需要额外申请一个序列号。
PC端编译与设备端基本相似,但运行的时候有问题,一直提示找不到库:
$ ./mysrc
./mysrc: error while loading shared libraries: libeasyedge.so.0.5.8: cannot open shared object file: No such file or directory
这是因为编译的时候cmake是指定路径来识别库的,而系统中又不像FZ3设备端直接在系统环境变量的路径内带有相关库,因此要在控制台暴露一下库的路径:
- export LD_LIBRARY_PATH=../../lib/
复制代码
(二)PC端视频流
1-PC端又可以用我最熟悉的Qt了
只需要在pro文件里面加入opencv和百度AI相关的系统库即可:
- LIBS += -L /usr/local/lib/libopencv_*.so
- LIBS += -L ../../lib/lib*.so
复制代码
2-进行一些初始化配置
- global_controller()->set_licence_key("");
- // 1. Configure log output
- EdgeLogConfig log_config;
- log_config.enable_debug = false;
- global_controller()->set_log_config(log_config);
- // 2. Inference configuration
- PaddleFluidConfig config;
- // 2.1. Predict on CPU
- config.use_gpu = false;
- // 2.2. Predict on NVIDIA GPU; be sure to download the GPU build of the SDK
- // config.use_gpu = true;
- // config.fraction_of_gpu_memory = 0.2;
- // config.device = 0;
- // config.model_dir = argv[1];
- config.model_dir = "../../../../RES";
- // 3. Load the model
- auto predictor = global_controller()->CreateEdgePredictor<PaddleFluidConfig>(config);
- // 4. Initialize the predictor; bail out if the SDK cannot start
- if (predictor->init() != EDGE_OK)
- {
- exit(-1);
- }
- // 5. (Optional) path of a single image to recognize instead of the camera
- // std::stringstream img_path;
- //// img_path << argv[2];
- // img_path << "1.jpg";
- // auto img = cv::imread(img_path.str().c_str());
- cv::Mat rgbImageL;
- cv::VideoCapture left_VideoCapture;
- // FIX: open the capture BEFORE setting the frame size — calling set() on a
- // VideoCapture that is not yet opened is silently ignored, so the original
- // 640x480 request never reached the driver.
- if(!left_VideoCapture.isOpened())
- left_VideoCapture.open(0);
- left_VideoCapture.set(cv::CAP_PROP_FRAME_WIDTH,640);
- left_VideoCapture.set(cv::CAP_PROP_FRAME_HEIGHT,480);
复制代码
3-为了更好地展示,我将其保存为视频流
- cv::Size size = cv::Size(640,480);
- // Use cv::VideoWriter::fourcc — the legacy CV_FOURCC macro was removed in
- // OpenCV 4 and is inconsistent with the cv::CAP_PROP_* API used above.
- cv::VideoWriter Writer("dect_result2.avi", cv::VideoWriter::fourcc('D', 'I', 'V', 'X'), 1, size, true);
复制代码
4-识别时输出到视频流
- // Main loop: grab a frame, run inference, draw detections/masks, write the
- // annotated frame to the AVI and show it; per-iteration latency is measured
- // with gettimeofday.
- struct timeval tpstart, tpend;
- gettimeofday(&tpstart,NULL);
- while(true)
- {
- cv::Mat img;
- left_VideoCapture >> img;
- if(img.empty()) break;
- // 6. Run inference
- std::vector<EdgeResultData> result2;
- predictor->infer(img, result2);
- if (result2.empty()) {
- std::cerr << "Result is empty: " << std::endl;
- }
- int top = 0;
- for (auto &v : result2)
- {
- std::cout << v.index << ", " << v.label << ", p:" << v.prob;
- if (predictor->model_kind() == EdgeModelKind::kObjectDetection
- || predictor->model_kind() == EdgeModelKind::kImageSegmentation
- || predictor->model_kind() == EdgeModelKind::kFaceDetection)
- {
- std::cout << " loc: "
- << v.x1 << ", " << v.y1 << ", " << v.x2 << ", " << v.y2;
- // Box coordinates are normalized [0,1]; scale to pixel positions.
- auto p1 = cv::Point(static_cast<int>(v.x1 * img.cols),
- static_cast<int>(v.y1 * img.rows));
- auto p2 = cv::Point(static_cast<int>(v.x2 * img.cols),
- static_cast<int>(v.y2 * img.rows));
- // Confidence > 0.8 drawn red, otherwise pink.
- if(v.prob>0.8)
- cv::rectangle(img, p1, p2, RED, 2);
- else
- cv::rectangle(img, p1, p2, PINK, 2);
- if (predictor->model_kind() == EdgeModelKind::kFaceDetection) {
- cv::putText(img, std::to_string(v.prob), p1, cv::FONT_HERSHEY_PLAIN, 0.8, GREEN, 1);
- } else {
- cv::putText(img, v.label, p1, cv::FONT_HERSHEY_PLAIN, 1, BLUE, 2);
- }
- }
- // Draw the mask for image segmentation
- if (predictor->model_kind() == EdgeModelKind::kImageSegmentation)
- {
- cv::Mat tmp(img.rows, img.cols, CV_8UC3, cv::Scalar(0, 0, 0));
- // Deterministic pseudo-random color per class index.
- cv::Vec3b random_color((v.index + 3) * 13 % 255,
- (v.index + 7) * 19 % 255,
- (v.index + 11) * 29 % 255);
- for(int y = 0; y < tmp.rows; ++y)
- {
- for(int x = 0; x < tmp.cols; ++x)
- {
- if (v.mask.at<uint8_t>(y, x) == 1)
- {
- tmp.at<cv::Vec3b>(y, x) = random_color;
- }
- }
- }
- // Blend the colored mask into the frame
- cv::addWeighted(tmp, 0.95, img, 1, 0.0, img);
- }
- gettimeofday(&tpend,NULL);
- std::cout << std::endl;
- }
- if (predictor->model_kind() == EdgeModelKind::kObjectDetection
- || predictor->model_kind() == EdgeModelKind::kImageSegmentation
- || predictor->model_kind() == EdgeModelKind::kFaceDetection)
- {
- // img_path << ".result.jpg";
- // std::cout << "Write result to " << img_path.str() << std::endl;
- // cv::imwrite(img_path.str(), img);
- // cv::imwrite("dect_result.jpg", img);
- Writer.write(img);
- cv::imshow("dect result", img);
- if(cv::waitKey(30)=='q') break;
- // Stop after 30 written frames (demo-length recording).
- static int i=0;
- i++;
- if(i>30) break;
- }
- // Per-iteration latency in microseconds.
- std::cout << "\tuse time:"
- << (tpend.tv_sec-tpstart.tv_sec) * 1000000 + (tpend.tv_usec - tpstart.tv_usec);
- tpstart = tpend;
- std::cout << std::endl;
- }
- Writer.release();
- std::cout << "Done" << std::endl;
复制代码
识别效果:
紫色部分是不足0.8置信度的,超过0.8置信度标为红色,可以看到效果还不错。
在识别图片的时候,PC端和设备端高性能模式下都是将近1s,但实际应用到视频的时候,只有开始的几帧耗时有些波动,其余都在300ms出头。
(三)设备端视频流尝试
设备端一上来就遭遇了滑铁卢:
显示报了一个mmap错误,然后设备干脆利落地死机了。
然后我改成录像再放到FZ3上,让opencv直接读avi视频文件,结果还是不行,依然死机:
估计是opencv的库在移植的时候没有适配解码部分的驱动。
但是转念一想,之前的帖子测试螺丝识别的时候是可以用摄像头的,打开其源码一看,果然没有那么简单:
- // Dequeue one filled buffer from the V4L2 capture queue, hand the frame to
- // process_image(), then requeue the buffer for the driver. Returns 0 in all
- // cases (including EAGAIN when no frame is ready on a non-blocking fd);
- // fatal ioctl failures terminate via errno_exit().
- int Camera::read_frame(void) {
- struct v4l2_buffer buf;
- CLEAR(buf);
- buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- buf.memory = V4L2_MEMORY_MMAP;
- if (-1 == xioctl(fd, VIDIOC_DQBUF, &buf)) {
- switch (errno) {
- case EAGAIN:
- // No buffer ready yet — not an error, just try again later.
- return 0;
- case EIO:
- /* Could ignore EIO, see spec. */
- /* fall through */
- default:
- errno_exit("VIDIOC_DQBUF");
- }
- }
- process_image(buffers[buf.index].start, buf.bytesused);
- if (-1 == xioctl(fd, VIDIOC_QBUF, &buf)) {
- errno_exit("VIDIOC_QBUF");
- }
- return 0;
- }
复制代码
这是使用v4l2来读的,初始化的时候还要初始化mmap,门槛还挺高的。
- // Request V4L2 capture buffers from the driver and mmap() each one into the
- // process (memory-mapped streaming I/O). Exits the process if the device
- // does not support MMAP streaming, grants fewer than 2 buffers, or mmap fails.
- void Camera::init_mmap(void) {
- struct v4l2_requestbuffers req;
- CLEAR(req);
- req.count = 3;
- req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- req.memory = V4L2_MEMORY_MMAP;
- if (-1 == xioctl(fd, VIDIOC_REQBUFS, &req)) {
- if (EINVAL == errno) {
- // FIX: restored the '\n' that was mangled to "mappingn" in the paste.
- fprintf(stderr, "%s does not support "
- "memory mapping\n", dev_name);
- exit(EXIT_FAILURE);
- } else {
- errno_exit("VIDIOC_REQBUFS");
- }
- }
- // The driver may grant fewer buffers than requested; need at least 2.
- if (req.count < 2) {
- fprintf(stderr, "Insufficient buffer memory on %s\n",
- dev_name);
- exit(EXIT_FAILURE);
- }
- buffers = (struct buffer*)calloc(req.count, sizeof(*buffers));
- if (!buffers) {
- // FIX: "\\n" printed a literal backslash-n; a real newline was intended.
- fprintf(stderr, "Out of memory\n");
- exit(EXIT_FAILURE);
- }
- for (n_buffers = 0; n_buffers < req.count; ++n_buffers) {
- struct v4l2_buffer buf;
- CLEAR(buf);
- buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
- buf.memory = V4L2_MEMORY_MMAP;
- buf.index = n_buffers;
- if (-1 == xioctl(fd, VIDIOC_QUERYBUF, &buf))
- errno_exit("VIDIOC_QUERYBUF");
- buffers[n_buffers].length = buf.length;
- buffers[n_buffers].start =
- mmap(NULL /* start anywhere */,
- buf.length,
- PROT_READ | PROT_WRITE /* required */,
- MAP_SHARED /* recommended */,
- fd, buf.m.offset);
- if (MAP_FAILED == buffers[n_buffers].start) {
- errno_exit("mmap");
- }
- }
- }
复制代码
。
|
|