Commit 1658f830 张士柳

1 个父辈 01b04fdd
......@@ -785,6 +785,12 @@ namespace eyemLib_Sharp
private static extern int setSkipProcessID(int pid);
#endregion
#region 测试接口
//测试接口
[DllImport("eyemLib.dll", CharSet = CharSet.None, CallingConvention = CallingConvention.Cdecl)]
private static extern int eyemAllMindImpl(EyemImage tpImage);
#endregion
#region 日志功能
// 日志回调
public delegate void TCallBack(string msg);
......@@ -856,6 +862,8 @@ namespace eyemLib_Sharp
//flag = eyemImageAbs(image1, ref tpDstImg);
//flag = eyemAllMindImpl(image);
#region Test Blob
//int ipNum;
......
#include "align.h"
namespace wechat_qrcode {
// Default-construct an aligner with 90-degree rotation disabled.
Align::Align() : rotate90_(false) {}
// Compute the perspective transform that maps the four points in `src`
// onto `dst`; caches the forward matrix (M_) and its inverse (M_inv_)
// for later use, and returns the forward matrix.
cv::Mat Align::calcWarpMatrix(const cv::Mat src, const cv::Mat dst) {
    M_ = getPerspectiveTransform(src, dst);
    M_inv_ = M_.inv();
    return M_;
}
// Map points from the cropped (and possibly transposed) patch back into
// source-image coordinates: undo the rotate90 axis swap, then add the
// crop offset recorded by crop().
std::vector<cv::Point2f> Align::warpBack(const std::vector<cv::Point2f> &dst_pts) {
    std::vector<cv::Point2f> src_pts;
    src_pts.reserve(dst_pts.size());
    for (const auto &pt : dst_pts) {
        const float sx = (rotate90_ ? pt.y : pt.x) + crop_x_;
        const float sy = (rotate90_ ? pt.x : pt.y) + crop_y_;
        src_pts.emplace_back(sx, sy);
    }
    return src_pts;
}
// Warp the input image through the cached matrix M_ into a width x height
// canvas. Pixels with no source coverage become 255 (white), the QR
// "background" value.
cv::Mat Align::crop(const cv::Mat &inputImg, const int width, const int height) {
    cv::Mat warp_dst = cv::Mat::zeros(height, width, inputImg.type());
    warpPerspective(inputImg, warp_dst, M_, warp_dst.size(), cv::INTER_LINEAR, cv::BORDER_CONSTANT, 255);
    return warp_dst;
}
// Crop an axis-aligned, padded region around the detector box.
// Padding is paddingW/paddingH as a fraction of the box size, but at
// least minPadding pixels per side, clamped to the image bounds.
// Assumes srcPts row 0 is the top-left and row 2 the bottom-right corner
// (float coordinates, truncated to int). Records crop_x_/crop_y_ so
// warpBack() can restore original coordinates; transposes the result
// when rotate90_ is set.
cv::Mat Align::crop(const cv::Mat &inputImg, const cv::Mat &srcPts, const float paddingW, const float paddingH,
    const int minPadding) {
    int x0 = srcPts.at<float>(0, 0);
    int y0 = srcPts.at<float>(0, 1);
    int x2 = srcPts.at<float>(2, 0);
    int y2 = srcPts.at<float>(2, 1);

    int width = x2 - x0 + 1;
    int height = y2 - y0 + 1;

    // padding in pixels: relative padding, floored at minPadding
    int padx = cv::max(paddingW * width, static_cast<float>(minPadding));
    int pady = cv::max(paddingH * height, static_cast<float>(minPadding));

    // clamp the padded ROI to the image rectangle
    crop_x_ = cv::max(x0 - padx, 0);
    crop_y_ = cv::max(y0 - pady, 0);
    int end_x = cv::min(x2 + padx, inputImg.cols - 1);
    int end_y = cv::min(y2 + pady, inputImg.rows - 1);

    cv::Rect crop_roi(crop_x_, crop_y_, end_x - crop_x_ + 1, end_y - crop_y_ + 1);
    cv::Mat dst = inputImg(crop_roi).clone();
    if (rotate90_) dst = dst.t();  // transpose
    return dst;
}
} // namespace wechat_qrcode
\ No newline at end of file
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Tencent is pleased to support the open source community by making WeChat QRCode available.
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
#ifndef __DETECTOR_ALIGN_HPP_
#define __DETECTOR_ALIGN_HPP_
#include <stdio.h>
#include <fstream>
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
namespace wechat_qrcode {
// Crops a detected QR-code region (with padding) and maps coordinates
// between the cropped patch and the original image.
class Align {
public:
    Align();

    // Compute and cache the perspective transform mapping src -> dst;
    // returns the forward matrix (the inverse is cached internally).
    cv::Mat calcWarpMatrix(const cv::Mat src, const cv::Mat dst);

    // Map points from the cropped (possibly transposed) patch back into
    // source-image coordinates using the recorded crop offset.
    std::vector<cv::Point2f> warpBack(const std::vector<cv::Point2f> &dst_pts);

    // Crop a padded region around the box in srcPts (row 0 = top-left,
    // row 2 = bottom-right); records the crop offset for warpBack().
    cv::Mat crop(const cv::Mat &inputImg, const cv::Mat &srcPts, const float paddingW, const float paddingH,
                 const int minPadding);

    void setRotate90(bool v) { rotate90_ = v; }

private:
    // Warp the whole image into a width x height canvas via the cached matrix.
    cv::Mat crop(const cv::Mat &inputImg, const int width, const int height);

    cv::Mat M_;      // forward perspective matrix
    cv::Mat M_inv_;  // cached inverse of M_
    // Default to 0 so warpBack() is well-defined even before crop() runs
    // (these members were previously left uninitialized).
    int crop_x_ = 0;
    int crop_y_ = 0;
    bool rotate90_ = false;
};
} // namespace wechat_qrcode
#endif // __DETECTOR_ALIGN_HPP_
\ No newline at end of file
......@@ -859,7 +859,7 @@ extern "C" {
EXPORTS int eyemReleaseModel(IntPtr &hModelID);
EXPORTS int eyemTrackFeature(EyemImage tpPrevImg, EyemImage tpNextImg, EyemRect3 *tpRois, int iRoiNum, int *ipResults, EyemImage *tpDstImg);
EXPORTS int eyemAOIForTSAV(EyemImage tpRefImg, EyemImage tpNextImg, EyemRect3 *tpRois, int iRoiNum);
EXPORTS int eyemAllMethodTest(EyemImage tpImage);
EXPORTS int eyemAllMindImpl(EyemImage tpImage);
#ifdef __cplusplus
}
......
此文件类型无法预览
......@@ -163,6 +163,7 @@
</Link>
</ItemDefinitionGroup>
<ItemGroup>
<ClInclude Include="align.h" />
<ClInclude Include="eyemBarCode.h" />
<ClInclude Include="eyemBin.h" />
<ClInclude Include="eyemCalib.h" />
......@@ -177,8 +178,12 @@
<ClInclude Include="eyemMisc.h" />
<ClInclude Include="eyemSmooth.h" />
<ClInclude Include="resource.h" />
<ClInclude Include="ssd_detector.h" />
<ClInclude Include="super_scale.h" />
<ClInclude Include="wechat_qrcode.h" />
</ItemGroup>
<ItemGroup>
<ClCompile Include="align.cpp" />
<ClCompile Include="eyemBarCode.cpp" />
<ClCompile Include="eyemBin.cpp" />
<ClCompile Include="eyemCalib.cpp" />
......@@ -196,6 +201,9 @@
<ClCompile Include="eyemMisc.cpp" />
<ClCompile Include="eyemSmooth.cpp" />
<ClCompile Include="libopencv.cpp" />
<ClCompile Include="ssd_detector.cpp" />
<ClCompile Include="super_scale.cpp" />
<ClCompile Include="wechat_qrcode.cpp" />
</ItemGroup>
<ItemGroup>
<ResourceCompile Include="eyemLib.rc" />
......
......@@ -13,6 +13,9 @@
<UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>
<Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
</Filter>
<Filter Include="源文件\wechat_qrcode">
<UniqueIdentifier>{df02fcd2-0d49-4313-a9f8-e5e0093807c6}</UniqueIdentifier>
</Filter>
</ItemGroup>
<ItemGroup>
<ClInclude Include="eyemLib.h">
......@@ -57,6 +60,18 @@
<ClInclude Include="eyemBarCode.h">
<Filter>源文件</Filter>
</ClInclude>
<ClInclude Include="wechat_qrcode.h">
<Filter>源文件</Filter>
</ClInclude>
<ClInclude Include="align.h">
<Filter>源文件\wechat_qrcode</Filter>
</ClInclude>
<ClInclude Include="ssd_detector.h">
<Filter>源文件\wechat_qrcode</Filter>
</ClInclude>
<ClInclude Include="super_scale.h">
<Filter>源文件\wechat_qrcode</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<ClCompile Include="eyemLib.cpp">
......@@ -110,6 +125,18 @@
<ClCompile Include="eyemBarCode.cpp">
<Filter>源文件</Filter>
</ClCompile>
<ClCompile Include="wechat_qrcode.cpp">
<Filter>源文件</Filter>
</ClCompile>
<ClCompile Include="align.cpp">
<Filter>源文件\wechat_qrcode</Filter>
</ClCompile>
<ClCompile Include="ssd_detector.cpp">
<Filter>源文件\wechat_qrcode</Filter>
</ClCompile>
<ClCompile Include="super_scale.cpp">
<Filter>源文件\wechat_qrcode</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ResourceCompile Include="eyemLib.rc">
......
......@@ -1614,7 +1614,7 @@ int eyemCountObject(EyemImage tpImage, EyemRect tpRoi, const char *fileName, dou
delete[] ucpTrackLabel;
ucpTrackLabel = NULL;
}
}
} // 分料盘计数
//输出结果
const int bufSize = 64;
char cTrayNum[bufSize * 4] = { 0 };
......@@ -4889,8 +4889,8 @@ int eyemCountObjectIrregularPartsE(EyemImage tpImage, EyemRect tpRoi, const char
#endif
//判断是否追踪终止
if (trayEnd) {
//判断是否追踪终止(增加匹配分数小于0.15被判定不是元件)
if (trayEnd || maxyyu < 0.15) {
//不再判断,大概率已经终止
found = false;
}
......@@ -5055,9 +5055,8 @@ int eyemCountObjectIrregularPartsE(EyemImage tpImage, EyemRect tpRoi, const char
#endif
//判断是否追踪终止
if (trayEnd)
{
//判断是否追踪终止(增加匹配分数小于0.15被判定不是元件)
if (trayEnd || maxyyu < 0.15) {
//不再判断,大概率已经终止
found = false;
}
......@@ -5312,12 +5311,16 @@ int eyemInitModel(const char *ccTplName, IntPtr *hModelID)
cv::glob(ccTplName, fileNames);
logger.t(logModule + "获取文件名...");
//判断文件
if (fileNames.size() <= 0)
return FUNC_CANNOT_CALC;
//载入所有模板
std::vector<EyemModelID> *tpModelID = new std::vector<EyemModelID>();
//判断文件
if (fileNames.size() <= 0) {
//输出
*hModelID = reinterpret_cast<IntPtr>(tpModelID);
return FUNC_OK;
}
for (std::vector<std::string>::iterator it = fileNames.begin(); it != fileNames.end(); ++it)
{
std::string fileName = (*it);
......@@ -5871,13 +5874,41 @@ int eyemAOIForTSAV(EyemImage tpRefImg, EyemImage tpNextImg, EyemRect3 *tpRois, i
return FUNC_OK;
}
int eyemAllMethodTest(EyemImage tpImage)
// Test entry point (renamed from eyemAllMethodTest): scratch function for
// exercising new algorithms; not part of the production pipeline.
int eyemAllMindImpl(EyemImage tpImage)
{
    // Wrap the caller-owned buffer, then clone so we own the pixel data.
    cv::Mat image = cv::Mat(tpImage.iHeight, tpImage.iWidth, MAKETYPE(tpImage.iDepth, tpImage.iChannels), tpImage.vpImage).clone();
    if (image.empty())
        return FUNC_IMAGE_NOT_EXIST;
#pragma region wechat_qrcode
    // Disabled experiment: WeChat QR-code detection/decoding.
    //cv::Ptr<wechat_qrcode::WeChatQRCode> detector;
    //try {
    //    detector = cv::makePtr<wechat_qrcode::WeChatQRCode>(".\\opencv_3rdparty-wechat_qrcode\\detect.prototxt", ".\\opencv_3rdparty-wechat_qrcode\\detect.caffemodel",
    //        ".\\opencv_3rdparty-wechat_qrcode\\sr.prototxt", ".\\opencv_3rdparty-wechat_qrcode\\sr.caffemodel");
    //}
    //catch (const std::exception& e) {
    //    std::cout << e.what() << std::endl;
    //    return 0;
    //}
    //std::string prevstr = "";
    //std::vector<cv::Mat> points;
    //auto res = detector->detectAndDecode(image, points);
    //for (const auto& t : res) std::cout << t << std::endl;
#pragma endregion
    // Template-matching scaffolding (integral image + template statistics).
    cv::Mat sum, sqsum, templ;
    cv::Scalar templMean, templSdv;
    double *q0 = 0, *q1 = 0, *q2 = 0, *q3 = 0;  // NOTE(review): currently unused
    double templNorm = 0, templSum2 = 0;        // NOTE(review): currently unused
    cv::integral(image, sum, sqsum, CV_64F);
    // NOTE(review): `templ` is still empty here — cv::meanStdDev on an empty
    // Mat asserts/throws; confirm this call is intentional before shipping.
    cv::meanStdDev(templ, templMean, templSdv);
    return FUNC_OK;
}
\ No newline at end of file
......@@ -12,6 +12,8 @@
#include <tbb\tbb.h>
#include "wechat_qrcode.h"
constexpr double c = PI / 180.;
std::mutex mtx_misc;
......
#include "ssd_detector.h"
#define CLIP(x, x1, x2) cv::max(x1, cv::min(x, x2))
namespace wechat_qrcode {
// Load the Caffe detection model. Always returns 0: readNetFromCaffe
// throws on failure rather than returning an error code.
int SSDDetector::init(const std::string& proto_path, const std::string& model_path) {
    net_ = cv::dnn::readNetFromCaffe(proto_path, model_path);
    return 0;
}
// Run the SSD detector on `img` (resized to target_width x target_height)
// and return one 4x2 CV_32FC1 matrix per detected QR code, holding the
// axis-aligned box corners (TL, TR, BR, BL) in original-image coordinates.
std::vector<cv::Mat> SSDDetector::forward(cv::Mat img, const int target_width, const int target_height) {
    int img_w = img.cols;
    int img_h = img.rows;
    cv::Mat input;
    resize(img, input, cv::Size(target_width, target_height), 0, 0, cv::INTER_CUBIC);

    // scale pixels to [0,1]; zero mean, no channel swap, no crop
    cv::dnn::blobFromImage(input, input, 1.0 / 255, cv::Size(input.cols, input.rows), { 0.0f, 0.0f, 0.0f },
        false, false);
    net_.setInput(input, "data");
    auto prob = net_.forward("detection_output");
    std::vector<cv::Mat> point_list;
    // the shape is (1,1,100,7)=>(batch,channel,count,dim)
    for (int row = 0; row < prob.size[2]; row++) {
        const float* prob_score = prob.ptr<float>(0, 0, row);
        // prob_score[0] is not used.
        // prob_score[1]==1 stands for qrcode
        // prob_score[2] is the detection confidence; a tiny threshold guards
        // against spurious rows (see https://github.com/opencv/opencv_contrib/issues/2877)
        if (prob_score[1] == 1 && prob_score[2] > 1E-5) {
            auto point = cv::Mat(4, 2, CV_32FC1);
            // prob_score[3..6] are normalized (x0, y0, x1, y1); scale to
            // pixels and clamp into the image rectangle
            float x0 = CLIP(prob_score[3] * img_w, 0.0f, img_w - 1.0f);
            float y0 = CLIP(prob_score[4] * img_h, 0.0f, img_h - 1.0f);
            float x1 = CLIP(prob_score[5] * img_w, 0.0f, img_w - 1.0f);
            float y1 = CLIP(prob_score[6] * img_h, 0.0f, img_h - 1.0f);
            point.at<float>(0, 0) = x0;
            point.at<float>(0, 1) = y0;
            point.at<float>(1, 0) = x1;
            point.at<float>(1, 1) = y0;
            point.at<float>(2, 0) = x1;
            point.at<float>(2, 1) = y1;
            point.at<float>(3, 0) = x0;
            point.at<float>(3, 1) = y1;
            point_list.push_back(point);
        }
    }
    return point_list;
}
}
\ No newline at end of file
#ifndef __DETECTOR_SSD_DETECTOR_HPP_
#define __DETECTOR_SSD_DETECTOR_HPP_
#include <stdio.h>
#include "opencv2/dnn.hpp"
#include "opencv2/imgproc.hpp"
namespace wechat_qrcode {
// Thin wrapper around a Caffe SSD network that detects QR-code boxes.
class SSDDetector {
public:
    // Use defaulted special members instead of empty bodies with stray ';'.
    SSDDetector() = default;
    ~SSDDetector() = default;

    // Load the detection network from a Caffe prototxt/model pair.
    // Returns 0 (readNetFromCaffe throws on failure).
    int init(const std::string& proto_path, const std::string& model_path);

    // Detect QR codes at the given working resolution; returns one 4x2
    // CV_32FC1 corner matrix per detection, in the frame of `img`.
    std::vector<cv::Mat> forward(cv::Mat img, const int target_width, const int target_height);

private:
    cv::dnn::Net net_;
};
}
#endif // __DETECTOR_SSD_DETECTOR_HPP_
#include "super_scale.h"
#define CLIP(x, x1, x2) cv::max(x1, cv::min(x, x2))
namespace wechat_qrcode {
// Load the Caffe super-resolution network and mark it ready.
// Always returns 0: readNetFromCaffe throws on failure.
int SuperScale::init(const std::string &proto_path, const std::string &model_path) {
    srnet_ = cv::dnn::readNetFromCaffe(proto_path, model_path);
    net_loaded_ = true;
    return 0;
}
// Rescale `src` by `scale`:
//   scale == 1.0 -> returned unchanged;
//   scale == 2.0 -> CNN super-resolution when enabled, loaded, and the
//                   image is small enough (sqrt(area) < sr_max_size),
//                   otherwise bicubic upsampling;
//   scale <  1.0 -> area-interpolation downsampling.
cv::Mat SuperScale::processImageScale(const cv::Mat &src, float scale, const bool &use_sr,
                                      int sr_max_size) {
    cv::Mat dst = src;
    if (scale == 1.0) {  // identity: hand back the source
        return dst;
    }

    const int width = src.cols;
    const int height = src.rows;
    if (scale == 2.0) {  // upsample
        const bool small_enough = (int)sqrt(width * height * 1.0) < sr_max_size;
        if (use_sr && small_enough && net_loaded_) {
            if (superResoutionScale(src, dst) == 0) return dst;
        }
        // SR disabled, unavailable, or failed: bicubic fallback
        resize(src, dst, cv::Size(), scale, scale, cv::INTER_CUBIC);
    } else if (scale < 1.0) {  // downsample
        resize(src, dst, cv::Size(), scale, scale, cv::INTER_AREA);
    }
    return dst;
}
// Run the SR network on `src` (assumes a single-channel 8-bit image —
// TODO confirm with callers) and write the upscaled result into `dst`
// as CV_8UC1, sized from the network output. Returns 0 on success.
int SuperScale::superResoutionScale(const cv::Mat &src, cv::Mat &dst) {
    cv::Mat blob;
    // scale pixels to [0,1]; zero mean, no channel swap, no crop
    cv::dnn::blobFromImage(src, blob, 1.0 / 255, cv::Size(src.cols, src.rows), { 0.0f }, false, false);
    srnet_.setInput(blob);
    auto prob = srnet_.forward();

    // output blob shape is (1, 1, H, W); convert back to 8-bit with clipping
    dst = cv::Mat(prob.size[2], prob.size[3], CV_8UC1);
    for (int row = 0; row < prob.size[2]; row++) {
        const float *prob_score = prob.ptr<float>(0, 0, row);
        for (int col = 0; col < prob.size[3]; col++) {
            float pixel = prob_score[col] * 255.0;
            dst.at<uint8_t>(row, col) = static_cast<uint8_t>(CLIP(pixel, 0.0f, 255.0f));
        }
    }
    return 0;
}
} // namespace wechat_qrcode
\ No newline at end of file
#ifndef __SCALE_SUPER_SCALE_HPP_
#define __SCALE_SUPER_SCALE_HPP_
#include <stdio.h>
#include "opencv2/dnn.hpp"
#include "opencv2/imgproc.hpp"
namespace wechat_qrcode {
// Optional CNN-based 2x super-resolution with a bicubic fallback.
class SuperScale {
public:
    // Use defaulted special members instead of empty bodies with stray ';'.
    SuperScale() = default;
    ~SuperScale() = default;

    // Load the Caffe SR model; returns 0 (throws on read failure).
    int init(const std::string &proto_path, const std::string &model_path);

    // Rescale src by `scale` (1.0 identity; 2.0 upsample via SR when
    // enabled/loaded, else bicubic; <1.0 area downsample). `sr_max_size`
    // bounds sqrt(image area) for SR eligibility.
    cv::Mat processImageScale(const cv::Mat &src, float scale, const bool &use_sr, int sr_max_size = 160);

private:
    cv::dnn::Net srnet_;
    bool net_loaded_ = false;  // set once init() has loaded srnet_

    // Run the SR net on an 8-bit single-channel image; returns 0 on success.
    int superResoutionScale(const cv::Mat &src, cv::Mat &dst);
};
} // namespace wechat_qrcode
#endif // __SCALE_SUPER_SCALE_HPP_
\ No newline at end of file
#include "wechat_qrcode.h"
#include "align.h"
#include "ssd_detector.h"
#include "super_scale.h"
namespace wechat_qrcode {
// Private implementation of the QR pipeline (pimpl target of WeChatQRCode).
class WeChatQRCode::Impl {
public:
    Impl() {}
    ~Impl() {}
    /**
     * @brief detect QR codes from the given image
     *
     * @param img supports grayscale or color (BGR) image.
     * @return vector<Mat> detected QR code bounding boxes.
     */
    std::vector<cv::Mat> detect(const cv::Mat& img);
    /**
     * @brief decode QR codes from detected points
     *
     * @param img supports grayscale or color (BGR) image.
     * @param candidate_points detected points. we name them "candidate points"
     * because not every candidate qrcode can be decoded.
     * @param points successfully decoded qrcodes with bounding box points.
     * @return vector<string>
     */
    std::vector<std::string> decode(const cv::Mat& img, std::vector<cv::Mat>& candidate_points,
                                    std::vector<cv::Mat>& points);
    // Run the SSD detector at a normalized working resolution; returns 0.
    int applyDetector(const cv::Mat& img, std::vector<cv::Mat>& points);
    // Crop a padded region around one detected box.
    cv::Mat cropObj(const cv::Mat& img, const cv::Mat& point, Align& aligner);
    // Empirical list of scales to try when decoding.
    std::vector<float> getScaleList(const int width, const int height);

    std::shared_ptr<SSDDetector> detector_;
    std::shared_ptr<SuperScale> super_resolution_model_;
    // Default both flags to false so they are well-defined even if a caller
    // constructs Impl directly (previously they were uninitialized).
    bool use_nn_detector_ = false, use_nn_sr_ = false;
};
// Construct the pipeline. The CNN detector is enabled only when both of its
// model paths are non-empty; otherwise detection degrades to "whole image".
// The super-resolution helper is always constructed: it falls back to
// bicubic scaling when its model files are absent.
WeChatQRCode::WeChatQRCode(const cv::String& detector_prototxt_path,
                           const cv::String& detector_caffe_model_path,
                           const cv::String& super_resolution_prototxt_path,
                           const cv::String& super_resolution_caffe_model_path) {
    p = cv::makePtr<WeChatQRCode::Impl>();
    if (!detector_caffe_model_path.empty() && !detector_prototxt_path.empty()) {
        // initialize detector model (caffe)
        p->use_nn_detector_ = true;
        p->detector_ = std::make_shared<SSDDetector>();
        auto ret = p->detector_->init(detector_prototxt_path, detector_caffe_model_path);
        CV_Assert(ret == 0);
    }
    else {
        p->use_nn_detector_ = false;
        p->detector_ = nullptr;  // prefer nullptr over NULL
    }
    // initialize super_resolution_model
    // it could also support non model weights by cubic resizing
    // so, we initialize it first.
    p->super_resolution_model_ = std::make_shared<SuperScale>();
    if (!super_resolution_prototxt_path.empty() && !super_resolution_caffe_model_path.empty()) {
        p->use_nn_sr_ = true;
        // initialize dnn model (caffe format)
        auto ret = p->super_resolution_model_->init(super_resolution_prototxt_path,
                                                    super_resolution_caffe_model_path);
        CV_Assert(ret == 0);
    }
    else {
        p->use_nn_sr_ = false;
    }
}
// Detect and decode QR codes in `img`. Returns the decoded strings; when
// `points` is requested it receives one 4x2 CV_32FC2 corner matrix per
// successfully decoded code. Images 20px or smaller on a side are rejected.
// NOTE(review): Impl::decode currently has its decoder call commented out,
// so this may always return an empty list — confirm before relying on it.
std::vector<std::string> WeChatQRCode::detectAndDecode(cv::InputArray img, cv::OutputArrayOfArrays points) {
    CV_Assert(!img.empty());
    CV_CheckDepthEQ(img.depth(), CV_8U, "");
    if (img.cols() <= 20 || img.rows() <= 20) {
        return std::vector<std::string>();  // image data is not enough for providing reliable results
    }
    // the pipeline works on grayscale; convert 3/4-channel inputs first
    cv::Mat input_img;
    int incn = img.channels();
    CV_Check(incn, incn == 1 || incn == 3 || incn == 4, "");
    if (incn == 3 || incn == 4) {
        cvtColor(img, input_img, cv::COLOR_BGR2GRAY);
    }
    else {
        input_img = img.getMat();
    }
    auto candidate_points = p->detect(input_img);
    auto res_points = std::vector<cv::Mat>();
    auto ret = p->decode(input_img, candidate_points, res_points);
    // opencv type convert: expose each corner matrix to the caller as CV_32FC2
    std::vector<cv::Mat> tmp_points;
    if (points.needed()) {
        for (size_t i = 0; i < res_points.size(); i++) {
            cv::Mat tmp_point;
            tmp_points.push_back(tmp_point);
            res_points[i].convertTo(((cv::OutputArray)tmp_points[i]), CV_32FC2);
        }
        points.createSameSize(tmp_points, CV_32FC2);
        points.assign(tmp_points);
    }
    return ret;
};
// Try to decode each candidate region: crop it (when the CNN detector
// supplied real boxes), then retry at several scales.
// NOTE(review): the DecoderMgr call below is commented out, so this loop
// does the cropping/scaling work but never appends to decode_results or
// `points` — the return value is always empty until it is restored.
std::vector<std::string> WeChatQRCode::Impl::decode(const cv::Mat& img, std::vector<cv::Mat>& candidate_points,
                                                    std::vector<cv::Mat>& points) {
    if (candidate_points.size() == 0) {
        return std::vector<std::string>();
    }
    std::vector<std::string> decode_results;
    for (auto& point : candidate_points) {
        cv::Mat cropped_img;
        if (use_nn_detector_) {
            // real detector box: crop a padded region around it
            Align aligner;
            cropped_img = cropObj(img, point, aligner);
        }
        else {
            // no detector: the "candidate" is the whole image
            cropped_img = img;
        }
        // scale_list contains different scale ratios
        auto scale_list = getScaleList(cropped_img.cols, cropped_img.rows);
        for (auto cur_scale : scale_list) {
            cv::Mat scaled_img =
                super_resolution_model_->processImageScale(cropped_img, cur_scale, use_nn_sr_);
            std::string result;
            //DecoderMgr decodemgr;
            //auto ret = decodemgr.decodeImage(scaled_img, use_nn_detector_, result);
            //if (ret == 0) {
            //    decode_results.push_back(result);
            //    points.push_back(point);
            //    break;
            //}
        }
    }
    return decode_results;
}
// Locate QR-code candidates. With the CNN detector enabled this runs the
// SSD network; otherwise the full image is returned as the one candidate.
std::vector<cv::Mat> WeChatQRCode::Impl::detect(const cv::Mat& img) {
    std::vector<cv::Mat> points;
    if (use_nn_detector_) {
        // use cnn detector
        auto status = applyDetector(img, points);
        CV_Assert(status == 0);
        return points;
    }
    // if there is no detector, use the full image as input
    const float right = float(img.cols - 1);
    const float bottom = float(img.rows - 1);
    cv::Mat point = (cv::Mat_<float>(4, 2) <<
        0.0f,  0.0f,
        right, 0.0f,
        right, bottom,
        0.0f,  bottom);
    points.push_back(point);
    return points;
}
// Run the SSD detector at a working resolution normalized so the image
// area is roughly minInputSize^2, preserving the aspect ratio. Returns 0.
int WeChatQRCode::Impl::applyDetector(const cv::Mat& img, std::vector<cv::Mat>& points) {
    // hard code input size
    const int minInputSize = 400;
    const double area_ratio = img.cols * img.rows * 1.0 / (minInputSize * minInputSize);
    const float shrink = (float)sqrt(area_ratio);
    const int detect_width = cvRound((float)img.cols / shrink);
    const int detect_height = cvRound((float)img.rows / shrink);
    points = detector_->forward(img, detect_width, detect_height);
    return 0;
}
// Crop a padded region around one detected box: padding is 10% of the box
// size per axis, but never less than 15 pixels per side.
cv::Mat WeChatQRCode::Impl::cropObj(const cv::Mat& img, const cv::Mat& point, Align& aligner) {
    // make some padding to boost the qrcode details recall.
    const float pad_w = 0.1f;
    const float pad_h = 0.1f;
    const int pad_min = 15;
    return aligner.crop(img, point, pad_w, pad_h, pad_min);
}
// Empirical scale schedule: small images also try 2x (SR) upsampling,
// mid-size images try native then half, large images try half first.
std::vector<float> WeChatQRCode::Impl::getScaleList(const int width, const int height) {
    const bool is_small = width < 320 || height < 320;
    const bool is_mid = width < 640 && height < 640;
    if (is_small) return {1.0f, 2.0f, 0.5f};
    if (is_mid) return {1.0f, 0.5f};
    return {0.5f, 1.0f};
}
} // namespace wechat_qrcode
\ No newline at end of file
#pragma once
//
// eyemSmoothͷ
//
#ifndef __OPENCV_WECHAT_QRCODE_HPP__
#define __OPENCV_WECHAT_QRCODE_HPP__
#define CLIP(x, x1, x2) cv::max(x1, cv::min(x, x2))
#include "opencv2/core.hpp"
namespace wechat_qrcode {
// Public facade for the WeChat QR-code pipeline: optional CNN detector,
// optional CNN super-resolution, with the implementation behind a pimpl.
class WeChatQRCode {
public:
    // Empty model paths disable the corresponding neural network; detection
    // then falls back to the whole image and scaling to bicubic interpolation.
    WeChatQRCode(const std::string& detector_prototxt_path = "",
                 const std::string& detector_caffe_model_path = "",
                 const std::string& super_resolution_prototxt_path = "",
                 const std::string& super_resolution_caffe_model_path = "");
    // Defaulted destructor instead of an empty body with a stray ';'.
    ~WeChatQRCode() = default;

    // Returns the decoded strings; `points`, when requested, receives one
    // 4x2 CV_32FC2 corner matrix per successfully decoded code.
    std::vector<std::string> detectAndDecode(cv::InputArray img,
                                             cv::OutputArrayOfArrays points = cv::noArray());

protected:
    class Impl;
    cv::Ptr<Impl> p;  // pimpl
};
} // namespace wechat_qrcode
#endif/* __OPENCV_WECHAT_QRCODE_HPP__ */
\ No newline at end of file
支持 Markdown 格式
你添加了 0 到此讨论。请谨慎行事。
Finish editing this message first!