This article collects code examples of the org.opencv.core.Rect.br method in Java and shows how Rect.br is used in practice. The examples are drawn from selected open-source projects on platforms such as GitHub, Stack Overflow, and Maven, so they should serve as useful references. Details of the Rect.br method:
Package path: org.opencv.core.Rect
Class name: Rect
Method name: br
Method description: none provided by the source.
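Since the source gives no description, here is a minimal, self-contained sketch (not taken from any of the projects below; the class name RectBrExample is made up for illustration) of what br() does: it returns the bottom-right corner of the rectangle as a Point, computed as (x + width, y + height), and is typically paired with tl(), the top-left corner, when drawing a rectangle with Imgproc.rectangle.

import org.opencv.core.Point;
import org.opencv.core.Rect;

public class RectBrExample {
    public static void main(String[] args) {
        // A 100x50 rectangle whose top-left corner is at (10, 20).
        Rect rect = new Rect(10, 20, 100, 50);

        // tl() returns the top-left corner, br() the bottom-right corner.
        Point tl = rect.tl();   // (10.0, 20.0)
        Point br = rect.br();   // (x + width, y + height) = (110.0, 70.0)

        System.out.println("tl = " + tl + ", br = " + br);
    }
}

Rect and Point are plain Java value classes in the OpenCV bindings, so this snippet only needs the OpenCV Java jar on the classpath; drawing calls such as Imgproc.rectangle additionally require the native OpenCV library to be loaded.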
Code example source: raulh82vlc/Image-Detection-Samples
public static void drawEyeRectangle(Rect eyeArea, Mat matrixRgba) {
    Imgproc.rectangle(matrixRgba, eyeArea.tl(), eyeArea.br(),
            new Scalar(255, 0, 0, 255), 2);
}
Code example source: JavaOpenCVBook/code
private void drawBoundingBox(MatOfPoint currentContour) {
    Rect rectangle = Imgproc.boundingRect(currentContour);
    Imgproc.rectangle(image, rectangle.tl(), rectangle.br(), new Scalar(255, 0, 0), 1);
}
Code example source: raulh82vlc/Image-Detection-Samples
public static void drawFaceShapes(Rect face, Mat matrixRGBA) {
    Point start = face.tl();
    int h = (int) start.y + (face.height / 2);
    int w = (int) start.x + (face.width / 2);
    Imgproc.rectangle(matrixRGBA, start, face.br(),
            FACE_RECT_COLOR, 3);
    Point center = new Point(w, h);
    Imgproc.circle(matrixRGBA, center, 10, new Scalar(255, 0, 0, 255), 3);
}
Code example source: nroduit/Weasis
private void copyTileFromSource(Mat sourceImage, Mat tileInput, Rect tile, int mBorderType) {
    Point tl = tile.tl();
    Point br = tile.br();
    Rect paddedTile = new Rect(tile.tl(), tile.br());
    assert (paddedTile.x >= 0);
    assert (paddedTile.y >= 0);
    assert (paddedTile.br().x < sourceImage.cols());
    assert (paddedTile.br().y < sourceImage.rows());
Code example source: kongqw/OpenCVForAndroid
@Override
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    // Runs on a worker thread (not the UI thread)
    mRgba = inputFrame.rgba();
    mGray = inputFrame.gray();
    for (ObjectDetector detector : mObjectDetects) {
        // Detect targets in the grayscale frame
        Rect[] object = detector.detectObject(mGray, mObject);
        for (Rect rect : object) {
            Imgproc.rectangle(mRgba, rect.tl(), rect.br(), detector.getRectColor(), 3);
        }
    }
    return mRgba;
}
Code example source: nroduit/Weasis
private void copySourceTile(Mat sourceImage, Mat tileInput, Rect tile) {
    Point tl = tile.tl();
    Point br = tile.br();
    Rect paddedTile = new Rect(tile.tl(), tile.br());
    assert (paddedTile.x >= 0);
    assert (paddedTile.y >= 0);
    assert (paddedTile.br().x < sourceImage.cols());
    assert (paddedTile.br().y < sourceImage.rows());
Code example source: hschott/Camdroid
protected void execute() {
    out = gray();
    Imgproc.equalizeHist(out, out);
    synchronized (mog) {
        mog.apply(out, this.mask, (double) (-10 + learning_rate) / 10);
    }
    Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_DILATE,
            new Size(3, 3));
    Imgproc.dilate(mask, mask, kernel);
    ArrayList<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    Imgproc.findContours(this.mask, contours, new Mat(),
            Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
    double maxheight = object_max_size * this.in.height() / 100;
    double minheight = object_min_size * this.in.height() / 100;
    Iterator<MatOfPoint> each = contours.iterator();
    while (each.hasNext()) {
        MatOfPoint contour = each.next();
        Rect rect = Imgproc.boundingRect(contour);
        if (rect.height > minheight && rect.height < maxheight) {
            Imgproc.rectangle(out, rect.tl(), rect.br(), new Scalar(255,
                    0, 0), 1);
        }
    }
}
Code example source: abhn/marvel
Core.rectangle(mRgba, facesArray[i].tl(), facesArray[i].br(), FACE_RECT_COLOR, 3);
Code example source: SOFTPOWER1991/OpenCVCheck
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    mRgba = inputFrame.rgba();
    mGray = inputFrame.gray();
    if (mAbsoluteFaceSize == 0) {
        int height = mGray.rows();
        if (Math.round(height * mRelativeFaceSize) > 0) {
            mAbsoluteFaceSize = Math.round(height * mRelativeFaceSize);
        }
        mNativeDetector.setMinFaceSize(mAbsoluteFaceSize);
    }
    MatOfRect faces = new MatOfRect();
    if (mDetectorType == JAVA_DETECTOR) {
        if (mJavaDetector != null)
            mJavaDetector.detectMultiScale(mGray, faces, 1.1, 2, 2, // TODO: objdetect.CV_HAAR_SCALE_IMAGE
                    new Size(mAbsoluteFaceSize, mAbsoluteFaceSize), new Size());
    } else if (mDetectorType == NATIVE_DETECTOR) {
        if (mNativeDetector != null)
            mNativeDetector.detect(mGray, faces);
    } else {
        Log.e(TAG, "Detection method is not selected!");
    }
    Rect[] facesArray = faces.toArray();
    for (int i = 0; i < facesArray.length; i++)
        Imgproc.rectangle(mRgba, facesArray[i].tl(), facesArray[i].br(), FACE_RECT_COLOR, 3);
    return mRgba;
}
Code example source: SOFTPOWER1991/OpenCVCheck
private void checkFace() {
    Bitmap imgtemp = BitmapFactory.decodeResource(getResources(), R.mipmap.twop);
    Utils.bitmapToMat(imgtemp, mRgba);
    Mat mat1 = new Mat();
    Utils.bitmapToMat(imgtemp, mat1);
    Imgproc.cvtColor(mat1, mGray, Imgproc.COLOR_BGR2GRAY);
    if (mAbsoluteFaceSize == 0) {
        int height = mGray.rows();
        if (Math.round(height * mRelativeFaceSize) > 0) {
            mAbsoluteFaceSize = Math.round(height * mRelativeFaceSize);
        }
        mNativeDetector.setMinFaceSize(mAbsoluteFaceSize);
    }
    MatOfRect faces = new MatOfRect();
    if (mDetectorType == JAVA_DETECTOR) {
        if (mJavaDetector != null)
            mJavaDetector.detectMultiScale(mGray, faces, 1.1, 2, 2, // TODO: objdetect.CV_HAAR_SCALE_IMAGE
                    new Size(mAbsoluteFaceSize, mAbsoluteFaceSize), new Size());
    } else if (mDetectorType == NATIVE_DETECTOR) {
        if (mNativeDetector != null)
            mNativeDetector.detect(mGray, faces);
    } else {
        Log.e(TAG, "Detection method is not selected!");
    }
    Rect[] facesArray = faces.toArray();
    for (int i = 0; i < facesArray.length; i++)
        Imgproc.rectangle(mRgba, facesArray[i].tl(), facesArray[i].br(), FACE_RECT_COLOR, 3);
    Utils.matToBitmap(mRgba, imgtemp, true);
    imageView.setImageBitmap(imgtemp);
}
Code example source: SOFTPOWER1991/OpenCVCheck
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    mRgba = inputFrame.rgba();
    mGray = inputFrame.gray();
    if (mAbsoluteFaceSize == 0) {
        int height = mGray.rows();
        if (Math.round(height * mRelativeFaceSize) > 0) {
            mAbsoluteFaceSize = Math.round(height * mRelativeFaceSize);
        }
        mNativeDetector.setMinFaceSize(mAbsoluteFaceSize);
    }
    MatOfRect faces = new MatOfRect();
    if (mDetectorType == JAVA_DETECTOR) {
        if (mJavaDetector != null)
            mJavaDetector.detectMultiScale(mGray, faces, 1.1, 2, 2, // TODO: objdetect.CV_HAAR_SCALE_IMAGE
                    new Size(mAbsoluteFaceSize, mAbsoluteFaceSize), new Size());
    } else if (mDetectorType == NATIVE_DETECTOR) {
        if (mNativeDetector != null)
            mNativeDetector.detect(mGray, faces);
    } else {
        Log.e(TAG, "Detection method is not selected!");
    }
    Rect[] facesArray = faces.toArray();
    for (int i = 0; i < facesArray.length; i++)
        Imgproc.rectangle(mRgba, facesArray[i].tl(), facesArray[i].br(), FACE_RECT_COLOR, 3);
    return mRgba;
}
Code example source: SouvDc/face-detection
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    mRgba = inputFrame.rgba();
    mGray = inputFrame.gray();
    if (mAbsoluteFaceSize == 0) {
        int height = mGray.rows();
        if (Math.round(height * mRelativeFaceSize) > 0) {
            mAbsoluteFaceSize = Math.round(height * mRelativeFaceSize);
        }
        mNativeDetector.setMinFaceSize(mAbsoluteFaceSize);
    }
    MatOfRect faces = new MatOfRect();
    if (mDetectorType == JAVA_DETECTOR) {
        if (mJavaDetector != null)
            mJavaDetector.detectMultiScale(mGray, faces, 1.1, 2, 2, // TODO: objdetect.CV_HAAR_SCALE_IMAGE
                    new Size(mAbsoluteFaceSize, mAbsoluteFaceSize), new Size());
    } else if (mDetectorType == NATIVE_DETECTOR) {
        if (mNativeDetector != null)
            mNativeDetector.detect(mGray, faces);
    } else {
        Log.e(TAG, "Detection method is not selected!");
    }
    Rect[] facesArray = faces.toArray();
    for (int i = 0; i < facesArray.length; i++)
        Imgproc.rectangle(mRgba, facesArray[i].tl(), facesArray[i].br(), FACE_RECT_COLOR, 3);
    return mRgba;
}
Code example source: SOFTPOWER1991/OpenCVCheck
Imgproc.rectangle(mRgba, facesArray[i].tl(), facesArray[i].br(), FACE_RECT_COLOR, 3);
Code example source: kongqw/OpenCVForAndroid
public RotatedRect objectTracking(Mat mRgba) {
    rgba2Hsv(mRgba);
    updateHueImage();
    // Compute the back projection of the histogram.
    // Imgproc.calcBackProject(hueList, new MatOfInt(0), hist, prob, ranges, 255);
    Imgproc.calcBackProject(hueList, new MatOfInt(0), hist, prob, ranges, 1.0);
    // Per-element bitwise conjunction of the two arrays (dst = src1 & src2).
    Core.bitwise_and(prob, mask, prob, new Mat());
    // Track the target
    rotatedRect = Video.CamShift(prob, trackRect, new TermCriteria(TermCriteria.EPS, 10, 1));
    if (null != mOnCalcBackProjectListener) {
        mOnCalcBackProjectListener.onCalcBackProject(prob);
    }
    // Use the region found in this frame as the tracking window for the next frame
    trackRect = rotatedRect.boundingRect();
    Imgproc.rectangle(prob, trackRect.tl(), trackRect.br(), new Scalar(255, 255, 0, 255), 6);
    Log.i(TAG, "objectTracking: width: " + trackRect.width + " height: " + trackRect.height + " angle: " + rotatedRect.angle);
    return rotatedRect;
}
Code example source: hschott/Camdroid
protected void execute() {
    if (!classifierPath.equals(lastUsedClassifierPath)) {
        detector.load(classifierPath);
        lastUsedClassifierPath = classifierPath;
    }
    if (detector.empty()) {
        return;
    }
    out = gray();
    double min = (double) this.in.height() * object_min_size / 100;
    double max = (double) this.in.height() * object_max_size / 100;
    MatOfRect rects = new MatOfRect();
    detector.detectMultiScale(out, rects,
            1 + (double) scale_factor / 100, min_neighbors, 0,
            new Size(min, min), new Size(max, max));
    for (Rect rect : rects.toArray()) {
        Imgproc.rectangle(out, rect.tl(), rect.br(),
                new Scalar(255, 0, 0), 1);
    }
}
Code example source: kongqw/OpenCVForAndroid
Imgproc.rectangle(mRgba, rect.tl(), rect.br(), TRACKING_RECT_COLOR, 3);
if (null != mOnObjectTrackingListener) {
    Point center = rotatedRect.center;
Code example source: nroduit/Weasis
private void copyTileToResultImage(Mat tileOutput, Mat resultImage, Rect srcTile, Rect dstTile) {
    Point br = dstTile.br();
    if (br.x >= resultImage.cols()) {
        dstTile.width -= br.x - resultImage.cols();
        srcTile.width -= br.x - resultImage.cols();
    }
    if (br.y >= resultImage.rows()) {
        dstTile.height -= br.y - resultImage.rows();
        srcTile.height -= br.y - resultImage.rows();
    }
    Mat tileView = tileOutput.submat(srcTile);
    Mat dstView = resultImage.submat(dstTile);
    assert (tileView.rows() == dstView.rows());
    assert (tileView.cols() == dstView.cols());
    tileView.copyTo(dstView);
}
Code example source: nroduit/Weasis
private void copyTileToResultImage(Mat tileOutput, Mat resultImage, Rect dstTile) {
    Rect srcTile = new Rect(mPadding, mPadding, mTileSize, mTileSize);
    Point br = dstTile.br();
    if (br.x >= resultImage.cols()) {
        dstTile.width -= br.x - resultImage.cols();
        srcTile.width -= br.x - resultImage.cols();
    }
    if (br.y >= resultImage.rows()) {
        dstTile.height -= br.y - resultImage.rows();
        srcTile.height -= br.y - resultImage.rows();
    }
    Mat tileView = tileOutput.submat(srcTile);
    Mat dstView = resultImage.submat(dstTile);
    assert (tileView.rows() == dstView.rows());
    assert (tileView.cols() == dstView.cols());
    tileView.copyTo(dstView);
}
This content was collected from the web; if it infringes on your rights, please contact the author to have it removed.