I am a complete beginner at Android Studio mobile development. This is the code I have so far.
I want to detect, in real time, whether a person's eyes are closed or not. I am using the Google ML Kit face detection API. The documentation provides most of this code, but it is not clear how to pass frames from the live camera preview into process(image).
public class EyeDetection extends AppCompatActivity {
    private ListenableFuture<ProcessCameraProvider> cameraProviderFuture;
    PreviewView previewView;
    VideoView videoView;

    @Override
    protected void onCreate(@Nullable Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_eye_detection);
        previewView = findViewById(R.id.previewView);
        videoView = findViewById(R.id.video_view);
        String videoPath = "android.resource://" + getPackageName() + "/" + R.raw.videoplayback;
        Uri uri = Uri.parse(videoPath);
        videoView.setVideoURI(uri);
        MediaController mediaController = new MediaController(this);
        videoView.setMediaController(mediaController);
        mediaController.setAnchorView(videoView);

        // FaceDetectorOptions realTimeOpts =
        //         new FaceDetectorOptions.Builder()
        //                 .setContourMode(FaceDetectorOptions.CONTOUR_MODE_ALL)
        //                 .build();

        cameraProviderFuture = ProcessCameraProvider.getInstance(this);
        cameraProviderFuture.addListener(() -> {
            try {
                ProcessCameraProvider cameraProvider = cameraProviderFuture.get();
                bindPreview(cameraProvider);
            } catch (ExecutionException | InterruptedException e) {
                // No errors need to be handled for this Future.
                // This should never be reached.
            }
        }, ContextCompat.getMainExecutor(this));
    }

    // private class YourAnalyzer implements ImageAnalysis.Analyzer {
    //
    //     @Override
    //     public void analyze(ImageProxy imageProxy) {
    //         Image mediaImage = imageProxy.getImage();
    //         if (mediaImage != null) {
    //             InputImage image =
    //                     InputImage.fromMediaImage(mediaImage, imageProxy.getImageInfo().getRotationDegrees());
    //             // Pass image to an ML Kit Vision API
    //             // ...
    //         }
    //     }
    //
    // }

    void bindPreview(@NonNull ProcessCameraProvider cameraProvider) {
        Preview preview = new Preview.Builder()
                .build();
        mlFaceDetector(preview);
        CameraSelector cameraSelector = new CameraSelector.Builder()
                .requireLensFacing(CameraSelector.LENS_FACING_FRONT)
                .build();
        preview.setSurfaceProvider(previewView.getSurfaceProvider());
        Camera camera = cameraProvider.bindToLifecycle((LifecycleOwner) this, cameraSelector, preview);
        // For performing operations that affect all outputs.
        CameraControl cameraControl = camera.getCameraControl();
        // // For querying information and states.
        // CameraInfo cameraInfo = camera.getCameraInfo();
        cameraControl.enableTorch(true);
    }

    void mlFaceDetector(Preview preview) {
        FaceDetectorOptions highAccuracyOpts =
                new FaceDetectorOptions.Builder()
                        .setPerformanceMode(FaceDetectorOptions.PERFORMANCE_MODE_ACCURATE)
                        .setLandmarkMode(FaceDetectorOptions.LANDMARK_MODE_ALL)
                        .setClassificationMode(FaceDetectorOptions.CLASSIFICATION_MODE_ALL)
                        .build();
        FaceDetector detector = FaceDetection.getClient(highAccuracyOpts);
        Task<List<Face>> result =
                detector.process(image) // this is where the issue is
                        .addOnSuccessListener(
                                new OnSuccessListener<List<Face>>() {
                                    @Override
                                    public void onSuccess(List<Face> faces) {
                                        // Task completed successfully
                                        // ...
                                    }
                                })
                        .addOnFailureListener(
                                new OnFailureListener() {
                                    @Override
                                    public void onFailure(@NonNull Exception e) {
                                        // Task failed with an exception
                                        // ...
                                    }
                                });
    }
}
1 Answer
zengzsys1#
Check the ML Kit quickstart sample app; it has example code that streams images from the camera into the detector.
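The general pattern in that sample is: bind an ImageAnalysis use case alongside the Preview, convert each ImageProxy into an InputImage, and hand that to the face detector. Below is a minimal sketch of that wiring applied to the code in the question. The analyzer class name (EyeAnalyzer), the 0.4 eye-open threshold, and the logging are illustrative assumptions, not part of ML Kit; getLeftEyeOpenProbability()/getRightEyeOpenProbability() only return values when CLASSIFICATION_MODE_ALL is set, as in the question's highAccuracyOpts.

    // Inside bindPreview(...), after building the CameraSelector (sketch, not verbatim sample code):
    ImageAnalysis imageAnalysis = new ImageAnalysis.Builder()
            .setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
            .build();
    imageAnalysis.setAnalyzer(ContextCompat.getMainExecutor(this), new EyeAnalyzer());
    // Bind the analysis use case together with the preview so both receive camera frames.
    Camera camera = cameraProvider.bindToLifecycle(
            (LifecycleOwner) this, cameraSelector, preview, imageAnalysis);

    // Hypothetical analyzer class; ImageProxy.getImage() requires the @ExperimentalGetImage opt-in.
    @androidx.camera.core.ExperimentalGetImage
    private class EyeAnalyzer implements ImageAnalysis.Analyzer {
        private final FaceDetector detector = FaceDetection.getClient(
                new FaceDetectorOptions.Builder()
                        .setPerformanceMode(FaceDetectorOptions.PERFORMANCE_MODE_ACCURATE)
                        .setClassificationMode(FaceDetectorOptions.CLASSIFICATION_MODE_ALL)
                        .build());

        @Override
        public void analyze(ImageProxy imageProxy) {
            Image mediaImage = imageProxy.getImage();
            if (mediaImage == null) {
                imageProxy.close();
                return;
            }
            InputImage image = InputImage.fromMediaImage(
                    mediaImage, imageProxy.getImageInfo().getRotationDegrees());
            detector.process(image)
                    .addOnSuccessListener(faces -> {
                        for (Face face : faces) {
                            // Probabilities can be null if classification was not computed.
                            Float leftOpen = face.getLeftEyeOpenProbability();
                            Float rightOpen = face.getRightEyeOpenProbability();
                            // 0.4f is an arbitrary example threshold, not an ML Kit constant.
                            if (leftOpen != null && rightOpen != null
                                    && leftOpen < 0.4f && rightOpen < 0.4f) {
                                Log.d("EyeDetection", "Both eyes appear to be closed");
                            }
                        }
                    })
                    .addOnFailureListener(e -> Log.e("EyeDetection", "Face detection failed", e))
                    // Close the ImageProxy when done, otherwise no further frames are delivered.
                    .addOnCompleteListener(task -> imageProxy.close());
        }
    }

Since highAccuracyOpts already uses CLASSIFICATION_MODE_ALL, the body of mlFaceDetector() could move into such an analyzer largely unchanged. The key point is that the InputImage comes from the ImageProxy delivered by ImageAnalysis, not from the Preview use case, which only feeds the on-screen PreviewView.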