This project takes a photo with the camera and shows the detected text in a TextView, using the Firebase ML Kit text recognizer. It does not detect the text clearly: it picks up some words, but not all of them. Detection is run on a Bitmap, and I don't know whether the Bitmap itself is causing the problem. Should I use a camera SurfaceView instead? Or is there some other solution for this?
activity_main.xml
<?xml version="1.0" encoding="utf-8"?>
<androidx.constraintlayout.widget.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:app="http://schemas.android.com/apk/res-auto"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context=".MainActivity">
<ImageButton
android:id="@+id/cameraButton"
android:layout_width="108dp"
android:layout_height="72dp"
android:layout_marginStart="44dp"
android:layout_marginTop="8dp"
android:layout_marginBottom="32dp"
android:background="@color/white"
android:src="@drawable/ic_baseline_camera_alt_24"
app:layout_constraintBottom_toBottomOf="parent"
app:layout_constraintStart_toStartOf="parent"
app:layout_constraintTop_toBottomOf="@+id/textView"
app:layout_constraintVertical_bias="0.427" />
<ImageButton
android:id="@+id/detectButton"
android:layout_width="108dp"
android:layout_height="72dp"
android:layout_marginTop="8dp"
android:layout_marginEnd="44dp"
android:layout_marginBottom="41dp"
android:background="@color/white"
android:src="@drawable/ic_baseline_done_outline_24"
app:layout_constraintBottom_toBottomOf="parent"
app:layout_constraintEnd_toEndOf="parent"
app:layout_constraintTop_toBottomOf="@+id/textView"
app:layout_constraintVertical_bias="0.445" />
<ImageView
android:id="@+id/mImageView"
android:layout_width="0dp"
android:layout_height="346dp"
android:layout_marginStart="8dp"
android:layout_marginTop="8dp"
android:layout_marginEnd="8dp"
android:scaleType="fitXY"
app:layout_constraintEnd_toEndOf="parent"
app:layout_constraintStart_toStartOf="parent"
app:layout_constraintTop_toTopOf="parent"
app:srcCompat="@drawable/ic_baseline_image_24" />
<TextView
android:id="@+id/textView"
android:layout_width="0dp"
android:layout_height="wrap_content"
android:layout_marginStart="16dp"
android:layout_marginTop="24dp"
android:layout_marginEnd="16dp"
android:fontFamily="@font/segoeui"
android:textSize="20sp"
app:layout_constraintEnd_toEndOf="parent"
app:layout_constraintHorizontal_bias="0.0"
app:layout_constraintStart_toStartOf="parent"
app:layout_constraintTop_toBottomOf="@+id/mImageView" />
</androidx.constraintlayout.widget.ConstraintLayout>
MainActivity.java
import android.content.Intent;
import android.graphics.Bitmap;
import android.os.Bundle;
import android.provider.MediaStore;
import android.view.View;
import android.widget.ImageButton;
import android.widget.ImageView;
import android.widget.TextView;
import android.widget.Toast;

import androidx.annotation.NonNull;
import androidx.appcompat.app.AppCompatActivity;
import androidx.core.content.res.ResourcesCompat;

import com.google.android.gms.tasks.OnFailureListener;
import com.google.android.gms.tasks.OnSuccessListener;
import com.google.android.gms.tasks.Task;
import com.google.firebase.ml.vision.FirebaseVision;
import com.google.firebase.ml.vision.common.FirebaseVisionImage;
import com.google.firebase.ml.vision.text.FirebaseVisionCloudTextRecognizerOptions;
import com.google.firebase.ml.vision.text.FirebaseVisionText;
import com.google.firebase.ml.vision.text.FirebaseVisionTextRecognizer;

import java.util.Arrays;
import java.util.List;

public class MainActivity extends AppCompatActivity {

    ImageView mImageView;
    ImageButton cameraBtn;
    ImageButton detectBtn;
    Bitmap imageBitmap;
    TextView textView;
    String log = "error";
    static final int REQUEST_IMAGE_CAPTURE = 1;

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        mImageView = findViewById(R.id.mImageView);
        cameraBtn = findViewById(R.id.cameraButton);
        detectBtn = findViewById(R.id.detectButton);
        textView = findViewById(R.id.textView);
        textView.setTypeface(ResourcesCompat.getFont(this, R.font.segoeui));
        cameraBtn.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                dispatchTakePictureIntent();
                textView.setText("");
            }
        });
        detectBtn.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                detectTextFromImage();
            }
        });
    }

    private void dispatchTakePictureIntent() {
        Intent takePictureIntent = new Intent(MediaStore.ACTION_IMAGE_CAPTURE);
        if (takePictureIntent.resolveActivity(getPackageManager()) != null) {
            startActivityForResult(takePictureIntent, REQUEST_IMAGE_CAPTURE);
        }
    }

    @Override
    protected void onActivityResult(int requestCode, int resultCode, Intent data) {
        super.onActivityResult(requestCode, resultCode, data);
        if (requestCode == REQUEST_IMAGE_CAPTURE && resultCode == RESULT_OK) {
            Bundle extras = data.getExtras();
            // The "data" extra returned here is only a small preview thumbnail of the captured photo.
            imageBitmap = (Bitmap) extras.get("data");
            mImageView.setImageBitmap(imageBitmap);
        }
    }

    private void detectTextFromImage() {
        FirebaseVisionImage firebaseVisionImage = FirebaseVisionImage.fromBitmap(imageBitmap);
        FirebaseVisionCloudTextRecognizerOptions options = new FirebaseVisionCloudTextRecognizerOptions.Builder()
                .setLanguageHints(Arrays.asList("eng", "hi"))
                .build();
        FirebaseVisionTextRecognizer detector = FirebaseVision.getInstance()
                .getCloudTextRecognizer(options);
        Task<FirebaseVisionText> result =
                detector.processImage(firebaseVisionImage)
                        .addOnSuccessListener(new OnSuccessListener<FirebaseVisionText>() {
                            @Override
                            public void onSuccess(FirebaseVisionText firebaseVisionText) {
                                // Task completed successfully
                                displayTextFromImage(firebaseVisionText);
                            }
                        })
                        .addOnFailureListener(
                                new OnFailureListener() {
                                    @Override
                                    public void onFailure(@NonNull Exception e) {
                                        // Task failed with an exception
                                    }
                                });
    }

    private void displayTextFromImage(FirebaseVisionText firebaseVisionText) {
        List<FirebaseVisionText.TextBlock> blockList = firebaseVisionText.getTextBlocks();
        if (blockList.size() == 0) {
            Toast.makeText(this, "No Text Found in image!", Toast.LENGTH_SHORT).show();
        } else {
            for (FirebaseVisionText.TextBlock block : firebaseVisionText.getTextBlocks()) {
                String text = block.getText();
                textView.setText(text);
            }
        }
    }
}
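One thing worth flagging about the posted code: the "data" extra returned by ACTION_IMAGE_CAPTURE is only a small preview thumbnail, so recognition starts from a low-resolution Bitmap. Below is a minimal sketch of requesting a full-size capture via MediaStore.EXTRA_OUTPUT instead; it assumes a FileProvider is declared in the manifest, and the authority string and file name are purely illustrative.

// Sketch: capture a full-size photo to a file instead of relying on the "data" thumbnail.
// Requires android.net.Uri, java.io.File, android.os.Environment and androidx.core.content.FileProvider,
// plus a matching <provider> (FileProvider) entry in AndroidManifest.xml (the authority below is illustrative).
private Uri photoUri;

private void dispatchFullSizePictureIntent() {
    Intent takePictureIntent = new Intent(MediaStore.ACTION_IMAGE_CAPTURE);
    File photoFile = new File(getExternalFilesDir(Environment.DIRECTORY_PICTURES), "ocr_capture.jpg");
    photoUri = FileProvider.getUriForFile(this, getPackageName() + ".fileprovider", photoFile);
    takePictureIntent.putExtra(MediaStore.EXTRA_OUTPUT, photoUri);
    if (takePictureIntent.resolveActivity(getPackageManager()) != null) {
        startActivityForResult(takePictureIntent, REQUEST_IMAGE_CAPTURE);
    }
}

// In onActivityResult, load the full-size image from photoUri instead of extras.get("data"), e.g.:
// imageBitmap = MediaStore.Images.Media.getBitmap(getContentResolver(), photoUri);
// mImageView.setImageBitmap(imageBitmap);

FirebaseVisionImage.fromFilePath(this, photoUri) can also build the vision image directly from the saved Uri.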
2 Answers
gwbalxhn 1#
There is a text recognition sample app for both iOS and Android at https://developers.google.com/ml-kit/samples (the Vision Quickstart), and it looks like both can successfully detect the text in a given image?
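For comparison, here is a minimal sketch of how on-device text recognition is typically invoked with the standalone ML Kit library that those samples are built on; the dependency coordinates (com.google.mlkit:text-recognition) and the wrapper class and method names are assumptions for illustration, not code taken from the sample.

// Minimal sketch: on-device text recognition with the standalone ML Kit library.
// Class and method names of this wrapper are illustrative.
import android.graphics.Bitmap;
import android.widget.TextView;

import com.google.mlkit.vision.common.InputImage;
import com.google.mlkit.vision.text.Text;
import com.google.mlkit.vision.text.TextRecognition;
import com.google.mlkit.vision.text.TextRecognizer;
import com.google.mlkit.vision.text.latin.TextRecognizerOptions;

public class MlKitTextSketch {
    static void recognize(Bitmap bitmap, TextView textView) {
        // rotationDegrees = 0 assumes the bitmap is already upright.
        InputImage image = InputImage.fromBitmap(bitmap, 0);
        TextRecognizer recognizer = TextRecognition.getClient(TextRecognizerOptions.DEFAULT_OPTIONS);
        recognizer.process(image)
                .addOnSuccessListener((Text result) -> textView.setText(result.getText()))
                .addOnFailureListener(e -> textView.setText("Recognition failed: " + e.getMessage()));
    }
}

The Latin-script model runs fully on-device, so no Cloud recognizer or language hints are needed for English text; newer ML Kit versions also ship separate recognizer options for other scripts such as Devanagari.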
gt0wga4j 2#
Declaring and initializing the String text inside the for loop, and calling setText() on every iteration, throws away the text of the previous block. In practice each block comes back as roughly one line of text and overwrites the one before it, so by the time the loop finishes only the last line or block is left in the TextView. Replace that code with the following to solve the problem:

private void displayTextFromImage(FirebaseVisionText firebaseVisionText) {
    List<FirebaseVisionText.TextBlock> blockList = firebaseVisionText.getTextBlocks();
    if (blockList.size() == 0) {
        Toast.makeText(this, "No Text Found in image!", Toast.LENGTH_SHORT).show();
    } else {
        String text = "";
        for (FirebaseVisionText.TextBlock block : firebaseVisionText.getTextBlocks()) {
            text = text + "\n" + block.getText();
            textView.setText(text);
        }
    }
}
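An equivalent way to write the same fix (a sketch, not part of the original answer) is to accumulate the blocks in a StringBuilder and update the TextView once after the loop:

private void displayTextFromImage(FirebaseVisionText firebaseVisionText) {
    List<FirebaseVisionText.TextBlock> blockList = firebaseVisionText.getTextBlocks();
    if (blockList.isEmpty()) {
        Toast.makeText(this, "No Text Found in image!", Toast.LENGTH_SHORT).show();
        return;
    }
    // Collect every block first, then touch the TextView a single time.
    StringBuilder sb = new StringBuilder();
    for (FirebaseVisionText.TextBlock block : blockList) {
        sb.append(block.getText()).append('\n');
    }
    textView.setText(sb.toString().trim());
}

Calling setText() once keeps the UI update out of the loop, but either version ends up displaying all of the detected blocks.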