以下是一个完整的 Flutter 代码示例:可在拍照取景时实时锁定实物位置并绘制实物框,拍照之后继续锁定实物并绘制实物框,然后根据实物框裁剪实物并保存到本地。该示例使用了 `camera`、`path_provider`、`image` 和 `image_picker` 库。
import 'dart:async';
import 'dart:io';
import 'dart:math' as math;
import 'dart:ui' as ui;
import 'package:camera/camera.dart';
import 'package:flutter/cupertino.dart';
import 'package:flutter/material.dart';
import 'package:image/image.dart' as img;
import 'package:image_picker/image_picker.dart';
import 'package:path_provider/path_provider.dart';
/// Screen that locks onto an object in the live camera preview, keeps a
/// bounding box drawn around it, and crops the boxed region out of a
/// captured photo.
class RealtimeObjectDetection extends StatefulWidget {
  // Forward the widget key so this screen composes correctly in lists /
  // rebuilds (Flutter `use_key_in_widget_constructors` lint). Adding the
  // constructor is backward compatible: existing `RealtimeObjectDetection()`
  // call sites keep working.
  const RealtimeObjectDetection({Key key}) : super(key: key);

  @override
  _RealtimeObjectDetectionState createState() =>
      _RealtimeObjectDetectionState();
}
/// State for [RealtimeObjectDetection].
///
/// Streams frames from the camera, runs a detector on each frame to keep
/// [_targetBox] locked onto the nearest detected object, and can crop the
/// locked region out of a captured photo.
///
/// NOTE(review): `Face` and `getRealtimeObjectDetector()` are not defined in
/// this file nor in any imported package — they are assumed to come from
/// elsewhere in the project; confirm their contract.
class _RealtimeObjectDetectionState extends State<RealtimeObjectDetection> {
  CameraController _cameraController;
  XFile _imageFile; // most recently captured / cropped photo
  Size _imageSize; // logical size the detector and painters work in
  bool _isDetecting = false; // guard against overlapping frame processing
  List<Face> _faces = [];
  Rect _targetBox; // currently locked object box; null while unlocked

  @override
  void initState() {
    super.initState();
    _initializeCamera();
  }

  /// Opens the first available camera and starts streaming preview frames
  /// into [_processCameraImage].
  Future<void> _initializeCamera() async {
    final cameras = await availableCameras();
    final camera = cameras.first;
    _cameraController =
        CameraController(camera, ResolutionPreset.high, enableAudio: false);
    await _cameraController.initialize();
    await _cameraController.startImageStream(_processCameraImage);
    // BUG FIX: trigger a rebuild once the controller is ready; the original
    // never did, so the `_cameraController != null` branch in build() could
    // stay hidden until some unrelated state change occurred.
    if (mounted) setState(() {});
  }

  /// Re-locks [_targetBox] onto the detection nearest to its current
  /// position, once per frame. Skipped while a previous frame is still being
  /// processed or while no target has been locked yet.
  Future<void> _processCameraImage(CameraImage cameraImage) async {
    if (_isDetecting || _targetBox == null) return;
    _isDetecting = true;
    try {
      final points = [
        _targetBox.topLeft,
        _targetBox.topRight,
        _targetBox.bottomRight,
        _targetBox.bottomLeft,
      ];
      // Convert from preview coordinates to image coordinates. The axis swap
      // assumes the preview is rotated 90° relative to the sensor — TODO
      // confirm against the device orientation handling used by the project.
      final scaleX =
          _imageSize.width / _cameraController.value.previewSize.height;
      final scaleY =
          _imageSize.height / _cameraController.value.previewSize.width;
      final scaledPoints = points
          .map((point) => Offset(
                point.dy * scaleX,
                (1 - point.dx) * scaleY,
              ))
          .toList();
      // Resize the frame to the working size before detection.
      // NOTE(review): only plane 0 is used, which assumes a single-plane
      // pixel format (e.g. BGRA) — confirm the stream's ImageFormatGroup.
      final inputImage = img.copyResize(
        img.Image.fromBytes(
          cameraImage.width,
          cameraImage.height,
          cameraImage.planes[0].bytes,
        ),
        width: _imageSize.width.toInt(),
        height: _imageSize.height.toInt(),
      );
      final faceDetector = getRealtimeObjectDetector();
      // BUG FIX: the original passed the bare `Image.fromBytes(...)`, which
      // resolves to Flutter's Image *widget* (no such constructor usable
      // here) instead of the `image` package's `img.Image`.
      final List<Face> faces = faceDetector.detectFaces(img.Image.fromBytes(
        inputImage.width,
        inputImage.height,
        inputImage.getBytes(),
      ));
      // Pick the detection whose centre is closest to the target-box corners
      // (RMS-style distance over the four corners) and re-lock onto it.
      double closestDistance;
      Rect closestBox;
      for (final face in faces) {
        final faceBox = face.boundingBox;
        // BUG FIX: fold's accumulator must start at 0.0; with `0` the
        // accumulator is inferred as int and adding double distances fails.
        final distance = math.sqrt(scaledPoints.fold(
                0.0,
                (sum, point) =>
                    sum + (point - faceBox.center).distanceSquared)) /
            scaledPoints.length;
        if (closestBox == null || distance < closestDistance) {
          closestBox = faceBox;
          closestDistance = distance;
        }
      }
      if (mounted) {
        setState(() {
          _faces = faces;
          _targetBox = closestBox;
        });
      }
    } finally {
      // BUG FIX: always release the guard, even when detection throws, so
      // one bad frame does not permanently stop processing.
      _isDetecting = false;
    }
  }

  /// Captures a photo with the system camera UI and stores it in
  /// [_imageFile].
  Future<void> _getImage() async {
    final pickedFile = await ImagePicker().getImage(source: ImageSource.camera);
    // BUG FIX: getImage returns null when the user cancels; the original
    // dereferenced `pickedFile.path` unconditionally.
    if (pickedFile == null) return;
    setState(() {
      _imageFile = XFile(pickedFile.path);
    });
  }

  /// Records the working image size, clears the lock, and runs one detection
  /// pass over the captured photo, locking onto the first detection.
  Future<void> _setImageSize(Size size) async {
    setState(() {
      _imageSize = size;
      _targetBox = null;
    });
    if (_imageFile != null && _imageSize != null) {
      final inputBytes = await _imageFile.readAsBytes();
      final faceDetector = getRealtimeObjectDetector();
      // BUG FIX: use the `image` package type (img.Image), not the Flutter
      // Image widget the bare name resolves to.
      // NOTE(review): the raw file bytes are passed as if they were decoded
      // pixels — presumably the detector decodes internally; confirm.
      final faces = faceDetector.detectFaces(img.Image.fromBytes(
        _imageSize.width.toInt(),
        _imageSize.height.toInt(),
        inputBytes,
      ));
      if (faces.isNotEmpty) {
        setState(() {
          _faces = faces;
          _targetBox = faces.first.boundingBox;
        });
      }
    }
  }

  /// Crops the locked target box (plus a 50px margin) out of the captured
  /// photo, saves it as a PNG in the app documents directory, and shows it.
  Future<void> _cropRealtimeObject() async {
    if (_targetBox == null || _imageFile == null) return;
    // Decode first: XFile has no width/height getters, so the pixel
    // dimensions must come from the decoded image.
    // (BUG FIX: the original read `_imageFile.width` / `_imageFile.height`,
    // which do not exist on XFile.)
    final inputBytes = await _imageFile.readAsBytes();
    final image = img.decodeImage(inputBytes);
    if (image == null) return; // not a decodable image; nothing to crop
    // Map the box from the working size to pixel coordinates and widen it a
    // little for context.
    final scaleX = image.width / _imageSize.width;
    final scaleY = image.height / _imageSize.height;
    final targetBox = Rect.fromLTWH(
      _targetBox.left * scaleX,
      _targetBox.top * scaleY,
      _targetBox.width * scaleX,
      _targetBox.height * scaleY,
    ).inflate(50);
    // BUG FIX: clamp to the image bounds — inflate(50) can push the rect
    // outside the image, and copyCrop must not receive negative or
    // out-of-range coordinates.
    final left = math.max(0, targetBox.left.toInt());
    final top = math.max(0, targetBox.top.toInt());
    final width = math.min(targetBox.width.toInt(), image.width - left);
    final height = math.min(targetBox.height.toInt(), image.height - top);
    final croppedImage = img.copyCrop(image, left, top, width, height);
    // Save under a timestamp-based unique name.
    final appDir = await getApplicationDocumentsDirectory();
    final uniqueId = DateTime.now().millisecondsSinceEpoch.toString();
    final filePath = '${appDir.path}/$uniqueId.png';
    await File(filePath).writeAsBytes(img.encodePng(croppedImage));
    setState(() {
      _imageFile = XFile(filePath);
      _imageSize = null;
      _faces = [];
      _targetBox = null;
    });
  }

  @override
  void dispose() {
    // BUG FIX: stop the frame stream before disposing so no in-flight
    // callback fires on a disposed controller.
    if (_cameraController != null &&
        _cameraController.value.isStreamingImages) {
      _cameraController.stopImageStream();
    }
    _cameraController?.dispose();
    super.dispose();
  }

  @override
  Widget build(BuildContext context) {
    return Scaffold(
      appBar: AppBar(title: Text('Realtime Object Detection')),
      body: Stack(
        children: [
          // NOTE(review): the camera package's CameraPreview has no
          // `onCameraImage` parameter — this appears to rely on a local
          // wrapper or fork; confirm.
          if (_cameraController != null && _targetBox == null)
            CameraPreview(_cameraController,
                onCameraImage: (size) => _setImageSize(size)),
          if (_imageFile != null && _targetBox == null)
            Image.file(File(_imageFile.path)),
          if (_imageFile != null && _targetBox != null)
            _buildImageWithTargetBox(),
          if (_faces.isNotEmpty) _buildFaces(),
        ],
      ),
      floatingActionButton: FloatingActionButton(
        onPressed: _targetBox != null ? _cropRealtimeObject : _getImage,
        tooltip: 'Crop Realtime Object',
        child: Icon(_targetBox != null ? Icons.crop : Icons.camera_alt),
      ),
    );
  }

  /// Paints the captured photo with the locked target box on top.
  CustomPaint _buildImageWithTargetBox() {
    return CustomPaint(
      painter: ImageWithBoxPainter(
        image: Image.file(File(_imageFile.path)),
        box: _targetBox,
      ),
      // BUG FIX: XFile has no width/height; use the recorded working size
      // instead (non-null whenever _targetBox is set).
      size: Size(_imageSize.width, _imageSize.height),
    );
  }

  /// Paints the detection boxes over the working area.
  CustomPaint _buildFaces() {
    return CustomPaint(
      painter: FacePainter(
        faces: _faces,
        imageSize: _imageSize,
      ),
      size: Size(_imageSize.width, _imageSize.height),
    );
  }
}
/// Paints [image] stretched over the canvas and overlays [box] as a
/// translucent green rectangle.
///
/// NOTE(review): [image] is Flutter's Image *widget*, so `image.image` below
/// is an ImageProvider — but Canvas.drawImageRect requires a dart:ui Image.
/// As written this cannot type-check; the painter should take a ui.Image
/// (requires changing the call site too), so it is flagged here rather than
/// silently rewritten.
class ImageWithBoxPainter extends CustomPainter {
final Image image;
final Rect box;
ImageWithBoxPainter({this.image, this.box});
@override
void paint(Canvas canvas, Size size) {
// Fall back to the canvas size when the widget carries no explicit size.
final imageWidth = image?.width?.toDouble() ?? size.width;
final imageHeight = image?.height?.toDouble() ?? size.height;
// Scale factors from canvas coordinates to image coordinates; the box is
// divided by these below to map it onto the canvas.
final scaleX = imageWidth / size.width;
final scaleY = imageHeight / size.height;
final imagesize = Size(imageWidth, imageHeight); // NOTE(review): unused.
if (image != null) {
canvas.drawImageRect(
image.image, // NOTE(review): ImageProvider, not the ui.Image drawImageRect expects.
Rect.fromLTRB(0, 0, imageWidth, imageHeight),
Rect.fromLTRB(0, 0, size.width, size.height),
Paint(),
);
}
if (box != null) {
// Map the box from image coordinates back onto the canvas and fill it
// with 50%-opacity green.
final targetBox = Rect.fromLTRB(
box.left / scaleX,
box.top / scaleY,
box.right / scaleX,
box.bottom / scaleY,
);
canvas.drawRect(targetBox, Paint()..color = Colors.green.withOpacity(0.5));
}
}
@override
bool shouldRepaint(ImageWithBoxPainter oldDelegate) =>
image != oldDelegate.image || box != oldDelegate.box;
}
/// Paints a green outline around every detected face, scaling each bounding
/// box from image coordinates to the canvas the painter is given.
class FacePainter extends CustomPainter {
  final List<Face> faces;
  final Size imageSize;

  FacePainter({this.faces, this.imageSize});

  @override
  void paint(Canvas canvas, Size size) {
    if (faces.isEmpty || imageSize == null) return;
    // Ratio between the canvas and the source image the boxes refer to.
    final xRatio = size.width / imageSize.width;
    final yRatio = size.height / imageSize.height;
    final outline = Paint()
      ..color = Colors.green
      ..style = PaintingStyle.stroke
      ..strokeWidth = 2;
    for (final detected in faces) {
      final b = detected.boundingBox;
      canvas.drawRect(
        Rect.fromLTRB(
          b.left * xRatio,
          b.top * yRatio,
          b.right * xRatio,
          b.bottom * yRatio,
        ),
        outline,
      );
    }
  }

  @override
  bool shouldRepaint(FacePainter oldDelegate) =>
      faces != oldDelegate.faces || imageSize != oldDelegate.imageSize;
}
在 `_processCameraImage` 方法中,我们使用目标框来锁定相机预览图像中的对象。如果目标框已经设置,代码会用检测器检测当前帧中的对象,找到距离目标框最近的对象,并用其边界框更新目标框。
在 `_setImageSize` 方法中,我们记录图像尺寸,并在图像加载后用检测器对 `_imageFile` 进行检测;如果检测到对象,就选取第一个检测结果的边界框作为目标框。
在 `_cropRealtimeObject` 方法中,我们使用 `_targetBox` 对 `_imageFile` 图像进行裁剪:先把边界框从显示尺寸换算到图像像素尺寸,再用 `image` 库的 `copyCrop` 方法裁剪输入图像,最后把裁剪结果保存到文件系统并更新 UI。
在 `ImageWithBoxPainter` 中,我们使用传入的 `Image` 和 `Rect` 绘制带有目标框的图像。
在 `FacePainter` 中,我们使用传入的 `faces` 和 `imageSize` 绘制检测到的脸部边界框。