Welcome to our comprehensive guide on integrating object detection into a Flutter AR game with Google ML Kit! We'll start with the ARGameView widget, a StatefulWidget that takes a title and a callback that fires whenever an object is detected.
class ARGameView extends StatefulWidget {
ARGameView({
Key? key,
required this.title,
required this.onDetectedObject,
}) : super(key: key);
final String title;
final Function(DetectedObject) onDetectedObject;
@override
State<ARGameView> createState() => _ARGameViewState();
}
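As a quick sketch of how this widget might be dropped into an app (the MaterialApp wrapper, the 'AR Object Hunt' title and the debugPrint-based callback below are placeholders for illustration, not part of the article's code):

import 'package:flutter/material.dart';
import 'package:google_ml_kit/google_ml_kit.dart';

// ARGameView is the widget defined in this article.
void main() {
  runApp(
    MaterialApp(
      home: ARGameView(
        title: 'AR Object Hunt',
        onDetectedObject: (object) {
          // React to a detection, e.g. award points in the game loop.
          debugPrint('Detected: ${object.labels.map((l) => l.text).join(', ')}');
        },
      ),
    ),
  );
}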
The _ARGameViewState class manages the state of the ARGameView widget. It initializes the object detector and other necessary variables in the initState method.
class _ARGameViewState extends State<ARGameView> {
ObjectDetector? _objectDetector;
DetectionMode _mode = DetectionMode.stream;
bool _canProcess = false;
bool _isBusy = false;
CustomPaint? _customPaint;
String? _text;
var _cameraLensDirection = CameraLensDirection.back;
int _option = 0;
final _options = {
'default': '',
'object_custom': 'object_labeler.tflite',
};
@override
void initState() {
super.initState();
_initializeDetector();
}
void _initializeDetector() async {
_objectDetector?.close();
_objectDetector = null;
if (_option == 0) {
final options = ObjectDetectorOptions(
mode: _mode,
classifyObjects: true,
multipleObjects: true,
);
_objectDetector = GoogleMlKit.vision.objectDetector(options);
    } else if (_option > 0 && _option < _options.length) {
      // Use a custom local model bundled under assets/ml.
final option = _options[_options.keys.toList()[_option]] ?? '';
final modelPath = await getAssetPath('assets/ml/$option');
final options = LocalObjectDetectorOptions(
mode: _mode,
modelPath: modelPath,
classifyObjects: true,
multipleObjects: true,
);
_objectDetector = GoogleMlKit.vision.objectDetector(options);
}
_canProcess = true;
}
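The getAssetPath helper isn't shown in this article. LocalObjectDetectorOptions needs a real file path rather than an asset key, so the usual approach is to copy the bundled .tflite model (registered under assets/ml/ in pubspec.yaml) into app storage. A minimal sketch of such a helper, assuming the path_provider package is available:

import 'dart:io';

import 'package:flutter/services.dart' show rootBundle;
import 'package:path_provider/path_provider.dart';

// Copies a bundled model such as 'assets/ml/object_labeler.tflite' out of the
// asset bundle and returns the absolute file path the detector expects.
Future<String> getAssetPath(String assetPath) async {
  final dir = await getApplicationSupportDirectory();
  final file = File('${dir.path}/$assetPath');
  if (!await file.exists()) {
    await file.create(recursive: true);
    final byteData = await rootBundle.load(assetPath);
    await file.writeAsBytes(
      byteData.buffer.asUint8List(byteData.offsetInBytes, byteData.lengthInBytes),
    );
  }
  return file.path;
}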
The _processImage method feeds each captured frame to the initialized detector and waits for the list of detected objects. Once the results come back, the UI is updated through the _updateUI method.
Future<void> _processImage(InputImage inputImage) async {
if (_objectDetector == null) return;
if (!_canProcess) return;
if (_isBusy) return;
_isBusy = true;
setState(() {
_text = '';
});
final objects = await _objectDetector!.processImage(inputImage);
_updateUI(objects);
_isBusy = false;
if (mounted) {
setState(() {});
}
}
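DetectorView hands _processImage an InputImage built from the live camera stream, so you normally don't construct one yourself. If you want to run the same pipeline on a still photo (for example in gallery mode), a hedged sketch, assuming the image_picker package is added and imported alongside google_ml_kit:

// Sketch: would live inside _ARGameViewState next to _processImage.
Future<void> _processGalleryImage() async {
  // Let the user pick a still photo and wrap it for ML Kit.
  final picked = await ImagePicker().pickImage(source: ImageSource.gallery);
  if (picked == null) return;
  final inputImage = InputImage.fromFilePath(picked.path);
  await _processImage(inputImage);
}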
The _updateUI method forwards each detection to the onDetectedObject callback and refreshes the UI. If objects are detected, it displays the number of objects found along with a visual overlay drawn by a CustomPaint widget. Otherwise, it displays a message indicating that no objects were detected.
void _updateUI(List<DetectedObject> objects) {
    if (objects.isNotEmpty) {
      // Notify the parent so the game logic can react to every detection.
      for (final object in objects) {
        widget.onDetectedObject(object);
      }
      setState(() {
        _text = 'Objects Detected: ${objects.length}';
        _customPaint = CustomPaint(
          painter: ObjectDetectPainter(objects),
        );
      });
} else {
setState(() {
_text = 'No Objects Detected';
_customPaint = null;
});
}
}
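ObjectDetectPainter isn't defined in this post; its job is to draw each DetectedObject's bounding box and label over the camera preview. A simplified sketch, which assumes the canvas and the analyzed image share the same coordinate space (a production painter, like the one in the ML Kit example app, additionally scales and mirrors the boxes to match the preview):

import 'package:flutter/material.dart';
import 'package:google_ml_kit/google_ml_kit.dart';

class ObjectDetectPainter extends CustomPainter {
  ObjectDetectPainter(this.objects);

  final List<DetectedObject> objects;

  @override
  void paint(Canvas canvas, Size size) {
    final boxPaint = Paint()
      ..style = PaintingStyle.stroke
      ..strokeWidth = 3.0
      ..color = Colors.lightGreenAccent;

    for (final object in objects) {
      // Draw the bounding box reported by ML Kit.
      canvas.drawRect(object.boundingBox, boxPaint);

      // Draw the first label, if any, just above the box.
      if (object.labels.isNotEmpty) {
        final label = object.labels.first;
        final textPainter = TextPainter(
          text: TextSpan(
            text: '${label.text} ${(label.confidence * 100).toStringAsFixed(0)}%',
            style: const TextStyle(color: Colors.lightGreenAccent, fontSize: 14),
          ),
          textDirection: TextDirection.ltr,
        )..layout();
        textPainter.paint(
          canvas,
          Offset(object.boundingBox.left, object.boundingBox.top - 18),
        );
      }
    }
  }

  @override
  bool shouldRepaint(ObjectDetectPainter oldDelegate) =>
      oldDelegate.objects != objects;
}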
Here's what the final code looks like:

import 'package:camera/camera.dart';
import 'package:flutter/material.dart';
import 'package:google_ml_kit/google_ml_kit.dart';
// Plus your app-local helpers referenced below: DetectorView, ObjectDetectPainter and getAssetPath.

class ARGameView extends StatefulWidget {
  ARGameView({
    Key? key,
    required this.title,
    required this.onDetectedObject,
  }) : super(key: key);
final String title;
final Function(DetectedObject) onDetectedObject;
@override
State<ARGameView> createState() => _ARGameViewState();
}
class _ARGameViewState extends State<ARGameView> {
ObjectDetector? _objectDetector;
DetectionMode _mode = DetectionMode.stream;
bool _canProcess = false;
bool _isBusy = false;
CustomPaint? _customPaint;
String? _text;
var _cameraLensDirection = CameraLensDirection.back;
int _option = 0;
final _options = {
'default': '',
'object_custom': 'object_labeler.tflite',
};
@override
void initState() {
super.initState();
_initializeDetector();
}
@override
Widget build(BuildContext context) {
return Scaffold(
appBar: AppBar(
title: Text(widget.title),
),
body: Stack(
children: [
DetectorView(
title: 'AR Game Detector',
customPaint: _customPaint,
text: _text,
onImage: _processImage,
initialCameraLensDirection: _cameraLensDirection,
onCameraLensDirectionChanged: (value) =>
_cameraLensDirection = value,
onCameraFeedReady: _initializeDetector,
initialDetectionMode: DetectorViewMode.values[_mode.index],
onDetectorViewModeChanged: _onScreenModeChanged,
),
Positioned(
top: 30,
left: 100,
right: 100,
child: Row(
children: [
Spacer(),
Container(
decoration: BoxDecoration(
color: Colors.black54,
borderRadius: BorderRadius.circular(10.0),
),
child: Padding(
padding: const EdgeInsets.all(4.0),
child: _buildDropdown(),
),
),
Spacer(),
],
),
),
],
),
);
}
Widget _buildDropdown() => DropdownButton<int>(
value: _option,
icon: const Icon(Icons.arrow_downward),
elevation: 16,
style: const TextStyle(color: Colors.blue),
underline: Container(
height: 2,
color: Colors.blue,
),
onChanged: (int? option) {
if (option != null) {
setState(() {
_option = option;
_initializeDetector();
});
}
},
items: List<int>.generate(_options.length, (i) => i)
.map<DropdownMenuItem<int>>((option) {
return DropdownMenuItem<int>(
value: option,
child: Text(_options.keys.toList()[option]),
);
}).toList(),
);
void _onScreenModeChanged(DetectorViewMode mode) {
switch (mode) {
case DetectorViewMode.gallery:
_mode = DetectionMode.single;
_initializeDetector();
return;
case DetectorViewMode.liveFeed:
_mode = DetectionMode.stream;
_initializeDetector();
return;
}
}
void _initializeDetector() async {
_objectDetector?.close();
_objectDetector = null;
if (_option == 0) {
final options = ObjectDetectorOptions(
mode: _mode,
classifyObjects: true,
multipleObjects: true,
);
_objectDetector = GoogleMlKit.vision.objectDetector(options);
    } else if (_option > 0 && _option < _options.length) {
      // Use a custom local model bundled under assets/ml.
final option = _options[_options.keys.toList()[_option]] ?? '';
final modelPath = await getAssetPath('assets/ml/$option');
final options = LocalObjectDetectorOptions(
mode: _mode,
modelPath: modelPath,
classifyObjects: true,
multipleObjects: true,
);
_objectDetector = GoogleMlKit.vision.objectDetector(options);
}
_canProcess = true;
}
Future<void> _processImage(InputImage inputImage) async {
if (_objectDetector == null) return;
if (!_canProcess) return;
if (_isBusy) return;
_isBusy = true;
setState(() {
_text = '';
});
final objects = await _objectDetector!.processImage(inputImage);
_updateUI(objects);
_isBusy = false;
if (mounted) {
setState(() {});
}
}
void _updateUI(List<DetectedObject> objects) {
    if (objects.isNotEmpty) {
      // Notify the parent so the game logic can react to every detection.
      for (final object in objects) {
        widget.onDetectedObject(object);
      }
      // Update UI with detected objects
      setState(() {
        _text = 'Objects Detected: ${objects.length}';
        _customPaint = CustomPaint(
          painter: ObjectDetectPainter(objects),
        );
      });
} else {
setState(() {
_text = 'No Objects Detected';
_customPaint = null;
});
}
}
}
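One thing the class above doesn't do is release the detector when the widget goes away; since it holds native resources, it's worth tearing it down explicitly. A small dispose override that could be added to _ARGameViewState:

  @override
  void dispose() {
    // Stop accepting frames and release the native detector.
    _canProcess = false;
    _objectDetector?.close();
    super.dispose();
  }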
Integrating object detection into mobile game development opens up a wide range of use cases and gameplay possibilities, all powered by machine learning through Google ML Kit.
As Tech Co-Founder at Yugensys, I’m passionate about fostering innovation and propelling technological progress. By harnessing the power of cutting-edge solutions, I lead our team in delivering transformative IT services and Outsourced Product Development. My expertise lies in leveraging technology to empower businesses and ensure their success within the dynamic digital landscape.
If you're looking to augment your software engineering team with one dedicated to impactful solutions and continuous advancement, feel free to connect with me. Yugensys can be your trusted partner in navigating the ever-evolving technological landscape.