modified logic in ocr
@@ -567,58 +567,69 @@ function cameraCapture() {
{{-- //.. --}}

<div x-data="cameraCapture()" x-init="initCamera()" class="relative w-80 h-auto">
    <!-- Video Stream -->
    <video x-ref="video" autoplay playsinline class="border rounded w-80 h-auto"></video>
<div x-data="cameraCapture()" x-init="initCamera()" wire:ignore class="relative space-y-2">
    <!-- Video feed -->
    <video
        x-ref="video"
        autoplay
        playsinline
        class="border rounded w-80 h-auto"
        style="display:block;"
    ></video>

    <!-- Overlay for OCR highlights -->
    <canvas x-ref="overlay" class="absolute top-0 left-0 w-80 h-auto pointer-events-none"></canvas>
    <!-- Overlay canvas for OCR highlight -->
    <canvas
        x-ref="overlay"
        class="border rounded w-80 h-auto"
        style="position:absolute; top:0; left:0; pointer-events:none;"
    ></canvas>

    <!-- Switch Camera Button -->
    <div class="mt-2">
        <button @click="switchCamera" class="px-4 py-2 bg-blue-600 text-white rounded">Switch Camera</button>
    <!-- Hidden canvas for capturing snapshot if needed -->
    <canvas x-ref="canvas" class="hidden"></canvas>

    <div class="flex space-x-4 mt-2">
        <x-filament::button color="primary" @click="switchCamera">Switch Camera</x-filament::button>
        <x-filament::button color="success" @click="capturePhoto">Capture Photo</x-filament::button>
    </div>

    <input type="hidden" x-ref="hiddenInput" name="camera_capture_file">
</div>

<!-- Tesseract.js CDN -->
<script src="https://cdn.jsdelivr.net/npm/tesseract.js@4.1.3/dist/tesseract.min.js"></script>
<!-- Scripts -->
<script src="https://cdn.jsdelivr.net/npm/alpinejs@3.x.x/dist/cdn.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/tesseract.js@2.1.5/dist/tesseract.min.js"></script>

<script>
function cameraCapture() {
    return {
        stream: null,
        currentFacingMode: 'user', // 'user' = front, 'environment' = rear
        worker: null,
        currentFacingMode: 'user',
        textDetectionInterval: null,

        async initCamera() {
            try {
                // Stop any existing streams
                if (this.stream) this.stream.getTracks().forEach(track => track.stop());

                const video = this.$refs.video;

                this.stream = await navigator.mediaDevices.getUserMedia({
                    video: { facingMode: this.currentFacingMode }
                });

                video.srcObject = this.stream;
                await new Promise(resolve => video.onloadedmetadata = resolve);
                await video.play();

                // Set overlay size
                // Wait for metadata to load
                await new Promise(resolve => video.onloadedmetadata = resolve);
                video.play();

                // Overlay size matches video
                const overlay = this.$refs.overlay;
                overlay.width = video.videoWidth;
                overlay.height = video.videoHeight;

                // Initialize Tesseract Worker once
                if (!this.worker) {
                    this.worker = Tesseract.createWorker({ logger: m => console.log(m) });
                    await this.worker.load();
                    await this.worker.loadLanguage('eng');
                    await this.worker.initialize('eng');
                }

                // Start OCR detection
                this.startDetection();

            } catch (err) {
                console.error("Camera error:", err);
                alert("Camera error:\n" + (err.message || err));
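initCamera() ends by starting OCR polling through this.startDetection(), a method that sits outside the hunks shown here. A minimal sketch of what it presumably does, assuming it only schedules detectText() on the interval handle that switchCamera() later clears (the one-second period is an assumption, not part of this commit):

startDetection() {
    // Assumed helper (not in the diff): re-run OCR on the live frame once per
    // second and keep the handle so switchCamera() can clear it before restarting.
    if (this.textDetectionInterval) clearInterval(this.textDetectionInterval);
    this.textDetectionInterval = setInterval(() => this.detectText(), 1000);
},
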
@@ -626,22 +637,26 @@ function cameraCapture() {
        },

        async switchCamera() {
            // Toggle facing mode
            this.currentFacingMode = this.currentFacingMode === 'user' ? 'environment' : 'user';

            // Stop previous detection interval
            if (this.textDetectionInterval) clearInterval(this.textDetectionInterval);

            // Stop all tracks
            if (this.stream) this.stream.getTracks().forEach(track => track.stop());

            // Small delay to avoid browser issues
            await new Promise(r => setTimeout(r, 300));

            // Restart camera
            await this.initCamera();
        },

        async capturePhoto() {
            const video = this.$refs.video;
            const canvas = this.$refs.canvas;
            const ctx = canvas.getContext('2d');

            canvas.width = video.videoWidth;
            canvas.height = video.videoHeight;
            ctx.drawImage(video, 0, 0);

            // Save captured image to hidden input (optional)
            const snapshot = canvas.toDataURL('image/png');
            this.$refs.hiddenInput.value = snapshot;

            alert("Photo captured!");
        },

        async detectText() {
            const video = this.$refs.video;
            const overlay = this.$refs.overlay;
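capturePhoto() above stores the snapshot as a base64 data URL in the hidden camera_capture_file input. If that value ever needs to reach the server as a real file upload rather than a string, the data URL has to be converted first; a hypothetical helper (not part of this commit) could look like this:

async function dataUrlToFile(dataUrl, filename = 'capture.png') {
    // fetch() accepts data: URLs in modern browsers, so no manual base64 decoding is needed.
    const blob = await (await fetch(dataUrl)).blob();
    return new File([blob], filename, { type: blob.type || 'image/png' });
}

// Example: const file = await dataUrlToFile(this.$refs.hiddenInput.value);
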
@@ -649,7 +664,6 @@ function cameraCapture() {
            if (!video.videoWidth) return;

            // Draw current frame on temp canvas
            const tempCanvas = document.createElement('canvas');
            tempCanvas.width = video.videoWidth;
            tempCanvas.height = video.videoHeight;
@@ -657,19 +671,23 @@ function cameraCapture() {
            tempCtx.drawImage(video, 0, 0);

            try {
                const { data: { words } } = await this.worker.recognize(tempCanvas);
                const result = await Tesseract.recognize(tempCanvas, 'eng', {
                    logger: m => console.log(m)
                });

                const words = result.data.words;

                // Clear overlay
                ctx.clearRect(0, 0, overlay.width, overlay.height);

                ctx.strokeStyle = 'lime';
                ctx.lineWidth = 2;

                words.forEach(w => {
                    if (!w.bbox || w.confidence < 50) return;

                    const { x0, y0, x1, y1 } = w.bbox;
                    ctx.strokeRect(x0, y0, x1 - x0, y1 - y0);
                });

            } catch (err) {
                console.error("OCR error:", err);
            }
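Both recognition styles appear in the hunk above: the worker-based this.worker.recognize(tempCanvas) call and the top-level Tesseract.recognize() helper exposed by the tesseract.js 2.x CDN build. A self-contained sketch of the top-level API outside the component, where 'sample.png' is a placeholder input (any image URL, img element, or canvas works):

// Standalone usage of the Tesseract.recognize() call pattern used above.
Tesseract.recognize('sample.png', 'eng', { logger: m => console.log(m) })
    .then(({ data }) => {
        data.words.forEach(w => {
            // Each word carries a confidence score and a pixel bounding box.
            console.log(w.text, w.confidence, w.bbox);
        });
    });

Recognizing a full frame takes noticeably longer than a typical polling tick, so the detection interval generally has to be longer than the recognition time for the overlay to stay responsive.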