{{-- Camera capture with live Tesseract OCR overlay (Filament/Livewire Blade field view) --}}
{{-- <div>
|
||
<video id="video" width="320" height="240" autoplay playsinline style="border:1px solid #ccc;"></video>
|
||
<br>
|
||
<button type="button" id="captureBtn" class="mt-2 px-4 py-2 bg-blue-600 text-white rounded">Capture</button>
|
||
<canvas id="canvas" width="320" height="240" style="display:none;"></canvas>
|
||
<img id="snapshot" style="margin-top:10px; max-width:100%;">
|
||
<input type="hidden" id="camera_image" name="{{ $getName() }}">
|
||
</div>
|
||
|
||
<script>
|
||
document.addEventListener('DOMContentLoaded', () => {
|
||
const video = document.getElementById('video');
|
||
const canvas = document.getElementById('canvas');
|
||
const captureBtn = document.getElementById('captureBtn');
|
||
const snapshot = document.getElementById('snapshot');
|
||
const cameraInput = document.getElementById('camera_image');
|
||
|
||
async function startCamera() {
|
||
try {
|
||
const stream = await navigator.mediaDevices.getUserMedia({
|
||
video: { facingMode: "user" } // front camera
|
||
});
|
||
video.srcObject = stream;
|
||
} catch (err) {
|
||
console.error("Camera error: ", err);
|
||
alert("Cannot access camera. Check permissions or HTTPS.");
|
||
}
|
||
}
|
||
|
||
captureBtn.addEventListener('click', () => {
|
||
const context = canvas.getContext('2d');
|
||
context.drawImage(video, 0, 0, canvas.width, canvas.height);
|
||
const dataUrl = canvas.toDataURL('image/png');
|
||
snapshot.src = dataUrl;
|
||
cameraInput.value = dataUrl;
|
||
});
|
||
|
||
startCamera();
|
||
});
|
||
</script> --}}
|
||
|
||
|
||
|
||
{{-- <div x-data="cameraCapture()" x-init="initCamera()" class="space-y-2" wire:ignore class="space-y-2">
|
||
<video x-ref="video" width="320" height="240" autoplay playsinline class="border rounded"></video>
|
||
<canvas x-ref="canvas" width="320" height="240" class="hidden"></canvas>
|
||
<img x-ref="snapshot" class="hidden border rounded max-w-full"> --}}
|
||
|
||
{{-- <div x-data="cameraCapture()" x-init="initCamera()" wire:ignore class="space-y-2">
|
||
<video
|
||
x-ref="video"
|
||
autoplay
|
||
playsinline
|
||
class="border rounded w-80 h-auto"
|
||
></video>
|
||
|
||
<!-- no need to fix width/height here either -->
|
||
<canvas x-ref="canvas" class="hidden"></canvas>
|
||
|
||
<img x-ref="snapshot" class="hidden border rounded max-w-full"> --}}
|
||
|
||
|
||
{{--
|
||
<div class="flex space-x-8 mt-2">
|
||
<x-filament::button color="primary" @click="capturePhoto" x-show="!photoTaken">Capture</x-filament::button>
|
||
<x-filament::button color="primary" @click="retakePhoto" x-show="photoTaken">Retake</x-filament::button>
|
||
<x-filament::button color="primary" @click="switchCamera" x-show="!photoTaken">Switch Camera</x-filament::button>
|
||
<x-filament::button color="primary" @click="verify" x-show="photoTaken">Verify</x-filament::button>
|
||
</div> --}}
|
||
{{-- <div class="flex space-x-2 mt-2">
|
||
<x-filament::button color="primary" @click="capturePhoto" x-show="!photoTaken" class="inline-flex w-auto">Capture</x-filament::button>
|
||
<x-filament::button color="primary" @click="retakePhoto" x-show="photoTaken" class="inline-flex w-auto">Retake</x-filament::button>
|
||
<x-filament::button color="primary" @click="switchCamera" x-show="!photoTaken" class="inline-flex w-auto">Switch Camera</x-filament::button>
|
||
<x-filament::button color="primary" @click="verify" x-show="photoTaken" class="inline-flex w-auto">Verify</x-filament::button>
|
||
</div> --}}
|
||
|
||
{{-- <input type="hidden" name="{{ $getName() }}" x-ref="hiddenInput"> --}}
|
||
{{-- <input type="hidden" x-ref="hiddenInput" name="camera_capture"> --}}
|
||
{{-- <input type="hidden" x-ref="hiddenInput" id="camera_capture_field" name="camera_capture_file">
|
||
|
||
</div>
|
||
|
||
|
||
<script src="https://cdn.jsdelivr.net/npm/tesseract.js@4.1.2/dist/tesseract.min.js"></script>
|
||
<script>
|
||
|
||
// function cameraCapture()
|
||
// {
|
||
// return
|
||
// {
|
||
// stream: null,
|
||
// currentFacingMode: 'user', // 'user' = front, 'environment' = back
|
||
// photoTaken: false,
|
||
// photo1: '',
|
||
|
||
// async initCamera() {
|
||
// try {
|
||
// if (this.stream) {
|
||
// this.stream.getTracks().forEach(track => track.stop());
|
||
// }
|
||
|
||
// this.stream = await navigator.mediaDevices.getUserMedia({
|
||
// video: { facingMode: this.currentFacingMode }
|
||
// });
|
||
|
||
// this.$refs.video.srcObject = this.stream;
|
||
// } catch (err) {
|
||
// console.error("Camera error:", err);
|
||
// alert("Cannot access camera. Enable permissions or use HTTPS.");
|
||
// }
|
||
// },
|
||
|
||
// async switchCamera() {
|
||
// this.currentFacingMode = this.currentFacingMode === 'user' ? 'environment' : 'user';
|
||
// await this.initCamera();
|
||
// },
|
||
|
||
// capturePhoto() {
|
||
// const video = this.$refs.video;
|
||
// const canvas = this.$refs.canvas;
|
||
// const snapshot = this.$refs.snapshot;
|
||
// const context = canvas.getContext('2d');
|
||
|
||
// context.drawImage(video, 0, 0, canvas.width, canvas.height);
|
||
// const dataUrl = canvas.toDataURL('image/png');
|
||
|
||
// // stop camera stream after capture
|
||
// if (this.stream) {
|
||
// this.stream.getTracks().forEach(track => track.stop());
|
||
// }
|
||
|
||
// snapshot.src = dataUrl;
|
||
// snapshot.classList.remove('hidden');
|
||
// video.classList.add('hidden');
|
||
// this.photoTaken = true;
|
||
|
||
// // this.photo1 = dataUrl;
|
||
// this.$refs.hiddenInput.value = dataUrl;
|
||
// // @this.set('photo1', dataUrl);
|
||
// console.log('Captured Image:', dataUrl);
|
||
// },
|
||
|
||
// async verifyOCR(dataUrl) {
|
||
// try {
|
||
// const { data: { text } } = await Tesseract.recognize(
|
||
// dataUrl,
|
||
// 'eng', // language
|
||
// { logger: m => console.log(m) } // optional
|
||
// );
|
||
// alert("OCR Result: " + text);
|
||
// } catch (err) {
|
||
// console.error(err);
|
||
// alert("OCR Failed: " + err.message);
|
||
// }
|
||
// },
|
||
|
||
// async verify() {
|
||
// const dataUrl = this.$refs.hiddenInput.value;
|
||
// if (!dataUrl) {
|
||
// alert("No captured image found!");
|
||
// return;
|
||
// }
|
||
// await this.verifyOCR(dataUrl);
|
||
// },
|
||
|
||
// async retakePhoto() {
|
||
// this.photoTaken = false;
|
||
// this.$refs.snapshot.classList.add('hidden');
|
||
// this.$refs.video.classList.remove('hidden');
|
||
// await this.initCamera();
|
||
// }
|
||
// }
|
||
|
||
// }
|
||
|
||
function cameraCapture() {
|
||
return {
|
||
stream: null,
|
||
currentFacingMode: 'user',
|
||
photoTaken: false,
|
||
photo1: '',
|
||
|
||
async initCamera() {
|
||
try {
|
||
if (this.stream) this.stream.getTracks().forEach(track => track.stop());
|
||
|
||
this.stream = await navigator.mediaDevices.getUserMedia({
|
||
video: { facingMode: this.currentFacingMode }
|
||
});
|
||
|
||
this.$refs.video.srcObject = this.stream;
|
||
} catch (err) {
|
||
console.error("Camera error:", err);
|
||
alert("Cannot access camera. Enable permissions or use HTTPS.");
|
||
}
|
||
},
|
||
|
||
async switchCamera() {
|
||
this.currentFacingMode = this.currentFacingMode === 'user' ? 'environment' : 'user';
|
||
await this.initCamera();
|
||
},
|
||
|
||
capturePhoto() {
|
||
const video = this.$refs.video;
|
||
const canvas = this.$refs.canvas;
|
||
const snapshot = this.$refs.snapshot;
|
||
const context = canvas.getContext('2d');
|
||
|
||
canvas.width = video.videoWidth;
|
||
canvas.height = video.videoHeight;
|
||
|
||
context.drawImage(video, 0, 0, canvas.width, canvas.height);
|
||
|
||
//const dataUrl = canvas.toDataURL('image/png');
|
||
const dataUrl = canvas.toDataURL('image/jpeg', 0.95);
|
||
|
||
if (this.stream) this.stream.getTracks().forEach(track => track.stop());
|
||
|
||
snapshot.src = dataUrl;
|
||
snapshot.classList.remove('hidden');
|
||
video.classList.add('hidden');
|
||
this.photoTaken = true;
|
||
|
||
this.$refs.hiddenInput.value = dataUrl;
|
||
console.log('Captured Image:', dataUrl);
|
||
},
|
||
|
||
async verifyOCR(dataUrl) {
|
||
try {
|
||
const { data: { text } } = await Tesseract.recognize(
|
||
dataUrl,
|
||
'eng',
|
||
{ logger: m => console.log(m) }
|
||
);
|
||
alert("OCR Result: " + text);
|
||
} catch (err) {
|
||
console.error(err);
|
||
alert("OCR Failed: " + err.message);
|
||
}
|
||
}, // <-- COMMA ADDED HERE
|
||
|
||
async verify() {
|
||
const dataUrl = this.$refs.hiddenInput.value;
|
||
if (!dataUrl) {
|
||
alert("No captured image found!");
|
||
return;
|
||
}
|
||
await this.verifyOCR(dataUrl);
|
||
},
|
||
|
||
async retakePhoto() {
|
||
this.photoTaken = false;
|
||
this.$refs.snapshot.classList.add('hidden');
|
||
this.$refs.video.classList.remove('hidden');
|
||
await this.initCamera();
|
||
}
|
||
}
|
||
}
|
||
</script> --}}
|
||
|
||
|
||
{{-- //..Another Option --}}
|
||
|
||
{{-- <div x-data="cameraCapture()" x-init="initCamera()" wire:ignore class="space-y-2">
|
||
<video
|
||
x-ref="video"
|
||
autoplay
|
||
playsinline
|
||
class="border rounded w-80 h-auto"
|
||
></video>
|
||
|
||
<!-- OCR Highlight Layer -->
|
||
<canvas
|
||
x-ref="overlay"
|
||
class="border rounded w-80 h-auto"
|
||
style="position:absolute; top:0; left:0; pointer-events:none;"
|
||
></canvas>
|
||
|
||
<canvas x-ref="canvas" class="hidden"></canvas>
|
||
|
||
<!-- <img x-ref="snapshot" class="hidden border rounded max-w-full"> -->
<!-- <img x-ref="snapshot"
    class="hidden border rounded"
    style="width: 100%; max-width: 350px; height: auto;"> -->
|
||
|
||
|
||
{{-- <div class="flex space-x-4 mt-2">
|
||
<x-filament::button color="primary" @click="capturePhoto" x-show="!photoTaken">Capture</x-filament::button>
|
||
<x-filament::button color="primary" @click="retakePhoto" x-show="photoTaken">Retake</x-filament::button>
|
||
<x-filament::button color="primary" @click="switchCamera" x-show="!photoTaken" class="inline-flex w-auto">Switch Camera</x-filament::button>
|
||
<x-filament::button color="primary" @click="verify" x-show="photoTaken" class="inline-flex w-auto">Verify</x-filament::button>
|
||
<x-filament::button color="success" @click="uploadCroppedImage" x-show="photoTaken">OK ✔ Upload Cropped</x-filament::button>
|
||
<x-filament::button color="success" @click="uploadOcr" x-show="photoTaken">Upload OCR</x-filament::button>
|
||
</div>
|
||
|
||
<input type="hidden" x-ref="hiddenInput" x-model="photo1" name="camera_capture_file">
|
||
<input type="hidden" x-ref="serialInput" name="serialNumbers">
|
||
|
||
</div> --}}
|
||
|
||
{{-- <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/cropperjs/1.5.13/cropper.min.css">
|
||
<script src="https://cdnjs.cloudflare.com/ajax/libs/cropperjs/1.5.13/cropper.min.js"></script>
|
||
<script src="https://cdn.jsdelivr.net/npm/tesseract.js@4.1.3/dist/tesseract.min.js"></script> --}}
|
||
|
||
|
||
{{-- <script>
|
||
function cameraCapture() {
|
||
return {
|
||
stream: null,
|
||
currentFacingMode: 'user',
|
||
photoTaken: false,
|
||
photo1: '',
|
||
textDetectionInterval: null,
|
||
worker: null, --}}
|
||
|
||
|
||
{{-- // async initCamera() {
|
||
// try {
|
||
// if (this.stream) this.stream.getTracks().forEach(track => track.stop());
|
||
|
||
// this.stream = await navigator.mediaDevices.getUserMedia({
|
||
// video: { facingMode: this.currentFacingMode }
|
||
// });
|
||
|
||
// this.$refs.video.srcObject = this.stream;
|
||
// //this.startDetection();
|
||
// } catch (err) {
|
||
// console.error("Camera error:", err);
|
||
// alert("Cannot access camera. Enable permissions or use HTTPS.");
|
||
// }
|
||
// },
|
||
|
||
async initCamera() {
|
||
try {
|
||
if (this.stream) this.stream.getTracks().forEach(track => track.stop());
|
||
|
||
const video = this.$refs.video;
|
||
this.stream = await navigator.mediaDevices.getUserMedia({
|
||
video: { facingMode: this.currentFacingMode }
|
||
});
|
||
|
||
video.srcObject = this.stream;
|
||
|
||
await new Promise(resolve => video.onloadedmetadata = resolve);
|
||
|
||
// Overlay size
|
||
const overlay = this.$refs.overlay;
|
||
overlay.width = video.videoWidth;
|
||
overlay.height = video.videoHeight;
|
||
|
||
// Initialize Tesseract Worker
|
||
if (!this.worker) {
|
||
this.worker = Tesseract.createWorker({
|
||
logger: m => console.log(m)
|
||
});
|
||
await this.worker.load();
|
||
await this.worker.loadLanguage('eng');
|
||
await this.worker.initialize('eng');
|
||
}
|
||
|
||
this.startDetection();
|
||
} catch (err) {
|
||
console.error("Camera error:", err);
|
||
//alert("Cannot access camera. Enable permissions or use HTTPS.");
|
||
alert("Camera error:\n" + (err.message || err));
|
||
}
|
||
},
|
||
|
||
|
||
async switchCamera() {
|
||
this.currentFacingMode = this.currentFacingMode === 'user' ? 'environment' : 'user';
|
||
await this.initCamera();
|
||
},
|
||
|
||
async capturePhoto() {
|
||
const video = this.$refs.video;
|
||
const canvas = this.$refs.canvas;
|
||
const ctx = canvas.getContext('2d');
|
||
|
||
canvas.width = video.videoWidth;
|
||
canvas.height = video.videoHeight;
|
||
ctx.drawImage(video, 0, 0);
|
||
|
||
const snapshot = this.$refs.snapshot;
|
||
snapshot.src = canvas.toDataURL('image/png');
|
||
|
||
|
||
|
||
//Wait until image is loaded
|
||
// snapshot.onload = () => {
|
||
// snapshot.classList.remove('hidden');
|
||
// video.classList.add('hidden');
|
||
|
||
// //Alpine reactive update inside nextTick
|
||
// this.$nextTick(() => {
|
||
// this.photoTaken = true;
|
||
|
||
// //Destroy old cropper if exists
|
||
// if (this.cropper) this.cropper.destroy();
|
||
|
||
// // ✅ Use requestAnimationFrame to ensure browser painted the image
|
||
// requestAnimationFrame(() => {
|
||
// this.cropper = new Cropper(snapshot, {
|
||
// aspectRatio: NaN,
|
||
// dragMode: 'crop',
|
||
// viewMode: 1,
|
||
// autoCropArea: 0.8,
|
||
// background: true,
|
||
// movable: true,
|
||
// zoomable: true,
|
||
// responsive: true,
|
||
// });
|
||
// console.log("✅ Cropper initialized");
|
||
// });
|
||
|
||
// this.stopCamera(); // stop camera after Cropper starts
|
||
// });
|
||
// };
|
||
|
||
},
|
||
|
||
//
|
||
async uploadCroppedImage() {
|
||
|
||
if (!this.cropper) {
|
||
alert("Crop the image before upload!");
|
||
return;
|
||
}
|
||
|
||
const croppedCanvas = this.cropper.getCroppedCanvas({ imageSmoothingEnabled: true });
|
||
|
||
croppedCanvas.toBlob(async blob => {
|
||
|
||
const formData = new FormData();
|
||
formData.append('photo', blob, 'cropped.png');
|
||
|
||
const response = await fetch('/temp-upload', {
|
||
method: 'POST',
|
||
headers: { 'X-CSRF-TOKEN': '{{ csrf_token() }}' },
|
||
body: formData
|
||
});
|
||
|
||
const data = await response.json();
|
||
|
||
if (data.success) {
|
||
this.$refs.hiddenInput.value = data.path;
|
||
alert("✅ Cropped image uploaded!");
|
||
} else {
|
||
alert("Upload failed!");
|
||
}
|
||
}, "image/png");
|
||
},
|
||
|
||
async verify() {
|
||
const filePath = this.$refs.hiddenInput.value; // e.g., "temp/capture_1760764396.jpeg"
|
||
|
||
if (!filePath) {
|
||
alert("No captured image found!");
|
||
return;
|
||
}
|
||
|
||
try {
|
||
const response = await fetch('/verify-ocr', {
|
||
method: 'POST',
|
||
headers: {
|
||
'Content-Type': 'application/json',
|
||
'X-CSRF-TOKEN': '{{ csrf_token() }}'
|
||
},
|
||
body: JSON.stringify({ path: filePath })
|
||
});
|
||
|
||
const data = await response.json();
|
||
|
||
console.log(data);
|
||
|
||
// if (data.success) {
|
||
// alert("OCR Result: " + data.text);
|
||
// console.error(data.text);
|
||
// }
|
||
if (data.success) {
|
||
// const serials = Array.isArray(data.text) ? data.text.join("\n") : data.text;
|
||
// alert("OCR Result:\n" + serials);
|
||
// console.log(serials);
|
||
const serials = Array.isArray(data.text) ? data.text : [data.text];
|
||
const firstFour = serials.slice(0, 4);
|
||
|
||
// Emit Livewire event to Resource Page
|
||
// window.dispatchEvent(new CustomEvent('set-serial-numbers', {
|
||
// detail: { serialNumbers: firstFour }
|
||
// }));
|
||
// Fill hidden input for Filament action
|
||
this.$refs.serialInput.value = JSON.stringify(firstFour);
|
||
|
||
alert("OCR Result:\n" + firstFour.join("\n"));
|
||
console.log("Serials sent to Resource Page:", firstFour);
|
||
}
|
||
else {
|
||
alert("OCR Failed: " + data.error);
|
||
console.error(data.error);
|
||
}
|
||
} catch (err) {
|
||
console.error(err.message);
|
||
alert("OCR request failed: " + err.message);
|
||
}
|
||
},
|
||
|
||
async retakePhoto() {
|
||
this.photoTaken = false;
|
||
this.$refs.snapshot.classList.add('hidden');
|
||
this.$refs.video.classList.remove('hidden');
|
||
this.cropper?.destroy();
|
||
await this.initCamera();
|
||
},
|
||
|
||
async detectText() {
|
||
const video = this.$refs.video;
|
||
const overlay = this.$refs.overlay;
|
||
const ctx = overlay.getContext("2d");
|
||
|
||
if (!video.videoWidth) return;
|
||
|
||
// Draw video frame to temp canvas
|
||
const tempCanvas = document.createElement('canvas');
|
||
tempCanvas.width = video.videoWidth;
|
||
tempCanvas.height = video.videoHeight;
|
||
const tempCtx = tempCanvas.getContext('2d');
|
||
tempCtx.drawImage(video, 0, 0);
|
||
|
||
// Run OCR in worker
|
||
const { data: { words } } = await this.worker.recognize(tempCanvas);
|
||
|
||
// Clear overlay
|
||
ctx.clearRect(0, 0, overlay.width, overlay.height);
|
||
|
||
ctx.strokeStyle = 'lime';
|
||
ctx.lineWidth = 2;
|
||
|
||
words.forEach(w => {
|
||
if (!w.bbox || w.confidence < 50) return;
|
||
|
||
const { x0, y0, x1, y1 } = w.bbox;
|
||
ctx.strokeRect(x0, y0, x1 - x0, y1 - y0);
|
||
});
|
||
},
|
||
|
||
startDetection() {
|
||
if (this.textDetectionInterval) {
|
||
clearInterval(this.textDetectionInterval);
|
||
}
|
||
this.textDetectionInterval = setInterval(() => this.detectText(), 1000);
|
||
},
|
||
|
||
// Initialize camera and detection
|
||
async init() {
|
||
await this.initCamera();
|
||
this.startDetection();
|
||
}
|
||
}
|
||
}
|
||
|
||
</script> --}}
|
||
|
||
{{-- //.. --}}
|
||
|
||
{{-- <div x-data="cameraCapture()" x-init="initCamera()" wire:ignore class="relative space-y-2">
|
||
<!-- Video feed -->
|
||
<video
|
||
x-ref="video"
|
||
autoplay
|
||
playsinline
|
||
class="border rounded w-80 h-auto"
|
||
style="display:block;"
|
||
></video>
|
||
|
||
<!-- Overlay canvas for OCR highlight -->
|
||
<canvas
|
||
x-ref="overlay"
|
||
class="border rounded w-80 h-auto"
|
||
style="position:absolute; top:0; left:0; pointer-events:none;"
|
||
></canvas>
|
||
|
||
<!-- Hidden canvas for capturing snapshot if needed -->
|
||
<canvas x-ref="canvas" class="hidden"></canvas>
|
||
|
||
<div class="flex space-x-4 mt-2">
|
||
<x-filament::button color="primary" @click="switchCamera">Switch Camera</x-filament::button>
|
||
<x-filament::button color="success" @click="capturePhoto">Capture Photo</x-filament::button>
|
||
</div>
|
||
|
||
<input type="hidden" x-ref="hiddenInput" name="camera_capture_file">
|
||
</div> --}}
|
||
|
||
<!-- Scripts -->
|
||
|
||
{{-- <script src="https://cdn.jsdelivr.net/npm/tesseract.js@2.1.5/dist/tesseract.min.js"></script>
|
||
|
||
<script>
|
||
function cameraCapture() {
|
||
return {
|
||
stream: null,
|
||
currentFacingMode: 'user',
|
||
textDetectionInterval: null,
|
||
|
||
async initCamera() {
|
||
try {
|
||
// Stop any existing streams
|
||
if (this.stream) this.stream.getTracks().forEach(track => track.stop());
|
||
|
||
const video = this.$refs.video;
|
||
this.stream = await navigator.mediaDevices.getUserMedia({
|
||
video: { facingMode: this.currentFacingMode }
|
||
});
|
||
|
||
video.srcObject = this.stream;
|
||
|
||
// Wait for metadata to load
|
||
await new Promise(resolve => video.onloadedmetadata = resolve);
|
||
video.play();
|
||
|
||
// Overlay size matches video
|
||
const overlay = this.$refs.overlay;
|
||
overlay.width = video.videoWidth;
|
||
overlay.height = video.videoHeight;
|
||
|
||
// Start OCR detection
|
||
this.startDetection();
|
||
|
||
} catch (err) {
|
||
console.error("Camera error:", err);
|
||
alert("Camera error:\n" + (err.message || err));
|
||
}
|
||
},
|
||
|
||
async switchCamera() {
|
||
this.currentFacingMode = this.currentFacingMode === 'user' ? 'environment' : 'user';
|
||
await this.initCamera();
|
||
},
|
||
|
||
async capturePhoto() {
|
||
const video = this.$refs.video;
|
||
const canvas = this.$refs.canvas;
|
||
const ctx = canvas.getContext('2d');
|
||
|
||
canvas.width = video.videoWidth;
|
||
canvas.height = video.videoHeight;
|
||
ctx.drawImage(video, 0, 0);
|
||
|
||
// Save captured image to hidden input (optional)
|
||
const snapshot = canvas.toDataURL('image/png');
|
||
this.$refs.hiddenInput.value = snapshot;
|
||
|
||
alert("Photo captured!");
|
||
},
|
||
|
||
async detectText() {
|
||
const video = this.$refs.video;
|
||
const overlay = this.$refs.overlay;
|
||
const ctx = overlay.getContext('2d');
|
||
|
||
if (!video.videoWidth) return;
|
||
|
||
const tempCanvas = document.createElement('canvas');
|
||
tempCanvas.width = video.videoWidth;
|
||
tempCanvas.height = video.videoHeight;
|
||
const tempCtx = tempCanvas.getContext('2d');
|
||
tempCtx.drawImage(video, 0, 0);
|
||
|
||
try {
|
||
const result = await Tesseract.recognize(tempCanvas, 'eng', {
|
||
logger: m => console.log(m)
|
||
});
|
||
|
||
const words = result.data.words;
|
||
|
||
ctx.clearRect(0, 0, overlay.width, overlay.height);
|
||
ctx.strokeStyle = 'lime';
|
||
ctx.lineWidth = 2;
|
||
|
||
words.forEach(w => {
|
||
if (!w.bbox || w.confidence < 50) return;
|
||
|
||
const { x0, y0, x1, y1 } = w.bbox;
|
||
ctx.strokeRect(x0, y0, x1 - x0, y1 - y0);
|
||
});
|
||
|
||
} catch (err) {
|
||
console.error("OCR error:", err);
|
||
}
|
||
},
|
||
|
||
startDetection() {
|
||
if (this.textDetectionInterval) clearInterval(this.textDetectionInterval);
|
||
this.textDetectionInterval = setInterval(() => this.detectText(), 1000);
|
||
}
|
||
}
|
||
}
|
||
</script> --}}
|
||
|
||
{{-- .. --}}
|
||
|
||
{{-- Camera component root: Alpine data comes from cameraCapture() (defined in the
     <script> at the bottom of this view). Alpine runs the component's init()
     automatically on mount; x-init additionally starts the camera stream.
     wire:ignore keeps Livewire from re-rendering the live video/canvas DOM. --}}
<div x-data="cameraCapture()" x-init="initCamera()" wire:ignore class="relative space-y-2">
    <!-- Video feed (live getUserMedia stream) -->
    <video
        x-ref="video"
        autoplay
        playsinline
        class="border rounded w-80 h-auto"
        style="display:block;"
    ></video>

    <!-- Image Preview (still shown after Capture; hidden while streaming) -->
    {{-- <img x-ref="snapshot" class="border rounded w-80 h-auto hidden" /> --}}

    <img
        x-ref="snapshot"
        class="absolute top-0 left-0 w-full h-full border rounded hidden"
    />
    <!-- Overlay canvas for OCR highlight (word bounding boxes drawn by detectText) -->
    <canvas
        x-ref="overlay"
        class="border rounded w-80 h-auto"
        style="position:absolute; top:0; left:0; pointer-events:none;"
    ></canvas>

    <!-- Hidden canvas used as the capture target for snapshots -->
    <canvas x-ref="canvas" class="hidden"></canvas>

    <div class="flex space-x-4 mt-2">
        <x-filament::button color="primary" @click="switchCamera">Switch Camera</x-filament::button>
        <x-filament::button color="success" @click="capturePhoto">Capture Photo</x-filament::button>
        <x-filament::button color="warning" @click="verifyPhoto">Verify</x-filament::button>
        <x-filament::button color="primary" @click="retakePhoto">Retake</x-filament::button>
    </div>

    <!-- Captured image data URL (written by capturePhoto) -->
    <input type="hidden" x-ref="hiddenInput" name="camera_capture_file">
    {{-- <input type="hidden" x-ref="serialInput" name="serialNumbers"> --}}
    {{-- <input type="hidden" x-model="serialNumbers" name="serialNumbers"> --}}
    <!-- NOTE(review): two serial inputs are submitted — 'serialNumbers' (x-model on an
         array renders a comma-joined string) and 'serial_numbers' (JSON string written
         by verifyPhoto). Confirm which one the server actually consumes. -->
    <input type="hidden" x-model="serialNumbers" name="serialNumbers">
    <input type="hidden" x-ref="hiddenInputSerials" name="serial_numbers">

</div>
|
||
|
||
<!-- Scripts -->
|
||
{{-- <script src="https://cdn.jsdelivr.net/npm/tesseract.js@2.1.5/dist/tesseract.min.js"></script> --}}
|
||
<script src="https://cdn.jsdelivr.net/npm/tesseract.js@4/dist/tesseract.min.js"></script>
|
||
|
||
|
||
<script>
|
||
|
||
/**
 * Alpine.js component for the camera-capture field.
 *
 * Responsibilities:
 *  - stream the camera into <video x-ref="video">,
 *  - run a live Tesseract OCR pass ~every 1.2s and draw word boxes on the overlay,
 *  - capture a still frame into a hidden input,
 *  - on Verify: OCR the still, extract serial numbers, and POST them to the session.
 *
 * Fixes vs. previous version:
 *  - added the missing comma after verifyPhoto() (the object literal was a SyntaxError),
 *  - image load handlers are attached before setting src (no missed onload race),
 *  - the session-save fetch is awaited and checked, so failures surface in the catch,
 *  - isDetecting is released in a finally block so one failed pass can't wedge live OCR.
 */
function cameraCapture() {
    return {
        // --- state ---
        stream: null,               // active MediaStream, null when camera is off
        currentFacingMode: 'user',  // 'user' = front camera, 'environment' = back
        textDetectionInterval: null,
        capturedPhoto: null,        // data URL of the last captured still
        serialNumbers: [],

        // OCR worker state
        ocrWorker: null,
        isWorkerReady: false,
        isDetecting: false,         // guards against overlapping recognize() calls

        // Reusable scratch canvas — avoids allocating a canvas per OCR pass
        tempCanvas: null,
        tempCtx: null,

        // Alpine calls init() automatically on mount (x-init separately starts the camera).
        async init() {
            this.tempCanvas = document.createElement('canvas');
            this.tempCtx = this.tempCanvas.getContext('2d');
            await this.initWorker();
        },

        // Create and prime the Tesseract worker exactly once (tesseract.js v4 API).
        async initWorker() {
            if (this.ocrWorker) return;

            console.log("⏳ Loading OCR worker...");

            this.ocrWorker = await Tesseract.createWorker({
                logger: m => console.log(m.status, m.progress)
            });

            await this.ocrWorker.loadLanguage("eng");
            await this.ocrWorker.initialize("eng");

            this.isWorkerReady = true;
            console.log("✅ OCR Worker Ready");
        },

        // Start (or restart) the camera stream, size the overlay, and begin live OCR.
        async initCamera() {
            try {
                if (this.stream) {
                    this.stream.getTracks().forEach(t => t.stop());
                }

                const video = this.$refs.video;

                this.stream = await navigator.mediaDevices.getUserMedia({
                    video: { facingMode: this.currentFacingMode }
                });

                video.srcObject = this.stream;
                await new Promise(res => video.onloadedmetadata = res);
                video.play();

                // Overlay must match the intrinsic video resolution so OCR
                // bounding boxes line up with the frame.
                const overlay = this.$refs.overlay;
                overlay.width = video.videoWidth;
                overlay.height = video.videoHeight;

                // One detection loop per camera start.
                this.startDetection();

            } catch (err) {
                console.error("Camera error:", err);
                alert("Camera Error:\n" + err.message);
                this.stopDetection();
            }
        },

        async switchCamera() {
            this.currentFacingMode =
                this.currentFacingMode === "user" ? "environment" : "user";
            await this.initCamera();
        },

        // One live-OCR pass: snapshot the current frame, recognize, draw word boxes.
        async detectText() {
            if (!this.isWorkerReady || this.isDetecting) return;
            this.isDetecting = true;

            try {
                const video = this.$refs.video;
                const overlay = this.$refs.overlay;
                const ctx = overlay.getContext("2d");

                // Video not producing frames yet — skip this tick.
                if (!video.videoWidth) return;

                this.tempCanvas.width = video.videoWidth;
                this.tempCanvas.height = video.videoHeight;
                this.tempCtx.drawImage(video, 0, 0);

                const result = await this.ocrWorker.recognize(this.tempCanvas);
                const words = result.data.words || [];

                ctx.clearRect(0, 0, overlay.width, overlay.height);
                ctx.strokeStyle = "lime";
                ctx.lineWidth = 2;

                words.forEach(w => {
                    if (!w.bbox || w.confidence < 50) return;
                    const { x0, y0, x1, y1 } = w.bbox;
                    ctx.strokeRect(x0, y0, x1 - x0, y1 - y0);
                });

            } catch (err) {
                console.error("Live OCR error:", err);
            } finally {
                // Always release the guard, even on early return or throw.
                this.isDetecting = false;
            }
        },

        // OCR the captured still, extract serial numbers, persist them to the session.
        async verifyPhoto() {
            if (!this.capturedPhoto) {
                alert("Please capture a photo first!");
                return;
            }

            if (!this.isWorkerReady) {
                alert("OCR worker not ready yet!");
                return;
            }

            try {
                const img = new Image();
                // Attach handlers BEFORE assigning src so a synchronously
                // decoded data URL cannot miss the load event.
                const loaded = new Promise((resolve, reject) => {
                    img.onload = resolve;
                    img.onerror = () => reject(new Error("Could not decode captured photo"));
                });
                img.src = this.capturedPhoto;
                await loaded;

                // Reuse the shared temp canvas (no per-call allocation).
                this.tempCanvas.width = img.width;
                this.tempCanvas.height = img.height;
                this.tempCtx.drawImage(img, 0, 0);

                const result = await this.ocrWorker.recognize(this.tempCanvas);
                const detectedText = result.data.text.trim();
                console.log("Detected Text:", detectedText);

                // Prefer an explicitly labelled serial ("Serial No: XXXX");
                // otherwise take the first 4 alphanumeric runs of 4+ chars.
                const serialWithLabelRegex = /Serial\s*No[:\-]?\s*([A-Za-z0-9]+)/i;
                const match = detectedText.match(serialWithLabelRegex);

                if (match && match[1]) {
                    this.serialNumbers = [match[1].trim()];
                    console.log("Serial with Label:", this.serialNumbers[0]);
                } else {
                    const generalNums = detectedText.match(/[A-Za-z0-9]{4,}/g) || [];
                    this.serialNumbers = generalNums.slice(0, 4);

                    if (this.serialNumbers.length === 0) {
                        alert("No serial numbers detected!");
                        return;
                    }

                    console.log("Serial Numbers List:", this.serialNumbers);
                }

                this.$refs.hiddenInputSerials.value = JSON.stringify(this.serialNumbers);
                alert("Serial numbers:\n" + this.$refs.hiddenInputSerials.value);

                // Awaited (not a floating .then chain) so network/HTTP failures
                // land in this try/catch instead of an unhandled rejection.
                const response = await fetch('/save-serials-to-session', {
                    method: 'POST',
                    credentials: 'same-origin',
                    headers: {
                        'Content-Type': 'application/json',
                        'X-CSRF-TOKEN': document.querySelector('meta[name="csrf-token"]').content,
                    },
                    body: JSON.stringify({
                        serial_numbers: this.serialNumbers,
                    }),
                });

                if (!response.ok) {
                    throw new Error("Session save failed: HTTP " + response.status);
                }

                const data = await response.json();
                console.log("Session Updated:", data);
                alert("✅ Serial numbers saved to session!");

            } catch (err) {
                console.error("OCR verify error:", err);
                alert("OCR verify failed:\n" + (err.message || err));
            }
        },

        startDetection() {
            if (this.textDetectionInterval) {
                clearInterval(this.textDetectionInterval);
            }

            // Run one pass immediately, then continuously every 1.2s.
            this.detectText();
            this.textDetectionInterval = setInterval(() => {
                this.detectText();
            }, 1200);
        },

        stopDetection() {
            if (this.textDetectionInterval) {
                clearInterval(this.textDetectionInterval);
                this.textDetectionInterval = null;
            }
        },

        // Capture a still frame, freeze the UI, and stop OCR + camera.
        async capturePhoto() {
            const video = this.$refs.video;
            const canvas = this.$refs.canvas;
            const ctx = canvas.getContext("2d");

            canvas.width = video.videoWidth;
            canvas.height = video.videoHeight;
            ctx.drawImage(video, 0, 0);

            const snapshotData = canvas.toDataURL("image/png");
            this.$refs.hiddenInput.value = snapshotData;
            this.capturedPhoto = snapshotData;

            // Stop detection + camera while the still is displayed.
            this.stopDetection();
            if (this.stream) this.stream.getTracks().forEach(t => t.stop());

            // Switch UI: hide live elements, show the snapshot.
            video.classList.add("hidden");
            this.$refs.overlay.classList.add("hidden");

            const snap = this.$refs.snapshot;
            snap.src = snapshotData;
            snap.classList.remove("hidden");

            alert("Photo captured!");
        },

        // Return to the live view; initCamera() restarts detection itself.
        async retakePhoto() {
            this.$refs.snapshot.classList.add("hidden");
            this.$refs.video.classList.remove("hidden");

            await this.initCamera();

            const overlay = this.$refs.overlay;
            const ctx = overlay.getContext("2d");
            ctx.clearRect(0, 0, overlay.width, overlay.height);
            overlay.classList.remove("hidden");
        }
    };
}
|
||
|
||
</script>
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|