Files
pds/resources/views/fields/camera-capture.blade.php
2025-10-25 17:34:59 +05:30

879 lines
30 KiB
PHP

{{-- <div>
<video id="video" width="320" height="240" autoplay playsinline style="border:1px solid #ccc;"></video>
<br>
<button type="button" id="captureBtn" class="mt-2 px-4 py-2 bg-blue-600 text-white rounded">Capture</button>
<canvas id="canvas" width="320" height="240" style="display:none;"></canvas>
<img id="snapshot" style="margin-top:10px; max-width:100%;">
<input type="hidden" id="camera_image" name="{{ $getName() }}">
</div>
<script>
document.addEventListener('DOMContentLoaded', () => {
const video = document.getElementById('video');
const canvas = document.getElementById('canvas');
const captureBtn = document.getElementById('captureBtn');
const snapshot = document.getElementById('snapshot');
const cameraInput = document.getElementById('camera_image');
async function startCamera() {
try {
const stream = await navigator.mediaDevices.getUserMedia({
video: { facingMode: "user" } // front camera
});
video.srcObject = stream;
} catch (err) {
console.error("Camera error: ", err);
alert("Cannot access camera. Check permissions or HTTPS.");
}
}
captureBtn.addEventListener('click', () => {
const context = canvas.getContext('2d');
context.drawImage(video, 0, 0, canvas.width, canvas.height);
const dataUrl = canvas.toDataURL('image/png');
snapshot.src = dataUrl;
cameraInput.value = dataUrl;
});
startCamera();
});
</script> --}}
{{-- <div x-data="cameraCapture()" x-init="initCamera()" class="space-y-2" wire:ignore class="space-y-2">
<video x-ref="video" width="320" height="240" autoplay playsinline class="border rounded"></video>
<canvas x-ref="canvas" width="320" height="240" class="hidden"></canvas>
<img x-ref="snapshot" class="hidden border rounded max-w-full"> --}}
{{-- <div x-data="cameraCapture()" x-init="initCamera()" wire:ignore class="space-y-2">
<video
x-ref="video"
autoplay
playsinline
class="border rounded w-80 h-auto"
></video>
<!-- no need to fix width/height here either -->
<canvas x-ref="canvas" class="hidden"></canvas>
<img x-ref="snapshot" class="hidden border rounded max-w-full"> --}}
{{--
<div class="flex space-x-8 mt-2">
<x-filament::button color="primary" @click="capturePhoto" x-show="!photoTaken">Capture</x-filament::button>
<x-filament::button color="primary" @click="retakePhoto" x-show="photoTaken">Retake</x-filament::button>
<x-filament::button color="primary" @click="switchCamera" x-show="!photoTaken">Switch Camera</x-filament::button>
<x-filament::button color="primary" @click="verify" x-show="photoTaken">Verify</x-filament::button>
</div> --}}
{{-- <div class="flex space-x-2 mt-2">
<x-filament::button color="primary" @click="capturePhoto" x-show="!photoTaken" class="inline-flex w-auto">Capture</x-filament::button>
<x-filament::button color="primary" @click="retakePhoto" x-show="photoTaken" class="inline-flex w-auto">Retake</x-filament::button>
<x-filament::button color="primary" @click="switchCamera" x-show="!photoTaken" class="inline-flex w-auto">Switch Camera</x-filament::button>
<x-filament::button color="primary" @click="verify" x-show="photoTaken" class="inline-flex w-auto">Verify</x-filament::button>
</div> --}}
{{-- <input type="hidden" name="{{ $getName() }}" x-ref="hiddenInput"> --}}
{{-- <input type="hidden" x-ref="hiddenInput" name="camera_capture"> --}}
{{-- <input type="hidden" x-ref="hiddenInput" id="camera_capture_field" name="camera_capture_file">
</div>
<script src="https://cdn.jsdelivr.net/npm/tesseract.js@4.1.2/dist/tesseract.min.js"></script>
<script>
// function cameraCapture()
// {
// return
// {
// stream: null,
// currentFacingMode: 'user', // 'user' = front, 'environment' = back
// photoTaken: false,
// photo1: '',
// async initCamera() {
// try {
// if (this.stream) {
// this.stream.getTracks().forEach(track => track.stop());
// }
// this.stream = await navigator.mediaDevices.getUserMedia({
// video: { facingMode: this.currentFacingMode }
// });
// this.$refs.video.srcObject = this.stream;
// } catch (err) {
// console.error("Camera error:", err);
// alert("Cannot access camera. Enable permissions or use HTTPS.");
// }
// },
// async switchCamera() {
// this.currentFacingMode = this.currentFacingMode === 'user' ? 'environment' : 'user';
// await this.initCamera();
// },
// capturePhoto() {
// const video = this.$refs.video;
// const canvas = this.$refs.canvas;
// const snapshot = this.$refs.snapshot;
// const context = canvas.getContext('2d');
// context.drawImage(video, 0, 0, canvas.width, canvas.height);
// const dataUrl = canvas.toDataURL('image/png');
// // stop camera stream after capture
// if (this.stream) {
// this.stream.getTracks().forEach(track => track.stop());
// }
// snapshot.src = dataUrl;
// snapshot.classList.remove('hidden');
// video.classList.add('hidden');
// this.photoTaken = true;
// // this.photo1 = dataUrl;
// this.$refs.hiddenInput.value = dataUrl;
// // @this.set('photo1', dataUrl);
// console.log('Captured Image:', dataUrl);
// },
// async verifyOCR(dataUrl) {
// try {
// const { data: { text } } = await Tesseract.recognize(
// dataUrl,
// 'eng', // language
// { logger: m => console.log(m) } // optional
// );
// alert("OCR Result: " + text);
// } catch (err) {
// console.error(err);
// alert("OCR Failed: " + err.message);
// }
// },
// async verify() {
// const dataUrl = this.$refs.hiddenInput.value;
// if (!dataUrl) {
// alert("No captured image found!");
// return;
// }
// await this.verifyOCR(dataUrl);
// },
// async retakePhoto() {
// this.photoTaken = false;
// this.$refs.snapshot.classList.add('hidden');
// this.$refs.video.classList.remove('hidden');
// await this.initCamera();
// }
// }
// }
function cameraCapture() {
return {
stream: null,
currentFacingMode: 'user',
photoTaken: false,
photo1: '',
async initCamera() {
try {
if (this.stream) this.stream.getTracks().forEach(track => track.stop());
this.stream = await navigator.mediaDevices.getUserMedia({
video: { facingMode: this.currentFacingMode }
});
this.$refs.video.srcObject = this.stream;
} catch (err) {
console.error("Camera error:", err);
alert("Cannot access camera. Enable permissions or use HTTPS.");
}
},
async switchCamera() {
this.currentFacingMode = this.currentFacingMode === 'user' ? 'environment' : 'user';
await this.initCamera();
},
capturePhoto() {
const video = this.$refs.video;
const canvas = this.$refs.canvas;
const snapshot = this.$refs.snapshot;
const context = canvas.getContext('2d');
canvas.width = video.videoWidth;
canvas.height = video.videoHeight;
context.drawImage(video, 0, 0, canvas.width, canvas.height);
//const dataUrl = canvas.toDataURL('image/png');
const dataUrl = canvas.toDataURL('image/jpeg', 0.95);
if (this.stream) this.stream.getTracks().forEach(track => track.stop());
snapshot.src = dataUrl;
snapshot.classList.remove('hidden');
video.classList.add('hidden');
this.photoTaken = true;
this.$refs.hiddenInput.value = dataUrl;
console.log('Captured Image:', dataUrl);
},
async verifyOCR(dataUrl) {
try {
const { data: { text } } = await Tesseract.recognize(
dataUrl,
'eng',
{ logger: m => console.log(m) }
);
alert("OCR Result: " + text);
} catch (err) {
console.error(err);
alert("OCR Failed: " + err.message);
}
}, // <-- COMMA ADDED HERE
async verify() {
const dataUrl = this.$refs.hiddenInput.value;
if (!dataUrl) {
alert("No captured image found!");
return;
}
await this.verifyOCR(dataUrl);
},
async retakePhoto() {
this.photoTaken = false;
this.$refs.snapshot.classList.add('hidden');
this.$refs.video.classList.remove('hidden');
await this.initCamera();
}
}
}
</script> --}}
{{-- //..Another Option --}}
{{-- <div x-data="cameraCapture()" x-init="initCamera()" wire:ignore class="space-y-2">
<video
x-ref="video"
autoplay
playsinline
class="border rounded w-80 h-auto"
></video>
<!-- OCR Highlight Layer -->
<canvas
x-ref="overlay"
class="border rounded w-80 h-auto"
style="position:absolute; top:0; left:0; pointer-events:none;"
></canvas>
<canvas x-ref="canvas" class="hidden"></canvas>
{{-- <img x-ref="snapshot" class="hidden border rounded max-w-full"> --}}
{{-- <img x-ref="snapshot"
class="hidden border rounded"
style="width: 100%; max-width: 350px; height: auto;"> --}}
{{-- <div class="flex space-x-4 mt-2">
<x-filament::button color="primary" @click="capturePhoto" x-show="!photoTaken">Capture</x-filament::button>
<x-filament::button color="primary" @click="retakePhoto" x-show="photoTaken">Retake</x-filament::button>
<x-filament::button color="primary" @click="switchCamera" x-show="!photoTaken" class="inline-flex w-auto">Switch Camera</x-filament::button>
<x-filament::button color="primary" @click="verify" x-show="photoTaken" class="inline-flex w-auto">Verify</x-filament::button>
<x-filament::button color="success" @click="uploadCroppedImage" x-show="photoTaken">OK Upload Cropped</x-filament::button>
<x-filament::button color="success" @click="uploadOcr" x-show="photoTaken">Upload OCR</x-filament::button>
</div>
<input type="hidden" x-ref="hiddenInput" x-model="photo1" name="camera_capture_file">
<input type="hidden" x-ref="serialInput" name="serialNumbers">
</div> --}}
{{-- <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/cropperjs/1.5.13/cropper.min.css">
<script src="https://cdnjs.cloudflare.com/ajax/libs/cropperjs/1.5.13/cropper.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/tesseract.js@4.1.3/dist/tesseract.min.js"></script> --}}
{{-- <script>
function cameraCapture() {
return {
stream: null,
currentFacingMode: 'user',
photoTaken: false,
photo1: '',
textDetectionInterval: null,
worker: null, --}}
{{-- // async initCamera() {
// try {
// if (this.stream) this.stream.getTracks().forEach(track => track.stop());
// this.stream = await navigator.mediaDevices.getUserMedia({
// video: { facingMode: this.currentFacingMode }
// });
// this.$refs.video.srcObject = this.stream;
// //this.startDetection();
// } catch (err) {
// console.error("Camera error:", err);
// alert("Cannot access camera. Enable permissions or use HTTPS.");
// }
// },
async initCamera() {
try {
if (this.stream) this.stream.getTracks().forEach(track => track.stop());
const video = this.$refs.video;
this.stream = await navigator.mediaDevices.getUserMedia({
video: { facingMode: this.currentFacingMode }
});
video.srcObject = this.stream;
await new Promise(resolve => video.onloadedmetadata = resolve);
// Overlay size
const overlay = this.$refs.overlay;
overlay.width = video.videoWidth;
overlay.height = video.videoHeight;
// Initialize Tesseract Worker
if (!this.worker) {
this.worker = Tesseract.createWorker({
logger: m => console.log(m)
});
await this.worker.load();
await this.worker.loadLanguage('eng');
await this.worker.initialize('eng');
}
this.startDetection();
} catch (err) {
console.error("Camera error:", err);
//alert("Cannot access camera. Enable permissions or use HTTPS.");
alert("Camera error:\n" + (err.message || err));
}
},
async switchCamera() {
this.currentFacingMode = this.currentFacingMode === 'user' ? 'environment' : 'user';
await this.initCamera();
},
async capturePhoto() {
const video = this.$refs.video;
const canvas = this.$refs.canvas;
const ctx = canvas.getContext('2d');
canvas.width = video.videoWidth;
canvas.height = video.videoHeight;
ctx.drawImage(video, 0, 0);
const snapshot = this.$refs.snapshot;
snapshot.src = canvas.toDataURL('image/png');
//Wait until image is loaded
// snapshot.onload = () => {
// snapshot.classList.remove('hidden');
// video.classList.add('hidden');
// //Alpine reactive update inside nextTick
// this.$nextTick(() => {
// this.photoTaken = true;
// //Destroy old cropper if exists
// if (this.cropper) this.cropper.destroy();
// // ✅ Use requestAnimationFrame to ensure browser painted the image
// requestAnimationFrame(() => {
// this.cropper = new Cropper(snapshot, {
// aspectRatio: NaN,
// dragMode: 'crop',
// viewMode: 1,
// autoCropArea: 0.8,
// background: true,
// movable: true,
// zoomable: true,
// responsive: true,
// });
// console.log("✅ Cropper initialized");
// });
// this.stopCamera(); // stop camera after Cropper starts
// });
// };
},
//
async uploadCroppedImage() {
if (!this.cropper) {
alert("Crop the image before upload!");
return;
}
const croppedCanvas = this.cropper.getCroppedCanvas({ imageSmoothingEnabled: true });
croppedCanvas.toBlob(async blob => {
const formData = new FormData();
formData.append('photo', blob, 'cropped.png');
const response = await fetch('/temp-upload', {
method: 'POST',
headers: { 'X-CSRF-TOKEN': '{{ csrf_token() }}' },
body: formData
});
const data = await response.json();
if (data.success) {
this.$refs.hiddenInput.value = data.path;
alert("✅ Cropped image uploaded!");
} else {
alert("Upload failed!");
}
}, "image/png");
},
async verify() {
const filePath = this.$refs.hiddenInput.value; // e.g., "temp/capture_1760764396.jpeg"
if (!filePath) {
alert("No captured image found!");
return;
}
try {
const response = await fetch('/verify-ocr', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'X-CSRF-TOKEN': '{{ csrf_token() }}'
},
body: JSON.stringify({ path: filePath })
});
const data = await response.json();
console.log(data);
// if (data.success) {
// alert("OCR Result: " + data.text);
// console.error(data.text);
// }
if (data.success) {
// const serials = Array.isArray(data.text) ? data.text.join("\n") : data.text;
// alert("OCR Result:\n" + serials);
// console.log(serials);
const serials = Array.isArray(data.text) ? data.text : [data.text];
const firstFour = serials.slice(0, 4);
// Emit Livewire event to Resource Page
// window.dispatchEvent(new CustomEvent('set-serial-numbers', {
// detail: { serialNumbers: firstFour }
// }));
// Fill hidden input for Filament action
this.$refs.serialInput.value = JSON.stringify(firstFour);
alert("OCR Result:\n" + firstFour.join("\n"));
console.log("Serials sent to Resource Page:", firstFour);
}
else {
alert("OCR Failed: " + data.error);
console.error(data.error);
}
} catch (err) {
console.error(err.message);
alert("OCR request failed: " + err.message);
}
},
async retakePhoto() {
this.photoTaken = false;
this.$refs.snapshot.classList.add('hidden');
this.$refs.video.classList.remove('hidden');
this.cropper?.destroy();
await this.initCamera();
},
async detectText() {
const video = this.$refs.video;
const overlay = this.$refs.overlay;
const ctx = overlay.getContext("2d");
if (!video.videoWidth) return;
// Draw video frame to temp canvas
const tempCanvas = document.createElement('canvas');
tempCanvas.width = video.videoWidth;
tempCanvas.height = video.videoHeight;
const tempCtx = tempCanvas.getContext('2d');
tempCtx.drawImage(video, 0, 0);
// Run OCR in worker
const { data: { words } } = await this.worker.recognize(tempCanvas);
// Clear overlay
ctx.clearRect(0, 0, overlay.width, overlay.height);
ctx.strokeStyle = 'lime';
ctx.lineWidth = 2;
words.forEach(w => {
if (!w.bbox || w.confidence < 50) return;
const { x0, y0, x1, y1 } = w.bbox;
ctx.strokeRect(x0, y0, x1 - x0, y1 - y0);
});
},
startDetection() {
if (this.textDetectionInterval) {
clearInterval(this.textDetectionInterval);
}
this.textDetectionInterval = setInterval(() => this.detectText(), 1000);
},
// Initialize camera and detection
async init() {
await this.initCamera();
this.startDetection();
}
}
}
</script> --}}
{{-- //.. --}}
{{-- <div x-data="cameraCapture()" x-init="initCamera()" wire:ignore class="relative space-y-2">
<!-- Video feed -->
<video
x-ref="video"
autoplay
playsinline
class="border rounded w-80 h-auto"
style="display:block;"
></video>
<!-- Overlay canvas for OCR highlight -->
<canvas
x-ref="overlay"
class="border rounded w-80 h-auto"
style="position:absolute; top:0; left:0; pointer-events:none;"
></canvas>
<!-- Hidden canvas for capturing snapshot if needed -->
<canvas x-ref="canvas" class="hidden"></canvas>
<div class="flex space-x-4 mt-2">
<x-filament::button color="primary" @click="switchCamera">Switch Camera</x-filament::button>
<x-filament::button color="success" @click="capturePhoto">Capture Photo</x-filament::button>
</div>
<input type="hidden" x-ref="hiddenInput" name="camera_capture_file">
</div> --}}
<!-- Scripts -->
{{-- <script src="https://cdn.jsdelivr.net/npm/tesseract.js@2.1.5/dist/tesseract.min.js"></script>
<script>
function cameraCapture() {
return {
stream: null,
currentFacingMode: 'user',
textDetectionInterval: null,
async initCamera() {
try {
// Stop any existing streams
if (this.stream) this.stream.getTracks().forEach(track => track.stop());
const video = this.$refs.video;
this.stream = await navigator.mediaDevices.getUserMedia({
video: { facingMode: this.currentFacingMode }
});
video.srcObject = this.stream;
// Wait for metadata to load
await new Promise(resolve => video.onloadedmetadata = resolve);
video.play();
// Overlay size matches video
const overlay = this.$refs.overlay;
overlay.width = video.videoWidth;
overlay.height = video.videoHeight;
// Start OCR detection
this.startDetection();
} catch (err) {
console.error("Camera error:", err);
alert("Camera error:\n" + (err.message || err));
}
},
async switchCamera() {
this.currentFacingMode = this.currentFacingMode === 'user' ? 'environment' : 'user';
await this.initCamera();
},
async capturePhoto() {
const video = this.$refs.video;
const canvas = this.$refs.canvas;
const ctx = canvas.getContext('2d');
canvas.width = video.videoWidth;
canvas.height = video.videoHeight;
ctx.drawImage(video, 0, 0);
// Save captured image to hidden input (optional)
const snapshot = canvas.toDataURL('image/png');
this.$refs.hiddenInput.value = snapshot;
alert("Photo captured!");
},
async detectText() {
const video = this.$refs.video;
const overlay = this.$refs.overlay;
const ctx = overlay.getContext('2d');
if (!video.videoWidth) return;
const tempCanvas = document.createElement('canvas');
tempCanvas.width = video.videoWidth;
tempCanvas.height = video.videoHeight;
const tempCtx = tempCanvas.getContext('2d');
tempCtx.drawImage(video, 0, 0);
try {
const result = await Tesseract.recognize(tempCanvas, 'eng', {
logger: m => console.log(m)
});
const words = result.data.words;
ctx.clearRect(0, 0, overlay.width, overlay.height);
ctx.strokeStyle = 'lime';
ctx.lineWidth = 2;
words.forEach(w => {
if (!w.bbox || w.confidence < 50) return;
const { x0, y0, x1, y1 } = w.bbox;
ctx.strokeRect(x0, y0, x1 - x0, y1 - y0);
});
} catch (err) {
console.error("OCR error:", err);
}
},
startDetection() {
if (this.textDetectionInterval) clearInterval(this.textDetectionInterval);
this.textDetectionInterval = setInterval(() => this.detectText(), 1000);
}
}
}
</script> --}}
{{-- .. --}}
{{-- Alpine camera-capture widget: live <video> preview with a 1s OCR overlay
     loop, plus Capture / Verify actions that extract up to four numeric
     serials into a hidden input. Backed by cameraCapture() in the script
     block below; wire:ignore keeps Livewire from re-rendering the stream. --}}
<div x-data="cameraCapture()" x-init="initCamera()" wire:ignore class="relative space-y-2">
<!-- Video feed -->
<video
x-ref="video"
autoplay
playsinline
class="border rounded w-80 h-auto"
style="display:block;"
></video>
<!-- Overlay canvas for OCR highlight -->
<canvas
x-ref="overlay"
class="border rounded w-80 h-auto"
style="position:absolute; top:0; left:0; pointer-events:none;"
></canvas>
<!-- Hidden canvas for capturing snapshot -->
<canvas x-ref="canvas" class="hidden"></canvas>
<div class="flex space-x-4 mt-2">
<x-filament::button color="primary" @click="switchCamera">Switch Camera</x-filament::button>
<x-filament::button color="success" @click="capturePhoto">Capture Photo</x-filament::button>
<x-filament::button color="warning" @click="verifyPhoto">Verify</x-filament::button>
</div>
<!-- Raw data-URL of the captured frame, submitted with the form. -->
<input type="hidden" x-ref="hiddenInput" name="camera_capture_file">
{{-- <input type="hidden" x-ref="serialInput" name="serialNumbers"> --}}
<!-- NOTE(review): x-model binds the serialNumbers ARRAY to this input while
     verifyPhoto() also writes a JSON string via $refs.serialInput.value —
     the two bindings can overwrite each other; confirm which representation
     the server actually receives. -->
<input type="hidden" x-ref="serialInput" x-model="serialNumbers" name="serialNumbers">
</div>
<!-- Scripts -->
<script src="https://cdn.jsdelivr.net/npm/tesseract.js@2.1.5/dist/tesseract.min.js"></script>
<script>
function cameraCapture() {
    return {
        // MediaStream from getUserMedia; stopped before every re-init/switch.
        stream: null,
        // 'user' = front camera, 'environment' = rear camera.
        currentFacingMode: 'user',
        // Timer handle for the 1s live-OCR overlay loop.
        textDetectionInterval: null,
        // Data-URL (image/png) of the last captured frame; null until capture.
        capturedPhoto: null,
        // Up to four numeric strings extracted by verifyPhoto().
        serialNumbers: [],
        // True while a live-OCR pass is in flight; prevents overlapping passes.
        ocrBusy: false,

        // Start (or restart) the camera preview and the live-OCR loop.
        async initCamera() {
            try {
                if (this.stream) this.stream.getTracks().forEach(track => track.stop());
                const video = this.$refs.video;
                this.stream = await navigator.mediaDevices.getUserMedia({
                    video: { facingMode: this.currentFacingMode }
                });
                video.srcObject = this.stream;
                // Only wait for loadedmetadata if it has not fired yet
                // (readyState 0 = HAVE_NOTHING); awaiting unconditionally
                // hangs forever when metadata is already available.
                if (video.readyState < 1) {
                    await new Promise(resolve => video.onloadedmetadata = resolve);
                }
                await video.play();
                // Size the overlay to the real frame so OCR bbox coordinates
                // line up with what the user sees.
                const overlay = this.$refs.overlay;
                overlay.width = video.videoWidth;
                overlay.height = video.videoHeight;
                this.startDetection();
            } catch (err) {
                console.error("Camera error:", err);
                alert("Camera error:\n" + (err.message || err));
            }
        },

        // Toggle front/rear camera and re-initialise the stream.
        async switchCamera() {
            this.currentFacingMode = this.currentFacingMode === 'user' ? 'environment' : 'user';
            await this.initCamera();
        },

        // Grab the current video frame into the hidden canvas and store it as
        // a data-URL in both the hidden input and capturedPhoto.
        async capturePhoto() {
            const video = this.$refs.video;
            // Guard: before the first frame arrives videoWidth is 0, and
            // toDataURL on a 0x0 canvas would yield a useless image.
            if (!video.videoWidth) {
                alert("Camera is not ready yet. Please wait a moment and try again.");
                return;
            }
            const canvas = this.$refs.canvas;
            const ctx = canvas.getContext('2d');
            canvas.width = video.videoWidth;
            canvas.height = video.videoHeight;
            ctx.drawImage(video, 0, 0);
            const snapshotData = canvas.toDataURL('image/png');
            this.$refs.hiddenInput.value = snapshotData;
            this.capturedPhoto = snapshotData; // store for verification
            alert("Photo captured!");
        },

        // OCR the captured photo and stash up to four numeric serials in
        // serialNumbers and the hidden serialInput field (as JSON).
        async verifyPhoto() {
            if (!this.capturedPhoto) {
                alert("Please capture a photo first!");
                return;
            }
            try {
                const img = new Image();
                img.src = this.capturedPhoto;
                // Await the image load instead of nesting the OCR work inside
                // img.onload: the previous version ran recognize() inside the
                // callback, so its failures escaped this try/catch and the
                // async callback's rejection was silently dropped.
                await new Promise((resolve, reject) => {
                    img.onload = resolve;
                    img.onerror = () => reject(new Error("Failed to load captured image"));
                });
                const canvas = document.createElement('canvas');
                canvas.width = img.width;
                canvas.height = img.height;
                const ctx = canvas.getContext('2d');
                ctx.drawImage(img, 0, 0);
                const result = await Tesseract.recognize(canvas, 'eng', {
                    logger: m => console.log(m)
                });
                const detectedText = result.data.text.trim();
                alert("Detected Text:\n" + (detectedText || "[No text detected]"));
                // Extract serial numbers (digits only)
                const matches = detectedText.match(/\d+/g) || [];
                this.serialNumbers = matches.slice(0, 4); // take first 4 serials
                this.$refs.serialInput.value = JSON.stringify(this.serialNumbers);
                alert("Serial numbers stored in hidden input:\n" + this.$refs.serialInput.value);
            } catch (err) {
                console.error("OCR verify error:", err);
                alert("OCR verify failed:\n" + (err.message || err));
            }
        },

        // One live-OCR pass: recognise the current frame and draw lime boxes
        // around words with confidence >= 50 on the overlay canvas.
        async detectText() {
            // A Tesseract pass can easily exceed the 1s interval; skip this
            // tick while the previous pass is still running so recognitions
            // do not pile up and starve the UI.
            if (this.ocrBusy) return;
            const video = this.$refs.video;
            const overlay = this.$refs.overlay;
            const ctx = overlay.getContext('2d');
            if (!video.videoWidth) return;
            this.ocrBusy = true;
            const tempCanvas = document.createElement('canvas');
            tempCanvas.width = video.videoWidth;
            tempCanvas.height = video.videoHeight;
            const tempCtx = tempCanvas.getContext('2d');
            tempCtx.drawImage(video, 0, 0);
            try {
                const result = await Tesseract.recognize(tempCanvas, 'eng');
                const words = result.data.words;
                ctx.clearRect(0, 0, overlay.width, overlay.height);
                ctx.strokeStyle = 'lime';
                ctx.lineWidth = 2;
                words.forEach(w => {
                    if (!w.bbox || w.confidence < 50) return;
                    const { x0, y0, x1, y1 } = w.bbox;
                    ctx.strokeRect(x0, y0, x1 - x0, y1 - y0);
                });
            } catch (err) {
                console.error("Live OCR error:", err);
            } finally {
                this.ocrBusy = false;
            }
        },

        // (Re)start the 1s live-OCR loop, clearing any previous timer first.
        startDetection() {
            if (this.textDetectionInterval) clearInterval(this.textDetectionInterval);
            this.textDetectionInterval = setInterval(() => this.detectText(), 1000);
        }
    }
}
</script>