<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Screen and Speech Recorder</title>
</head>
<body>
    <h1>Screen and Speech Recorder</h1>
    <textarea id="textToRead" rows="4" cols="50" placeholder="Enter text here"></textarea><br>
    <button id="start">Start Recording</button>
    <button id="stop" disabled>Stop Recording</button>
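    <!-- Playback target: the finished recording is loaded into this element -->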
    <video id="recordedVideo" controls></video>

    <script>
        const startButton = document.getElementById('start');
        const stopButton = document.getElementById('stop');
        const video = document.getElementById('recordedVideo');
        const textToRead = document.getElementById('textToRead');

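        // Recording state shared by the start and stop handlers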
        let mediaRecorder;
        let recordedChunks = [];
        let speechSynthesisStream;

        async function getScreenAndAudioStream() {
            // Capture the screen
            const screenStream = await navigator.mediaDevices.getDisplayMedia({
                video: true
            });

            // Speak the text from the textarea. Note: the Web Speech API plays
            // synthesized speech straight to the system output and does not expose
            // the audio to the page, so it cannot be routed through an AudioContext.
            const utterance = new SpeechSynthesisUtterance(textToRead.value);
            window.speechSynthesis.speak(utterance);

            // Create an audio track for the recording via an AudioContext destination.
            // This track stays silent unless audio is fed into the destination node;
            // to actually capture the spoken audio, capture tab or system audio instead
            // (getDisplayMedia with audio: true), where the browser and OS support it.
            const audioContext = new AudioContext();
            const destination = audioContext.createMediaStreamDestination();
            speechSynthesisStream = destination.stream;

            // Combine the screen and audio streams into one stream for the recorder
            const combinedStream = new MediaStream([
                ...screenStream.getTracks(),
                ...speechSynthesisStream.getTracks()
            ]);

            return combinedStream;
        }

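        // Start: grab the combined stream, then record it with MediaRecorder.
        // Chunks arrive via ondataavailable and are assembled into a single
        // WebM blob in onstop, which is then loaded into the <video> element.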
        startButton.addEventListener('click', async () => {
            const stream = await getScreenAndAudioStream();

            mediaRecorder = new MediaRecorder(stream);
            mediaRecorder.ondataavailable = (event) => {
                if (event.data.size > 0) {
                    recordedChunks.push(event.data);
                }
            };

            mediaRecorder.onstop = () => {
                const blob = new Blob(recordedChunks, {
                    type: 'video/webm'
                });
                video.src = URL.createObjectURL(blob);
                recordedChunks = [];
            };

            mediaRecorder.start();
            startButton.disabled = true;
            stopButton.disabled = false;
        });

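        // Stop: ending the recorder fires onstop above. MediaRecorder.stop() does
        // not stop the underlying capture tracks, so the screen share keeps running
        // until those tracks are stopped separately.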
        stopButton.addEventListener('click', () => {
            mediaRecorder.stop();
            startButton.disabled = false;
            stopButton.disabled = true;
        });
    </script>
</body>
</html>