From bb4772bdec119b3e7a4f7ad0a42ba1acea9e5bab Mon Sep 17 00:00:00 2001
From: Lucio Zambon <lucio.zambon@elettra.eu>
Date: Thu, 6 Jun 2024 01:28:52 +0000
Subject: [PATCH] Add new file

---
 speech/talk.php | 286 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 286 insertions(+)
 create mode 100644 speech/talk.php

diff --git a/speech/talk.php b/speech/talk.php
new file mode 100644
index 0000000..250cbde
--- /dev/null
+++ b/speech/talk.php
@@ -0,0 +1,286 @@
+<?php
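+	// Tiny file-based message relay between two browser sessions (apparently pairing the page
+	// that speaks a message with a remote page that captures it):
+	//   ?init=<token>                store a numeric token in ./token
+	//   ?send=<text>&token=<token>   store <text> in ./msg if the token matches and is under 10 hours old
+	//   ?read                        long-poll ./msg every 100 ms, return its content and clear it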
+	if (!empty($_REQUEST['init'])) {
+		$t = $_REQUEST['init']-0;
+		if ($t==0 || $t>1000000000000) die('');
+		file_put_contents("./token", $t);
+	}
+	if (!empty($_REQUEST['send'])) {
+		$t = file_get_contents("./token");
+		if ($t != $_REQUEST['token']-0) die('');
+		if (time() -filectime("./token") > 36000) die('');
+		file_put_contents("./msg", $_REQUEST['send']);
+	}
+	if (isset($_REQUEST['read'])) {
+		set_time_limit(0);
+		/* $t = file_get_contents("./token");
+		if ($t != $_REQUEST['token']-0) die('');
+		if (time() -filectime("./token") > 36000) die('');*/
+		while (true) {
+			if (file_exists("./msg")) {
+				$f = file_get_contents("./msg");
+				if (!empty($f)) {
+					file_put_contents("./msg", '');
+					die($f);
+				}
+			}
+			usleep(100000);
+		}
+	}
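+	// elementArray()/folderArray() mirror a SimpleXML launcher menu into the launcher table.
+	// The strtr() map rewrites the double-quoted values into single-quoted SQL literals,
+	// strips single quotes from the data and turns empty values ("") into NULL;
+	// folderArray() recurses into sub-folders using the id from last_insert_id().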
+	function elementArray($id, $element) {
+		global $sql;
+		foreach ($element as $e) {
+			$query = strtr("INSERT INTO launcher (folder_id, title, description, exename) VALUES ($id, \"".$e->title.'","'.$e->description.'","'.$e->attributes()->exename."\")", ['""'=>'NULL',"'"=>'','"'=>"'"]);
+			echo "$query;<br>\n";
+			$sql->sql_query($query);
+		}
+	}
+	function folderArray($id, $folder) {
+		global $sql;
+		foreach ($folder as $i=>$e) {
+			$query = strtr("INSERT INTO launcher (folder_id, title, description, exename) VALUES ($id, \"".$e->title.'","'.$e->description.'","'.$e->attributes()->exename."\")", ['""'=>'NULL',"'"=>'','"'=>"'"]);
+			echo "$query;<br>\n";
+			$sql->sql_query($query);
+			$lastid = $sql->last_insert_id() - 0;
+			if (isset($e->folder)) folderArray($lastid, $e->folder);
+			if (isset($e->element)) elementArray($lastid, $e->element);
+		}
+	}
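+	// One-off import: ?starter fetches fermi.xml from GitLab (TLS verification disabled),
+	// drops empty exename="" attributes so the file parses, and feeds the tree to folderArray().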
+	if (isset($_REQUEST['starter'])) {
+		require_once("../../conf.php");
+		require_once("../../lib/sql_interface.php");
+		$sql = open_db();
+		// ALTER SEQUENCE launcher_id_seq RESTART WITH 6
+		/*$sql->sql_query("INSERT INTO launcher (folder_id, title, description, exename) VALUES (2, 'Jive','Jive','jive')");
+		echo $sql->last_insert_id();
+		die();
+puma@pwma-dev:~$ sudo psql -U postgres
+postgres=# \c pwma
+pwma=# CREATE EXTENSION pg_trgm;
+		*/
+		
+		$context = stream_context_create(array("ssl"=>array("verify_peer"=>false,"verify_peer_name"=>false)));
+		$f = strtr(file_get_contents('https://gitlab.elettra.eu/cs/etc/browser/fermi/-/raw/master/fermi.xml', false, $context), ['exename=""'=>'']);
+		// var_dump(libxml_use_internal_errors(true));
+		$xml = simplexml_load_string($f, "SimpleXMLElement", LIBXML_NOERROR |  LIBXML_ERR_NONE);
+		if ($xml === false) {
+			echo "Failed loading XML\n";
+			foreach(libxml_get_errors() as $error) {
+				echo "\t", $error->message;
+			}
+		}
+		// elementArray(1, $xml->folder->element);
+		folderArray(1, $xml->folder->folder);
+		echo "<pre>";
+		print_r($xml);
+		die('</pre>');
+	}
+
+?>
+<!doctype html>
+<html lang="en">
+	<head>
+		<meta charset="utf-8">
+		<meta name="viewport" content="width=device-width, initial-scale=1">
+		<title>CS Talk</title>
+		<script src="../../lib/jquery/jquery.min.js"></script>
+	</head>
+	<body onLoad='myload()'>
+		<div style="max-width: 500px; text-align: center">
+			<audio id='speechText' onended='speakagain();' onerror='showLog("audio err: "+event.error)'></audio>
+			<script>
+				// /home/fermi/etc/launcher/fermi/launcher_es.conf
+				// /runtime/etc/browser/fermi.xml
+				// /runtime/etc/browser/laser.xml
+				if (document.location.search.indexOf('background=')>-1) {
+					const a = document.location.search.split('background=')[1].split('&')[0];
+					$('body').css('background-color', +a > 0? '#'+a: a);
+				}
+				const lang = document.location.search.indexOf('lang=')>-1 && document.location.search.split('lang=')[1].split('&')[0]=='en'? 'en-US': 'it-IT'; // BCP 47 language
+				const host = document.location.search.indexOf('talk=')>-1? document.location.search.split('talk=')[1].split('&')[0].replace('ee', 'pcl-elettra-cre-0').replace('ef','pcl-elettra-crf-0'): '';
+				function switchLocale(newlang) {document.location = './talk.php?lang='+newlang+(host.length>0? '&talk='+host: '');};
+				let locale = {};
+				function myload() {
+					fetch('./talk_locale.json').then((response) => {return response.json();}).then((rlocale) => {
+						locale = rlocale;
+						$('#title').html(rlocale[lang].talkto + host);
+						$('#micna').attr('title', rlocale[lang].micna);
+						$('#miclabel').html(rlocale[lang].miclabel + '&nbsp;&nbsp;&nbsp;');
+					});
+				}
+				// https://stackoverflow.com/questions/64405532/why-speechsynthesisutterance-is-not-working-on-chrome
+				// It's because in Chrome speech synthesis requires a user interaction (e.g. a button click) before it will speak.
+				// https://stackoverflow.com/questions/50490304/how-to-make-audio-autoplay-on-chrome
+				const synth = window.speechSynthesis;
+				const voices = synth.getVoices();
+				const voiceLocale = {'it-IT': false, 'en-US': false};
+				for (let i=voices.length-1; i>=0; i--) {
+					if (voices[i].lang=='it-IT') voiceLocale['it-IT'] = voices[i];
+					if (voices[i].lang=='en-US') voiceLocale['en-US'] = voices[i];
+				}
+				let oldText = '';
+				let oldTime = 0;
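+				// Speak a received message: stop recognition while talking, pick the voice that matches
+				// the current locale (falling back to the first voice) and restart recognition from the
+				// utterance's onend handler.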
+				function speakGenerated(textValue) {
+					if (textValue !== '') {
+						stopRec();
+						if (synth.speaking) {console.error(locale[lang].speaking);showLog(locale[lang].speaking2);return;}
+						const utterThis = new SpeechSynthesisUtterance(textValue);
+						utterThis.onend = function (event) {startRec(); console.log('SpeechSynthesisUtterance.onend');showLog('..');};
+						utterThis.onerror = function (event) {console.error('SpeechSynthesisUtterance.onerror', event.error, event);showLog(locale[lang].onerror+JSON.stringify(event.error));};
+						utterThis.voice = voiceLocale[lang]===false? voices[0]: voiceLocale[lang];
+						utterThis.pitch = 0.1;
+						utterThis.rate = 1;
+						utterThis.lang = lang;
+						showLog(lang);
+						synth.speak(utterThis);
+					}
+				}
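+				// Nothing here consumes the talk.php?read long-poll yet; a minimal sketch of how the
+				// listening side could do it (assuming `tok` holds the token shown in the QR code):
+				//   function pollRelay(tok) {
+				//     fetch('./talk.php?read=1&token=' + tok).then(r => r.text())
+				//       .then(msg => { if (msg) speakGenerated(msg); pollRelay(tok); });
+				//   }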
+				let keepAlive = false;
+				let aliveTimer = -1;
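+				// The recognition service can end on its own (silence, errors), so keepAlive marks whether
+				// onend should schedule a restart; stopRec() clears the flag before calling recognition.stop().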
+				function startRec() {
+					keepAlive = true;
+					aliveTimer = -1;
+					// showLog('start');
+					recognition.start();
+					document.getElementById('micstart').style.display = 'inline';
+					document.getElementById('micstop').style.display = 'none';
+				}
+				function stopRec() {
+					if (typeof window.SpeechRecognition == 'undefined') {$("#micstart").hide();$("#micna").show();}
+					keepAlive = false;
+					recognition.stop();
+				}
+				function commandlistinfo() {
+					alert(locale[lang].commandlistinfo);
+				}
+			</script>
+			<span style='color: darkgreen; font-weight: bold;' id='title'>Talk to ...</span>
+
+			<img src='./microphone-na.png' title='not available' onClick='$("#micstart").show();$("#micna").hide();'  id='micna'>
+			<img src='./microphone-on.png' onClick='stopRec()' id='micstart' style='display: none; padding-right: 60px;'>
+			<img src='./microphone-off.png' onClick='startRec()' id='micstop' style='display: none; padding-right: 60px;'>
+			<div style='cursor: pointer;padding-right: 5px; text-align: right; margin-top: -1.3em;' id='langselect'>
+				<img style="max-height: 18px; margin-right: 10px;" src='./it-IT.png' onClick="switchLocale('it')">
+				<img style="max-height: 18px; margin-right: 10px;" src='./en-US.png' onClick="switchLocale('en')">
+				<span style='color: darkgreen; font-weight: bold;' onClick='commandlistinfo()'> ? </span>
+			</div>
+			<span style='color: darkblue; font-weight: bold;' id='miclabel'>comandi vocali&nbsp;&nbsp;&nbsp;</span>
+			<div id='transcriptDiv'></div>
+			<pre id='log' style='display: <?= isset($_REQUEST['debug'])? 'block': 'none' ?>;'></pre>
+			<script>
+				// document.body.style.backgroundColor = 'black'
+				let speechStat = '';
+				let firstRun = true;
+				function showLog(currentToken) {
+					// Show log in console and UI.
+					const logElement = document.querySelector('#log');
+					logElement.textContent = logElement.textContent+currentToken;
+				}
+				// showLog('webkitSpeechRecognition: '+ typeof window.webkitSpeechRecognition);
+				// showLog('SpeechRecognition: '+ typeof window.SpeechRecognition);
+				window.SpeechRecognition = window.webkitSpeechRecognition || window.SpeechRecognition;
+				let finalTranscript = '';
+				if (typeof window.SpeechRecognition == 'undefined') {
+					const tok =  Math.round(Math.random()*1000000000000+1);
+					console.log('./qr.php?data='+tok);
+					fetch('./talk.php?init='+tok);
+					document.getElementById('micstart').src = './qr.php?data='+tok;
+					document.getElementById('langselect').style.display = 'none';
+					document.getElementById('miclabel').style.display = 'none';
+				}
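+				// Fallback when SpeechRecognition is unavailable: register a random token with talk.php?init
+				// and show it as a QR code (qr.php), presumably so another device can run the recognition
+				// and relay commands back through talk.php?send / talk.php?read.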
+				let recognition = typeof window.SpeechRecognition != 'undefined'? new window.SpeechRecognition(): {start: function(){}, stop: function(){}}; // no-op stub so the rest of the script still runs when recognition is unavailable
+				recognition.continuous = true;
+				recognition.interimResults = true;
+				recognition.lang = lang;
+				recognition.maxAlternatives = 3;
+				showLog('starting...'+'\n');
+				/* recognition.onresult = (event) => {
+				  // The SpeechRecognitionEvent results property returns a SpeechRecognitionResultList object
+				  // The SpeechRecognitionResultList object contains SpeechRecognitionResult objects.
+				  // It has a getter so it can be accessed like an array
+				  // The first [0] returns the SpeechRecognitionResult at position 0.
+				  // Each SpeechRecognitionResult object contains SpeechRecognitionAlternative objects
+				  // that contain individual results.
+				  // These also have getters so they can be accessed like arrays.
+				  // The second [0] returns the SpeechRecognitionAlternative at position 0.
+				  // We then return the transcript property of the SpeechRecognitionAlternative object
+				  const color = event.results[0][0].transcript;
+				  diagnostic.textContent = `Result received: ${color}.`;
+				  bg.style.backgroundColor = color;
+				};*/
+				recognition.onerror = function(event) {
+					showLog(locale[lang].errorname + event.error+', '+event.message+', '+event.lineno+'\n');
+					showLog(locale[lang].errormessage + JSON.stringify(event.error));
+					console.log(event);
+					startRec();
+				}
+				recognition.onend = function(event) {
+					showLog('.');
+					document.getElementById('micna').style.display = 'none';
+					document.getElementById('micstart').style.display = 'none';
+					document.getElementById('micstop').style.display = 'inline';
+					if (keepAlive && aliveTimer==-1) aliveTimer = setTimeout(startRec, 200);
+				}
+				recognition.onstart = function(event) {
+					if (firstRun) {
+						firstRun = false;
+						recognition.stop();
+						return;
+					}
+					showLog('-');
+					document.getElementById('micna').style.display = 'none';
+					document.getElementById('micstart').style.display = 'inline';
+					document.getElementById('micstop').style.display = 'none';
+				}
+				const open = {"facility": ["sr", "ptb", "booster", "bts"],"section": 28};
+				function detect_token(transcript, token) {
+					const t = token.split(';');
+					for (const i in t) {
+						if (transcript.indexOf(t[i])>-1) return transcript.indexOf(t[i]) + t[i].length+1;
+					}
+					return -1;
+				}
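+				// quick_add(): ignore repeated transcripts (same text within 1.5 s), then look for the
+				// locale-specific "section" keyword and relay 'S' plus everything after it via talk.php?send,
+				// reusing the token from the talk= query parameter.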
+				function quick_add(transcript) {
+					const t = + new Date();
+					if ((transcript==oldText || (transcript=='te' && oldText.indexOf('te')>-1)) && t-oldTime<1500) return;
+					oldTime = t;
+					oldText = transcript;
+					speechStat = '';
+					const i = detect_token(transcript, locale[lang].section);
+					if (i>-1) {
+						showLog('section: '+i+'--'+transcript.substring(i));
+						if (transcript.substring(i).length>0) fetch('./talk.php?send=S'+transcript.substring(i)+'&token='+document.location.search.split('talk=')[1].split('&')[0]);
+						return;
+					}
+					// else {showLog('+ '+transcript);}
+					transcript.replace('sky ','');
+				}
+
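+				// onresult: log each alternative, hand final transcripts to quick_add(), rewrite a spoken
+				// "meno <digit>" into a leading minus sign, and echo interim/final text into #transcriptDiv.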
+				recognition.onresult = (event) => {
+					let interimTranscript = '';
+					for (let i = event.resultIndex, len = event.results.length; i < len; i++) {
+						let transcript = event.results[i][0].transcript;
+						if (event.results[i][0].transcript.length) showLog('i: '+i+', transcripts: '+event.results[i][0].transcript+(event.results[i][1]? ', transcripts: '+event.results[i][1].transcript:'')+'\n');
+						if (event.results[i].isFinal) {
+							quick_add(transcript.toLowerCase());
+							var meno = transcript.toLowerCase().indexOf('meno ');
+							console.log(meno, transcript.substring(meno+5,meno+6), transcript.substring(meno+5,meno+6) % 1);
+							if (meno>-1 && transcript.substring(meno+5,meno+6) % 1 === 0) {
+								console.log('replace');
+								transcript = transcript.replace(transcript.substring(meno,meno+5), '-');
+							}
+							// finalTranscript += transcript+'<br>\\n';
+							if (transcript.length>0) finalTranscript = transcript;
+						} else {
+							interimTranscript += transcript + '<br>';
+						}
+						document.getElementById('transcriptDiv').innerHTML = '<i style="color:#ddd;">' + interimTranscript + '</i>';
+					}
+					// document.getElementById('transcriptDiv').innerHTML = finalTranscript + '<i style="color:#00d;">' + interimTranscript + '</i>';
+					document.getElementById('transcriptDiv').innerHTML = '<i style="color:#00d;">' + finalTranscript + '</i>';
+				}
+				showLog('start'+'\n');
+				recognition.start();
+			</script>
+		</div>
+	</body>
+</html>
-- 
GitLab