
    var SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
    var recognition = new SpeechRecognition();

    var synth = window.speechSynthesis;
    var voices = [];
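
    // Hedged hardening sketch (not part of the original file): in browsers
    // without the Web Speech API (e.g. Firefox by default) the constructor
    // above is undefined, so new SpeechRecognition() throws at load time.
    // speechApiAvailable is a hypothetical helper that initialize() could
    // call before doing any recognition work.
    function speechApiAvailable() {
        return typeof window.SpeechRecognition !== 'undefined'
            || typeof window.webkitSpeechRecognition !== 'undefined';
    }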

    function initialize() {
        initializePitchAndRate();
        initializeVoiceGender();
        populateVoiceList();
        loadBotNamesList();
        loadBotIdentity();
    }


    function processSpokenText(event) {

        var interimTranscript = '';
        var finalTranscript = '';

        // Accumulate final vs. interim transcripts from the recognition event.
        try {
            for (var i = event.resultIndex; i < event.results.length; ++i) {
                if (event.results[i].isFinal) {
                    finalTranscript += event.results[i][0].transcript;
                } else {
                    interimTranscript += event.results[i][0].transcript;
                }
            }
        } catch (e) {
            errorMessage.innerText = 'ProcessSpokenText: (results) ' + e;
            console.error('Error reading recognition results:', e);
        }

        if (finalTranscript.trim() !== '') {

            // The user finished speaking a request: pause listening and process it.

            if (AttentionLevel.value.toLowerCase() === 'respondonclick') {
                messageInput.value += finalTranscript;
                errorMessage.innerText = ':' + AttentionLevel.value.toLowerCase() + ' ';
                return;
            }

            SpokenText.value = finalTranscript;
            messageInput.value = finalTranscript;

            // While the system is already speaking, it only listens for the "shutup" keyword.
            if (ListenMode.textContent.toLowerCase() === "speaking" || ListenMode.textContent.toLowerCase() === "responding") {
                if (finalTranscript.toLowerCase().includes("shutup")) {
                    stopSpeaking();
                    ListenMode.textContent = "listening";
                    micStatusElem.textContent = 'Mic On';
                }
                return;
            }

            // Listen for the bot-name keyword while the AI is in the ignoring state.
            if (finalTranscript.toLowerCase().includes(botName) && ListenMode.textContent.toLowerCase() === "ignoring") {
                ListenMode.textContent = "listening";
                micStatusElem.textContent = 'Mic On';
            }

            var wordAfterHey;

            // Assuming it was listening, process the message.
            if (ListenMode.textContent.toLowerCase() === "listening") {

                // "hey <name>" switches the active bot identity when that bot exists.
                // (Matching works on a lowercased copy so "Hey Ryan" is caught too.)
                if (finalTranscript.toLowerCase().includes('hey ')) {
                    var words = finalTranscript.toLowerCase().split(' '); // Split the transcript into words
                    var index = words.indexOf('hey');                     // Find the index of 'hey'

                    if (index !== -1 && index < words.length - 1) {
                        wordAfterHey = words[index + 1]; // The word immediately following 'hey'
                        console.log("Word after 'hey':", wordAfterHey);
                        if (NamedBotExists(wordAfterHey)) {
                            setBotIdentity(wordAfterHey);
                        }
                        // Otherwise the named bot was not found; stay on the current identity.
                    }
                }

                isRecognitionActive = false;
                recognition.stop();

                ListenMode.textContent = "responding";
                micStatusElem.textContent = 'Mic Paused';

                try {
                    submitTextToChatAPI();
                } catch (e) {
                    errorMessage.innerText = 'ProcessSpokenText.submitTextToChatAPI ' + e;
                    console.error('Error submitting spoken text to the chat API:', e);
                }

                return;
            }

        } else {
            // Only interim results so far; leave recognition running.
            // (Disabled in the original: recognition.start();)
        }
    }
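
    // Sketch of a more forgiving "hey <name>" parser (extractNameAfterHey is a
    // hypothetical helper, not used by the code above): recognition engines
    // often insert punctuation, so "Hey, Ryan." would defeat a plain split on
    // spaces. Stripping punctuation before matching avoids that.
    function extractNameAfterHey(transcript) {
        var words = transcript.toLowerCase().replace(/[^\w\s]/g, '').split(/\s+/);
        var index = words.indexOf('hey');
        return (index !== -1 && index < words.length - 1) ? words[index + 1] : null;
    }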


    function reStartListening() {
        if (recognition && recognition.start) {
            try {
                recognition.stop();
                isRecognitionActive = false;

                // Give the previous session a moment to wind down before
                // restarting. Errors thrown inside the timer callback would not
                // reach the outer catch, so start() is guarded separately, and
                // the active flag is only set once start() has actually run.
                setTimeout(() => {
                    try {
                        recognition.start();
                        isRecognitionActive = true;
                    } catch (error) {
                        errorMessage.innerText = 'reStartListening: ' + error;
                        console.error('An error occurred while starting the speech recognition service:', error);
                    }
                }, 100);
            } catch (error) {
                errorMessage.innerText = 'reStartListening: ' + error;
                console.error('An error occurred while stopping the speech recognition service:', error);
            }
        }
    }


    function startListening() {
        clearRequestText();
    
        if (isRecognitionActive) {
            console.log('Recognition already active');
            return;
        }
    
        recognition = new SpeechRecognition();
        recognition.interimResults = true;
        recognition.maxAlternatives = 1;
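        // Possible additional configuration (assumptions, not in the original):
        // the recognizer defaults to the document language and single-shot
        // mode. Continuous dictation would set the standard properties below.
        // recognition.lang = 'en-US';
        // recognition.continuous = true;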
    
        recognition.onerror = handleRecognitionError;
        recognition.onsoundend = handleSoundEnd;
        recognition.onend = handleRecognitionEnd;
        
        
        // onresult checks whether final text arrived; if so, it pauses
        // listening while the request is processed.
        recognition.onresult = processSpokenText;
        
    
        recognition.start();
        isRecognitionActive = true;
    }


    ///------------  Microphone  CODE   ----------------//
    function setMicrophoneClicked() {
        // Currently unused. The earlier (disabled) approach wired the icon directly:
        // let microphone = document.querySelector('.fa-microphone');
        // microphone.addEventListener('click', function () { this.style.color = 'red'; enableMicrophone(); });
    }

    // Start or stop the voice recognition based on current state
    function toggleMicrophone() {
        let micButton = document.getElementById('mic-toggle-button');
        let micStatusElem = document.getElementById('micStatus');
        let microphone = document.querySelector('.fa-microphone');

        if (micStatusElem.textContent === 'Mic Off' || micStatusElem.textContent === 'Mic Paused') {
            // If the microphone is off or paused, start it
            startListening();
            micButton.textContent = 'Mic Stop';
            microphone.style.color = 'red';
        } else {
            // If the microphone is on, stop it
            disableMicrophone();
            micButton.textContent = 'Mic Start';
            microphone.style.color = 'black';
        }
    }

    function disableMicrophone() {
        micStatusElem.textContent = 'Mic Off';
        isRecognitionActive = false; // keep the flag in sync so startListening() can run again
        recognition.stop();
    }


    function enableMicrophone(){
        var attentionLevel = document.getElementById('attentionLevel').value;
        micStatusElem.textContent = 'Mic On';
    
        if (attentionLevel.toLowerCase() === 'respondwhencalled') {
            ListenMode.textContent = "Ignoring";
        }
        else {
            ListenMode.textContent = "Listening";
        }
        startListening();
    }

    function findVoiceByLanguageAndGender(voices, lang, gender) {
        let langVoices = voices.filter(voice => voice.lang.startsWith(lang));
        let genderVoice = langVoices.find(voice => voice.name.toLowerCase().includes(gender.toLowerCase())) || langVoices[0];
        return genderVoice;
    }

    function findPreferredVoice(voices, gender) {
        let userLang = navigator.language.split('-')[0]; // Get the primary language subtag (e.g., 'en' from 'en-US')
        let preferredVoice = findVoiceByLanguageAndGender(voices, userLang, gender) || voices[0];
        return preferredVoice;
    }
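
    // Example usage (illustrative values): on an en-US page this prefers an
    // English voice whose name mentions "female", falling back to the first
    // English voice, then to the first voice overall.
    //
    //     var voice = findPreferredVoice(synth.getVoices(), 'female');
    //     console.log(voice && voice.name);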

    function speak(text, pitch, rate, gender, onEndCallback) {
        if (!text) {
            console.error('No text to speak');
            return;
        }

        let utterance = new SpeechSynthesisUtterance(text);
        utterance.voice = findPreferredVoice(voices, gender);
        utterance.pitch = pitch;
        utterance.rate = rate;
        utterance.onerror = (event) => console.error('Text-to-speech error', event.error);
        
        utterance.onend = function(event) {
            // Resume listening once speech playback completes.
            ListenMode.textContent = "Listening";
            micStatusElem.textContent = 'Mic On';

            // Call the provided callback function if there is one
            if (onEndCallback) onEndCallback(event);
        };
        synth.speak(utterance);
    }
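
    // Example usage (illustrative values): pitch and rate take the standard
    // SpeechSynthesisUtterance ranges (0–2 for pitch, 0.1–10 for rate).
    //
    //     speak('Hello there.', 1.0, 1.0, 'male', function () {
    //         console.log('Finished speaking');
    //     });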
    
    function speakRequest() {
        const pitch = document.getElementById('pitch').value;
        const rate = document.getElementById('rate').value;
        const text = document.getElementById('SpokenText').value.trim();
        const gender = 'male'; // Set the desired gender here

        if (!text) {
            console.error('No text to speak');
            return;
        }

        speak(text, pitch, rate, gender);
    }

    function speakResponse() {
        const response = document.getElementById('response-output').value.trim();

        if (!response) {
            return;
        }
    
        const pitch = document.getElementById('pitch').value;
        const rate = document.getElementById('rate').value;
        const gender = 'male'; // Set the desired gender here

        const lines = response.split(/\r?\n/)
            .map(line => line.trim())
            .filter(line => line !== '');

        if (lines.length === 0) {
            return;
        }

        const spokenText = response;
        const containsTable = response.indexOf('|') !== -1;
        const MAX_SPEAK_LENGTH = 5000;

        if (containsTable || spokenText.length > MAX_SPEAK_LENGTH) {
            const errorMessage = containsTable ? 'Text contains a table' : `Response is too long to speak (${spokenText.length} characters).`;
            console.error(errorMessage);
            
            if (AttentionLevel.value.toLowerCase() === 'respondwhencalled') {
                setTimeout(() => waitForKeyword(), 8000);
            }

            return;
        }

        speak(spokenText, pitch, rate, gender, (event) => {
            console.log('Finished speaking response');
            
            clearResponseText();

            lastResponseTime = new Date().getTime();

           // ListenMode.textContent = "Ignoring";
            disablerespondByName();
            
            //finished speaking something.  Pay attention for a bit, then stop paying attention:
            if (AttentionLevel.value.toLowerCase() === 'respondwhencalled') {
                setTimeout(() => waitForKeyword(), 8000);
            }
        });
    }





    function stopSpeaking() {
        synth.cancel();
    }           


    function initializeVoiceGender() {
        var voiceGender = document.getElementById('voiceGender');
    
        voiceGender.addEventListener('change', function() {
            var voiceSelect = document.getElementById('voiceSelect');
            voiceSelect.innerHTML = '';
    
            if (voiceGender.value === 'male') {
                filterVoices('Male');
            } else if (voiceGender.value === 'female') {
                filterVoices('Female');
            } else {
                populateVoiceList();
            }
        });
    }

    function filterVoices(gender) {
        voices.forEach(function(voice, index) {
            if (voice.name.includes(gender)) {
                var option = document.createElement('option');
                option.textContent = voice.name + ' (' + voice.lang + ')';
                option.value = index;
                document.getElementById('voiceSelect').appendChild(option);
            }
        });
    }
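
    // Note: matching gender by a substring of the voice name is a heuristic;
    // many platform voices (e.g. "Samantha", "Daniel") do not include "Male"
    // or "Female" in their names, so the filtered list may come back empty.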


    
    function populateVoiceList() {
        voices = synth.getVoices();
        if (voices.length === 0) {
            synth.addEventListener('voiceschanged', function() {
                voices = synth.getVoices();
                populateVoicesDropdownList(voices);
            });
        } else {
            populateVoicesDropdownList(voices);
        }

        // Errors in text-to-speech surface per-utterance via utterance.onerror
        // (wired up in speak()); SpeechSynthesis itself does not emit error
        // events, so no handler is attached here.
    }



    function populateVoicesDropdownList(voices) {
        var voiceSelect = document.getElementById('voiceSelect');
        voiceSelect.innerHTML = '';
        var defaultVoiceIndex = -1;

        for (var i = 0; i < voices.length; i++) {
            var option = document.createElement('option');
            option.textContent = voices[i].name + ' (' + voices[i].lang + ')';
            option.value = i;
            voiceSelect.appendChild(option);

            // If an English voice named "Ryan" is found, prioritize it. Keep
            // looping, though: breaking here would leave the remaining voices
            // out of the dropdown.
            if (voices[i].name.includes("Ryan") && voices[i].lang.includes("en")) {
                defaultVoiceIndex = i;
            }

            // Otherwise default to the first US or GB English voice in the list
            if (defaultVoiceIndex === -1) {
                if (voices[i].lang.includes("en-US") ||
                    voices[i].lang.includes("en-GB")) {
                    defaultVoiceIndex = i;
                }
            }
        }

        if (defaultVoiceIndex >= 0) {
            voiceSelect.selectedIndex = defaultVoiceIndex;
        }
    }

    async function loadBotNamesList() {
        try {
            const response = await fetch(botConfigFile);
            if (!response.ok) throw new Error('HTTP ' + response.status);
            const data = await response.json();
            console.log(data);
    
            const select = document.getElementById('botName');
            select.innerHTML = ''; // Clear all options
    
            for (let config of data) {
                let option = document.createElement('option');
                option.textContent = config.PersonalityName;
                option.value = config.PersonalityName;
                select.appendChild(option);
            }


        } catch (error) {
            console.error('Failed to load configuration:', error);
        }
    }
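
    // Expected shape of botConfigFile, inferred from the loop above (only
    // PersonalityName is read here; other fields are unknown and the names
    // below are illustrative):
    //
    //     [
    //         { "PersonalityName": "Ryan" },
    //         { "PersonalityName": "Ada" }
    //     ]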





