//
//  SpeechRecognizer.swift
//

import Foundation
import Speech

/// Abstraction over a speech-to-text engine: permission flow plus start/stop control.
/// Results are delivered asynchronously through `delegate`.
protocol SpeechRecognizable {
    /// Receiver of recognition results. Held weakly by conforming types.
    var delegate: SpeechRecognizableDelegate? { get set }

    /// Requests microphone and speech-recognition permissions; the callback reports
    /// `true` only when both were granted.
    /// NOTE(review): "complition" is a typo for "completion", but the label is part of
    /// the public API shared with conformers — renaming requires a coordinated change.
    func requestPermissions(complition: ((Bool) -> Void)?)
    /// Begins capturing audio and recognizing speech.
    func start()
    /// Stops capturing audio and finalizes the current recognition request.
    func stop()
}

/// Receives recognition results as they arrive.
/// `AnyObject` so conformers can be held weakly.
protocol SpeechRecognizableDelegate: AnyObject {
    /// Called with each (possibly partial) recognition result.
    /// - Parameters:
    ///   - result: The latest transcription from the recognizer.
    ///   - isFinal: `true` when this is the last result for the current utterance.
    func output(result: SFSpeechRecognitionResult, isFinal: Bool)
}

/// Streams microphone audio into `SFSpeechRecognizer` (Russian locale) and forwards
/// results to `delegate`. Call `requestPermissions(complition:)` before `start()`.
final class SpeechRecognizer: NSObject, SpeechRecognizable {

    // MARK: - Properties

    /// Receiver of partial and final recognition results.
    weak var delegate: SpeechRecognizableDelegate?

    // Force-unwrap is deliberate: "ru-RU" is a supported recognizer locale, so a nil
    // here is a programmer error — NOTE(review): confirm "ru-RU" is always available
    // on the deployment targets, otherwise this should become a failable init path.
    private let recognizer: SFSpeechRecognizer = SFSpeechRecognizer(locale: Locale(identifier: "ru-RU"))!
    private var request: SFSpeechAudioBufferRecognitionRequest?
    private var task: SFSpeechRecognitionTask?
    private let engine = AVAudioEngine()

    // MARK: - Public methods

    /// Requests microphone permission first, then speech-recognition permission.
    /// `complition` is invoked on the main queue with `true` only when both granted.
    /// (The `complition` label matches the `SpeechRecognizable` requirement.)
    func requestPermissions(complition: ((Bool) -> Void)?) {
        requestMicrophonePermission { [weak self] granted in
            guard granted else {
                // Microphone denied: no point asking for speech recognition.
                complition?(false)
                return
            }
            self?.requestSpeechPermission { complition?($0) }
        }
    }

    /// Starts a recording + recognition session. No-op when already running.
    func start() {
        guard !engine.isRunning else {
            return
        }
        do {
            try startRecording()
        } catch {
            print("Recording Not Available")
        }
    }

    /// Stops audio capture and signals end-of-audio so the recognizer can
    /// deliver its final result. No-op when not running.
    func stop() {
        guard engine.isRunning else {
            return
        }
        engine.inputNode.removeTap(onBus: 0)
        engine.stop()
        // Ending the audio lets the in-flight task finish with a final result,
        // which the task callback uses to tear down `request`/`task`.
        request?.endAudio()
    }

    // MARK: - Private methods

    /// Asks for microphone access; calls back on the main queue.
    private func requestMicrophonePermission(completion: ((Bool) -> Void)?) {
        AVAudioSession.sharedInstance().requestRecordPermission { success in
            OperationQueue.main.addOperation {
                completion?(success)
            }
            print("Microphone status permission:", success)
        }
    }

    /// Asks for speech-recognition authorization; calls back on the main queue.
    private func requestSpeechPermission(completion: ((Bool) -> Void)?) {
        SFSpeechRecognizer.requestAuthorization { authStatus in
            OperationQueue.main.addOperation {
                completion?(authStatus == .authorized)
            }
            print("Speech status permission:", authStatus == .authorized)
        }
    }

    /// Configures the audio session, wires the input node into a fresh
    /// recognition request, and starts the engine.
    /// - Throws: Audio-session or engine-start errors.
    private func startRecording() throws {
        // Cancel any in-flight task before starting a new session.
        task?.cancel()
        task = nil

        let session = AVAudioSession.sharedInstance()
        try session.setCategory(.record, mode: .measurement)
        try session.setActive(true, options: .notifyOthersOnDeactivation)
        let node = engine.inputNode

        // SFSpeechAudioBufferRecognitionRequest's initializer is non-failable,
        // so the previous `guard let` around it was dead code.
        let recognitionRequest = SFSpeechAudioBufferRecognitionRequest()
        request = recognitionRequest
        recognitionRequest.shouldReportPartialResults = true
        recognitionRequest.requiresOnDeviceRecognition = false

        task = recognizer.recognitionTask(with: recognitionRequest) { [weak self] result, error in
            guard let self = self else {
                return
            }
            var isFinal = false

            if let result = result {
                isFinal = result.isFinal
                self.delegate?.output(result: result, isFinal: isFinal)
            }

            // Tear down on error or once the final result has been delivered.
            if error != nil || isFinal {
                self.engine.stop()
                node.removeTap(onBus: 0)
                self.request = nil
                self.task = nil
            }
        }

        let format = node.outputFormat(forBus: 0)
        // FIX: capture self weakly — the tap block is retained by the engine, which
        // is a stored property of self; the previous strong capture was a retain cycle.
        node.installTap(onBus: 0, bufferSize: 1024, format: format) { [weak self] buffer, _ in
            self?.request?.append(buffer)
        }

        engine.prepare()
        try engine.start()
    }
}