- import bisect
- import functools
- import os
- from dataclasses import dataclass
- from typing import Dict, List, Optional, Tuple
- import numpy as np
- from faster_whisper.utils import get_assets_path
- # The code below is adapted from https://github.com/snakers4/silero-vad.
- @dataclass
- class VadOptions:
- """VAD options.
- Attributes:
- threshold: Speech threshold. Silero VAD outputs a speech probability for each audio chunk;
- probabilities above this value are considered speech. It is better to tune this
- parameter for each dataset separately, but 0.5 is a good default for most datasets.
- neg_threshold: Silence threshold for determining the end of speech. A probability lower
- than neg_threshold is always considered silence. Values between neg_threshold and
- threshold are only considered speech if the previous sample was classified as speech;
- otherwise, they are treated as silence. This parameter helps refine the detection of
- speech transitions, ensuring smoother segment boundaries. Defaults to
- threshold - 0.15 (clamped to a minimum of 0.01).
- min_speech_duration_ms: Final speech chunks shorter than min_speech_duration_ms are discarded.
- max_speech_duration_s: Maximum duration of speech chunks in seconds. Chunks longer
- than max_speech_duration_s will be split at the timestamp of the last silence that
- lasts more than 98 ms (if any), to prevent aggressive cutting. Otherwise, they will be
- split aggressively just before max_speech_duration_s.
- min_silence_duration_ms: At the end of each speech chunk, wait for min_silence_duration_ms
- before separating it from the next chunk.
- speech_pad_ms: Final speech chunks are padded by speech_pad_ms on each side.
- """
- threshold: float = 0.5
- neg_threshold: Optional[float] = None
- min_speech_duration_ms: int = 0
- max_speech_duration_s: float = float("inf")
- min_silence_duration_ms: int = 2000
- speech_pad_ms: int = 400
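- # Illustrative sketch (not part of the original module): constructing options.
- # The values below are arbitrary examples, not recommended settings.
- #
- #     default_options = VadOptions()
- #     strict_options = VadOptions(threshold=0.6, min_silence_duration_ms=500)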
- def get_speech_timestamps(
- audio: np.ndarray,
- vad_options: Optional[VadOptions] = None,
- sampling_rate: int = 16000,
- **kwargs,
- ) -> List[dict]:
- """This method is used for splitting long audios into speech chunks using silero VAD.
- Args:
- audio: One dimensional float array.
- vad_options: Options for VAD processing.
- sampling_rate: Sampling rate of the audio.
- kwargs: VAD options passed as keyword arguments for backward compatibility.
- Returns:
- List of dicts containing the start and end samples of each speech chunk.
- """
- if vad_options is None:
- vad_options = VadOptions(**kwargs)
- threshold = vad_options.threshold
- neg_threshold = vad_options.neg_threshold
- min_speech_duration_ms = vad_options.min_speech_duration_ms
- max_speech_duration_s = vad_options.max_speech_duration_s
- min_silence_duration_ms = vad_options.min_silence_duration_ms
- window_size_samples = 512
- speech_pad_ms = vad_options.speech_pad_ms
- min_speech_samples = sampling_rate * min_speech_duration_ms / 1000
- speech_pad_samples = sampling_rate * speech_pad_ms / 1000
- max_speech_samples = (
- sampling_rate * max_speech_duration_s
- - window_size_samples
- - 2 * speech_pad_samples
- )
- min_silence_samples = sampling_rate * min_silence_duration_ms / 1000
- min_silence_samples_at_max_speech = sampling_rate * 98 / 1000
- audio_length_samples = len(audio)
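- # Run the model over the whole waveform in one pass: pad to a multiple of the
- # 512-sample window and obtain one speech probability per window.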
- model = get_vad_model()
- padded_audio = np.pad(
- audio, (0, window_size_samples - audio.shape[0] % window_size_samples)
- )
- speech_probs = model(padded_audio.reshape(1, -1)).squeeze(0)
- triggered = False
- speeches = []
- current_speech = {}
- if neg_threshold is None:
- neg_threshold = max(threshold - 0.15, 0.01)
- # to save potential segment end (and tolerate some silence)
- temp_end = 0
- # to save potential segment limits in case of maximum segment size reached
- prev_end = next_start = 0
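- # Walk the per-window probabilities with a small state machine: `triggered`
- # marks that we are currently inside a speech segment.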
- for i, speech_prob in enumerate(speech_probs):
- if (speech_prob >= threshold) and temp_end:
- temp_end = 0
- if next_start < prev_end:
- next_start = window_size_samples * i
- if (speech_prob >= threshold) and not triggered:
- triggered = True
- current_speech["start"] = window_size_samples * i
- continue
- if (
- triggered
- and (window_size_samples * i) - current_speech["start"] > max_speech_samples
- ):
- if prev_end:
- current_speech["end"] = prev_end
- speeches.append(current_speech)
- current_speech = {}
- # previously reached silence (< neg_thres) and is still not speech (< thres)
- if next_start < prev_end:
- triggered = False
- else:
- current_speech["start"] = next_start
- prev_end = next_start = temp_end = 0
- else:
- current_speech["end"] = window_size_samples * i
- speeches.append(current_speech)
- current_speech = {}
- prev_end = next_start = temp_end = 0
- triggered = False
- continue
- if (speech_prob < neg_threshold) and triggered:
- if not temp_end:
- temp_end = window_size_samples * i
- # only remember this silence as a potential split point (for max_speech_duration_s)
- # if it is long enough
- if (window_size_samples * i) - temp_end > min_silence_samples_at_max_speech:
- prev_end = temp_end
- if (window_size_samples * i) - temp_end < min_silence_samples:
- continue
- else:
- current_speech["end"] = temp_end
- if (
- current_speech["end"] - current_speech["start"]
- ) > min_speech_samples:
- speeches.append(current_speech)
- current_speech = {}
- prev_end = next_start = temp_end = 0
- triggered = False
- continue
- if (
- current_speech
- and (audio_length_samples - current_speech["start"]) > min_speech_samples
- ):
- current_speech["end"] = audio_length_samples
- speeches.append(current_speech)
- for i, speech in enumerate(speeches):
- if i == 0:
- speech["start"] = int(max(0, speech["start"] - speech_pad_samples))
- if i != len(speeches) - 1:
- silence_duration = speeches[i + 1]["start"] - speech["end"]
- if silence_duration < 2 * speech_pad_samples:
- speech["end"] += int(silence_duration // 2)
- speeches[i + 1]["start"] = int(
- max(0, speeches[i + 1]["start"] - silence_duration // 2)
- )
- else:
- speech["end"] = int(
- min(audio_length_samples, speech["end"] + speech_pad_samples)
- )
- speeches[i + 1]["start"] = int(
- max(0, speeches[i + 1]["start"] - speech_pad_samples)
- )
- else:
- speech["end"] = int(
- min(audio_length_samples, speech["end"] + speech_pad_samples)
- )
- return speeches
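- # Illustrative usage sketch (not part of the original module). `waveform` is a
- # hypothetical one-dimensional float32 array sampled at 16 kHz (for example the
- # output of faster_whisper.audio.decode_audio):
- #
- #     speech_chunks = get_speech_timestamps(waveform, VadOptions(threshold=0.5))
- #     # -> [{"start": 12800, "end": 96256}, ...]  (sample indices into `waveform`)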
- def collect_chunks(
- audio: np.ndarray, chunks: List[dict], sampling_rate: int = 16000
- ) -> Tuple[List[np.ndarray], List[Dict[str, int]]]:
- """Collects audio chunks."""
- if not chunks:
- chunk_metadata = {
- "start_time": 0,
- "end_time": 0,
- }
- return [np.array([], dtype=np.float32)], [chunk_metadata]
- audio_chunks = []
- chunks_metadata = []
- for chunk in chunks:
- chunk_metadata = {
- "start_time": chunk["start"] / sampling_rate,
- "end_time": chunk["end"] / sampling_rate,
- }
- audio_chunks.append(audio[chunk["start"] : chunk["end"]])
- chunks_metadata.append(chunk_metadata)
- return audio_chunks, chunks_metadata
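- # Illustrative sketch (not part of the original module): extracting the detected
- # speech regions as separate arrays together with their times in seconds.
- # `waveform` is the same hypothetical 16 kHz array as above.
- #
- #     chunks = get_speech_timestamps(waveform, VadOptions())
- #     audio_chunks, chunks_metadata = collect_chunks(waveform, chunks)
- #     # chunks_metadata[0] == {"start_time": ..., "end_time": ...}  (seconds)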
- class SpeechTimestampsMap:
- """Helper class to restore original speech timestamps."""
- def __init__(self, chunks: List[dict], sampling_rate: int, time_precision: int = 2):
- self.sampling_rate = sampling_rate
- self.time_precision = time_precision
- self.chunk_end_sample = []
- self.total_silence_before = []
- previous_end = 0
- silent_samples = 0
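- # For each chunk, record where it ends in the concatenated (silence-removed)
- # audio and how much silence (in seconds) was dropped before it, so times can
- # later be shifted back onto the original timeline.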
- for chunk in chunks:
- silent_samples += chunk["start"] - previous_end
- previous_end = chunk["end"]
- self.chunk_end_sample.append(chunk["end"] - silent_samples)
- self.total_silence_before.append(silent_samples / sampling_rate)
- def get_original_time(
- self,
- time: float,
- chunk_index: Optional[int] = None,
- ) -> float:
- if chunk_index is None:
- chunk_index = self.get_chunk_index(time)
- total_silence_before = self.total_silence_before[chunk_index]
- return round(total_silence_before + time, self.time_precision)
- def get_chunk_index(self, time: float) -> int:
- sample = int(time * self.sampling_rate)
- return min(
- bisect.bisect(self.chunk_end_sample, sample),
- len(self.chunk_end_sample) - 1,
- )
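- # Illustrative sketch (not part of the original module): mapping a timestamp that
- # was measured inside the concatenated speech-only audio back to the original file.
- #
- #     ts_map = SpeechTimestampsMap(chunks, sampling_rate=16000)
- #     original_time = ts_map.get_original_time(3.2)  # seconds in the original audio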
- @functools.lru_cache
- def get_vad_model():
- """Returns the VAD model instance."""
- encoder_path = os.path.join(get_assets_path(), "silero_encoder_v5.onnx")
- decoder_path = os.path.join(get_assets_path(), "silero_decoder_v5.onnx")
- return SileroVADModel(encoder_path, decoder_path)
- class SileroVADModel:
- def __init__(self, encoder_path, decoder_path):
- try:
- import onnxruntime
- except ImportError as e:
- raise RuntimeError(
- "Applying the VAD filter requires the onnxruntime package"
- ) from e
- opts = onnxruntime.SessionOptions()
- opts.inter_op_num_threads = 1
- opts.intra_op_num_threads = 1
- opts.enable_cpu_mem_arena = False
- opts.log_severity_level = 4
- self.encoder_session = onnxruntime.InferenceSession(
- encoder_path,
- providers=["CPUExecutionProvider"],
- sess_options=opts,
- )
- self.decoder_session = onnxruntime.InferenceSession(
- decoder_path,
- providers=["CPUExecutionProvider"],
- sess_options=opts,
- )
- def __call__(
- self, audio: np.ndarray, num_samples: int = 512, context_size_samples: int = 64
- ):
- assert (
- audio.ndim == 2
- ), "Input should be a 2D array with size (batch_size, num_samples)"
- assert (
- audio.shape[1] % num_samples == 0
- ), "Input size should be a multiple of num_samples"
- batch_size = audio.shape[0]
- state = np.zeros((2, batch_size, 128), dtype="float32")
- context = np.zeros(
- (batch_size, context_size_samples),
- dtype="float32",
- )
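- # Split the audio into 512-sample windows and prefix each window with the last
- # 64 samples of the previous window (zeros for the first window), which the
- # model expects as left context.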
- batched_audio = audio.reshape(batch_size, -1, num_samples)
- context = batched_audio[..., -context_size_samples:]
- context[:, -1] = 0
- context = np.roll(context, 1, 1)
- batched_audio = np.concatenate([context, batched_audio], 2)
- batched_audio = batched_audio.reshape(-1, num_samples + context_size_samples)
- encoder_batch_size = 10000
- num_segments = batched_audio.shape[0]
- encoder_outputs = []
- for i in range(0, num_segments, encoder_batch_size):
- encoder_output = self.encoder_session.run(
- None, {"input": batched_audio[i : i + encoder_batch_size]}
- )[0]
- encoder_outputs.append(encoder_output)
- encoder_output = np.concatenate(encoder_outputs, axis=0)
- encoder_output = encoder_output.reshape(batch_size, -1, 128)
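- # The decoder is recurrent: feed it one encoder frame at a time, carrying the
- # hidden state across steps; each step yields one speech probability per batch element.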
- decoder_outputs = []
- for window in np.split(encoder_output, encoder_output.shape[1], axis=1):
- out, state = self.decoder_session.run(
- None, {"input": window.squeeze(1), "state": state}
- )
- decoder_outputs.append(out)
- out = np.stack(decoder_outputs, axis=1).squeeze(-1)
- return out
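- # Illustrative sketch (not part of the original module): calling the model directly.
- # The input must be two-dimensional with a sample count that is a multiple of 512
- # (see the asserts above); the output holds one speech probability per window.
- #
- #     model = get_vad_model()
- #     probs = model(np.zeros((1, 4 * 512), dtype=np.float32))  # shape (1, 4)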
- def merge_segments(segments_list, vad_options: VadOptions, sampling_rate: int = 16000):
- if not segments_list:
- return []
- curr_end = 0
- seg_idxs = []
- merged_segments = []
- edge_padding = vad_options.speech_pad_ms * sampling_rate // 1000
- chunk_length = vad_options.max_speech_duration_s * sampling_rate
- curr_start = segments_list[0]["start"]
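- # Greedily pack consecutive speech segments into merged chunks that are at most
- # max_speech_duration_s long, remembering the original segment boundaries.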
- for idx, seg in enumerate(segments_list):
- # If speech padding made this segment overlap a neighbour, trim the padding back:
- # shift the start later if it overlaps the previous segment's end, and the end
- # earlier if it overlaps the next segment's start.
- if idx > 0:
- if seg["start"] < segments_list[idx - 1]["end"]:
- seg["start"] += edge_padding
- if idx < len(segments_list) - 1:
- if seg["end"] > segments_list[idx + 1]["start"]:
- seg["end"] -= edge_padding
- if seg["end"] - curr_start > chunk_length and curr_end - curr_start > 0:
- merged_segments.append(
- {
- "start": curr_start,
- "end": curr_end,
- "segments": seg_idxs,
- }
- )
- curr_start = seg["start"]
- seg_idxs = []
- curr_end = seg["end"]
- seg_idxs.append((seg["start"], seg["end"]))
- # add the final merged chunk
- merged_segments.append(
- {
- "start": curr_start,
- "end": curr_end,
- "segments": seg_idxs,
- }
- )
- return merged_segments
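- # Illustrative sketch (not part of the original module): grouping VAD output into
- # transcription windows no longer than max_speech_duration_s. The option values
- # are arbitrary examples.
- #
- #     options = VadOptions(max_speech_duration_s=30.0)
- #     chunks = get_speech_timestamps(waveform, options)
- #     windows = merge_segments(chunks, options)
- #     # each window: {"start": ..., "end": ..., "segments": [(start, end), ...]}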