# torchaudio speech recognition

import ssl

import torch
import torchaudio
from torchaudio.pipelines import WAV2VEC2_ASR_BASE_960H

# Work around SSL certificate verification failures when downloading the
# pre-trained model weights. NOTE: this disables HTTPS certificate checking
# process-wide — acceptable for a demo script, not for production code.
ssl._create_default_https_context = ssl._create_unverified_context

# Load the pre-trained Wav2Vec2 model and tokenizer.
# WAV2VEC2_ASR_BASE_960H: wav2vec 2.0 base model fine-tuned for ASR on
# 960 hours of LibriSpeech; expects 16 kHz mono input (bundle.sample_rate).
bundle = WAV2VEC2_ASR_BASE_960H
model = bundle.get_model().to('cpu')
# NOTE(review): the public Wav2Vec2ASRBundle API exposes get_labels(), not
# get_tokenizer() — confirm this call exists in the installed torchaudio
# version; it may raise AttributeError.
tokenizer = bundle.get_tokenizer()

# Function to transcribe audio file
def transcribe_audio(audio_file_path):
    """Transcribe a speech audio file to text with the Wav2Vec2 CTC model.

    Args:
        audio_file_path: Path to an audio file readable by torchaudio.load.

    Returns:
        The greedy-decoded transcript as an uppercase string with words
        separated by single spaces.
    """
    # Load audio; waveform has shape (channels, frames).
    waveform, sample_rate = torchaudio.load(audio_file_path)

    # Mix down to mono — the model expects a single channel.
    if waveform.size(0) > 1:
        waveform = waveform.mean(dim=0, keepdim=True)

    # Resample only when the file's rate differs from the model's (16 kHz).
    if sample_rate != bundle.sample_rate:
        waveform = torchaudio.functional.resample(
            waveform, sample_rate, bundle.sample_rate
        )

    # Forward pass. torchaudio's Wav2Vec2 ASR models return per-frame label
    # logits (emissions) directly from forward(); there is no separate
    # `classifier` attribute, and extract_features() yields intermediate
    # transformer features rather than classifier logits.
    with torch.inference_mode():
        emissions, _ = model(waveform)

    # Greedy CTC decoding: per-frame argmax, collapse repeated indices,
    # drop the blank token, and map '|' (word boundary) to a space.
    labels = bundle.get_labels()
    blank = 0  # index of the CTC blank token ('-') in bundle.get_labels()
    indices = torch.argmax(emissions[0], dim=-1).tolist()
    chars = []
    prev = None
    for idx in indices:
        if idx != prev and idx != blank:
            chars.append(labels[idx])
        prev = idx
    transcript = "".join(chars).replace("|", " ").strip()

    return transcript

# Example usage — guarded so importing this module does not trigger a
# model download and transcription run as a side effect.
if __name__ == "__main__":
    audio_file_path = "example.wav"  # Replace with your audio file path
    transcription = transcribe_audio(audio_file_path)
    print(f"Transcription: {transcription}")
# © Copyright belongs to the original author; contact the author for
# reprinting or content collaboration. (Blog-platform boilerplate retained
# from the original post.)