-
Notifications
You must be signed in to change notification settings - Fork 1.4k
Description
from funasr import AutoModel

# Load the emotion2vec+ large model for speech emotion recognition.
emotion_model = AutoModel(model="iic/emotion2vec_plus_large")

# Path to the input audio clip to classify.
audio_path = "/content/zh6.mp3"

# Run utterance-level emotion recognition; embeddings are not extracted,
# only per-label scores are returned. Results are also written to ./outputs.
results = emotion_model.generate(
    audio_path,
    output_dir="./outputs",
    granularity="utterance",
    extract_embedding=False,
)
print(results)
Output: [{'key': 'zh6', 'labels': ['生气/angry', '厌恶/disgusted', '恐惧/fearful', '开心/happy', '中立/neutral', '其他/other', '难过/sad', '吃惊/surprised', ''], 'scores': [0.00010033947182819247, 0.0018021559808403254, 0.0001216967502841726, 1.0616841791488696e-05, 0.991152822971344, 1.213764426211128e-05, 0.006745313759893179, 5.416709609562531e-05, 6.601738391509571e-07]}]
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

# Build the same emotion-recognition pipeline through the ModelScope API,
# backed by the identical emotion2vec+ large checkpoint.
emotion_pipeline = pipeline(
    task=Tasks.emotion_recognition,
    model="iic/emotion2vec_plus_large",
)

# Classify the same audio file at utterance granularity, scores only.
pipeline_result = emotion_pipeline(
    '/content/zh6.mp3',
    granularity="utterance",
    extract_embedding=False,
)
print(pipeline_result)
Output: [{'key': 'zh6', 'labels': ['生气/angry', '开心/happy', '中立/neutral', '难过/sad', ''], 'scores': [0.00010053956793854013, 1.0638013009156566e-05, 0.993129312992096, 0.00675876485183835, 6.614902758883545e-07]}]
Note that the two runs return a different number of labels for the same model and audio file: the FunASR call lists all nine emotion categories (including an empty trailing label), while the ModelScope pipeline lists only five. Additionally, there is another point I do not understand — the issue is described below.