Sentiment Analysis
The Sentiment Analysis model detects the sentiment of each spoken sentence in the transcript text. Use Sentiment Analysis to get a detailed analysis of the positive, negative, or neutral sentiment conveyed in the audio, along with a confidence score for each result.
Quickstart
Enable Sentiment Analysis by setting sentiment_analysis
to true
in the transcription config.
- Python
- TypeScript
- Go
- Java
- C#
- Ruby
import assemblyai as aai

aai.settings.api_key = "YOUR_API_KEY"

# audio_file = "./local_file.mp3"
audio_file = "https://assembly.ai/wildfires.mp3"

# Enable Sentiment Analysis in the transcription config.
config = aai.TranscriptionConfig(sentiment_analysis=True)

transcript = aai.Transcriber().transcribe(audio_file, config)

# Fail fast on a transcription error; otherwise
# transcript.sentiment_analysis would be None below.
if transcript.status == aai.TranscriptStatus.error:
    raise RuntimeError(f"Transcription failed: {transcript.error}")

for sentiment_result in transcript.sentiment_analysis:
    print(sentiment_result.text)
    print(sentiment_result.sentiment)  # POSITIVE, NEUTRAL, or NEGATIVE
    print(sentiment_result.confidence)
    print(f"Timestamp: {sentiment_result.start} - {sentiment_result.end}")
import { AssemblyAI } from 'assemblyai'

const client = new AssemblyAI({
  apiKey: 'YOUR_API_KEY'
})

// const audioFile = './local_file.mp3'
const audioFile = 'https://assembly.ai/wildfires.mp3'

// Enable Sentiment Analysis in the transcription parameters.
const params = {
  audio: audioFile,
  sentiment_analysis: true
}

const run = async () => {
  const transcript = await client.transcripts.transcribe(params)

  // A transcript in `error` status has no sentiment_analysis_results,
  // so guard instead of relying on a non-null assertion.
  if (transcript.status === 'error') {
    throw new Error(`Transcription failed: ${transcript.error}`)
  }

  for (const result of transcript.sentiment_analysis_results ?? []) {
    console.log(result.text)
    console.log(result.sentiment) // POSITIVE, NEUTRAL, or NEGATIVE
    console.log(result.confidence)
    console.log(`Timestamp: ${result.start} - ${result.end}`)
  }
}

// Surface rejections instead of leaving the promise floating.
run().catch(console.error)
package main
import (
"context"
"fmt"
aai "github.com/AssemblyAI/assemblyai-go-sdk"
)
func main() {
client := aai.NewClient("YOUR_API_KEY" )
// For local files see our Getting Started guides.
audioURL := "https://assembly.ai/wildfires.mp3"
ctx := context.Background()
transcript, _ := client.Transcripts.TranscribeFromURL(ctx, audioURL, &aai.TranscriptOptionalParams{
SentimentAnalysis: aai.Bool(true),
})
for _, result := range transcript.SentimentAnalysisResults {
fmt.Println(aai.ToString(result.Text))
fmt.Println(result.Sentiment)
fmt.Println(aai.ToFloat64(result.Confidence))
fmt.Println("Timestamp:",
aai.ToInt64(result.Start), "-",
aai.ToInt64(result.End),
)
}
}
import com.assemblyai.api.AssemblyAI;
import com.assemblyai.api.resources.transcripts.types.*;

import java.util.List;

public final class App {
    public static void main(String[] args) {
        AssemblyAI client = AssemblyAI.builder()
                .apiKey("YOUR_API_KEY")
                .build();

        // For local files see our Getting Started guides.
        String audioUrl = "https://assembly.ai/wildfires.mp3";

        // Enable Sentiment Analysis in the optional transcription params.
        var params = TranscriptOptionalParams.builder()
                .sentimentAnalysis(true)
                .build();

        Transcript transcript = client.transcripts().transcribe(audioUrl, params);

        // Optional.get() throws NoSuchElementException when the transcript
        // failed; fall back to an empty list instead.
        var sentimentAnalysisResults = transcript.getSentimentAnalysisResults().orElse(List.of());

        sentimentAnalysisResults.forEach(result -> {
            System.out.println(result.getText());
            System.out.println(result.getSentiment()); // POSITIVE, NEUTRAL, or NEGATIVE
            System.out.println(result.getConfidence());
            System.out.println("Timestamp: " + result.getStart() + " - " + result.getEnd());
        });
    }
}
using AssemblyAI;
using AssemblyAI.Transcripts;

var client = new AssemblyAIClient("YOUR_API_KEY");

// Enable Sentiment Analysis in the transcription parameters.
var transcriptParams = new TranscriptParams
{
    // For local files see our Getting Started guides.
    AudioUrl = "https://assembly.ai/wildfires.mp3",
    SentimentAnalysis = true
};

var transcript = await client.Transcripts.TranscribeAsync(transcriptParams);

foreach (var sentimentResult in transcript.SentimentAnalysisResults!)
{
    Console.WriteLine(sentimentResult.Text);
    Console.WriteLine(sentimentResult.Sentiment); // POSITIVE, NEUTRAL, or NEGATIVE
    Console.WriteLine(sentimentResult.Confidence);
    Console.WriteLine($"Timestamp: {sentimentResult.Start} - {sentimentResult.End}");
}
require 'assemblyai'

client = AssemblyAI::Client.new(api_key: 'YOUR_API_KEY')

# For local files see our Getting Started guides.
audio_url = 'https://assembly.ai/wildfires.mp3'

# Enable Sentiment Analysis in the transcription request.
transcript = client.transcripts.transcribe(
  audio_url: audio_url,
  sentiment_analysis: true
)

transcript.sentiment_analysis_results.each do |result|
  puts result.text
  puts result.sentiment # POSITIVE, NEUTRAL, or NEGATIVE
  puts result.confidence
  # "Timestamp: " prefix added for consistency with the other SDK examples.
  # `end_` is used because `end` is a reserved word in Ruby.
  printf("Timestamp: %<start>d - %<end>d\n", start: result.start, end: result.end_)
end
Example output
Smoke from hundreds of wildfires in Canada is triggering air quality alerts throughout the US.
SentimentType.negative
0.8181032538414001
Timestamp: 250 - 6350
...
Add speaker labels to sentiments
To add speaker labels to each sentiment analysis result using Speaker Diarization, enable speaker_labels
in the transcription config.
Each sentiment result will then have a speaker
field that contains the speaker label.
- Python
- TypeScript
- Go
- Java
- C#
- Ruby
# Enable Speaker Diarization alongside Sentiment Analysis so each
# sentiment result also carries a speaker label.
config = aai.TranscriptionConfig(
    sentiment_analysis=True,
    speaker_labels=True
)

# ... transcribe as shown in the Quickstart above ...

for sentiment_result in transcript.sentiment_analysis:
    # Speaker label (e.g. "A", "B") for this sentence.
    print(sentiment_result.speaker)
// Enable Speaker Diarization alongside Sentiment Analysis so each
// sentiment result also carries a speaker label.
const params = {
  audio: audioUrl,
  sentiment_analysis: true,
  speaker_labels: true
}

// ... transcribe as shown in the Quickstart above ...

for (const result of transcript.sentiment_analysis_results!) {
  // Speaker label (e.g. "A", "B") for this sentence.
  console.log(result.speaker)
}
// Enable Speaker Diarization alongside Sentiment Analysis so each
// sentiment result also carries a speaker label.
transcript, err := client.Transcripts.TranscribeFromURL(ctx, audioURL, &aai.TranscriptOptionalParams{
	SentimentAnalysis: aai.Bool(true),
	SpeakerLabels:     aai.Bool(true),
})
if err != nil {
	// Don't discard the error: a failed request leaves the results empty.
	panic(err)
}

for _, result := range transcript.SentimentAnalysisResults {
	// Speaker label (e.g. "A", "B") for this sentence.
	fmt.Println(aai.ToString(result.Speaker))
}
// Enable Speaker Diarization alongside Sentiment Analysis so each
// sentiment result also carries a speaker label.
var params = TranscriptOptionalParams.builder()
        .sentimentAnalysis(true)
        .speakerLabels(true)
        .build();

// ... transcribe as shown in the Quickstart above ...

sentimentAnalysisResults.forEach(result -> {
    // Speaker label (e.g. "A", "B") for this sentence.
    System.out.println(result.getSpeaker());
});
// Enable Speaker Diarization alongside Sentiment Analysis so each
// sentiment result also carries a speaker label.
var transcript = await client.Transcripts.TranscribeAsync(new TranscriptParams
{
    AudioUrl = audioUrl,
    SentimentAnalysis = true,
    SpeakerLabels = true
});

// ...

foreach (var result in transcript.SentimentAnalysisResults!)
{
    // ...
    // Speaker label (e.g. "A", "B") for this sentence.
    Console.WriteLine(result.Speaker);
}
# Enable Speaker Diarization alongside Sentiment Analysis so each
# sentiment result also carries a speaker label.
transcript = client.transcripts.transcribe(
  audio_url: audio_url,
  sentiment_analysis: true,
  speaker_labels: true
)

# ... iterate as shown in the Quickstart above ...

transcript.sentiment_analysis_results.each do |result|
  # Speaker label (e.g. "A", "B") for this sentence.
  puts result.speaker
end
API reference
Request
curl https://api.assemblyai.com/v2/transcript \
--header "Authorization: YOUR_API_KEY" \
--header "Content-Type: application/json" \
--data '{
"audio_url": "YOUR_AUDIO_URL",
"sentiment_analysis": true
}'
Key | Type | Description |
---|---|---|
sentiment_analysis | boolean | Enable Sentiment Analysis. |
Response
{ "sentiment_analysis_results": [ ... ] }
sentiment_analysis_results | array | A temporal sequence of Sentiment Analysis results for the audio file, one element for each sentence in the file. |
sentiment_analysis_results[i].text | string | The transcript of the i-th sentence. |
sentiment_analysis_results[i].start | number | The starting time, in milliseconds, of the i-th sentence. |
sentiment_analysis_results[i].end | number | The ending time, in milliseconds, of the i-th sentence. |
sentiment_analysis_results[i].sentiment | string | The detected sentiment for the i-th sentence, one of POSITIVE , NEUTRAL , NEGATIVE . |
sentiment_analysis_results[i].confidence | number | The confidence score for the detected sentiment of the i-th sentence, from 0 to 1. |
sentiment_analysis_results[i].speaker | string or null | The speaker of the i-th sentence if Speaker Diarization is enabled, else null. |
Frequently asked questions
The Sentiment Analysis model is based on the interpretation of the transcript and may not always accurately capture the intended sentiment of the speaker. It's recommended to take into account the context of the transcript and to validate the sentiment analysis results with human judgment when possible.
The Content Moderation model can be used to identify and filter out sensitive or offensive content from the transcript.
It's important to ensure that the audio being analyzed is relevant to your use case. Additionally, it's recommended to take into account the context of the transcript and to evaluate the confidence score for each sentiment label.
The Sentiment Analysis model is designed to be fast and efficient, but processing times may vary depending on the size of the audio file and the complexity of the language used. If you experience longer processing times than expected, don't hesitate to contact our support team.