Analyze The Sentiment Of A Customer Call using LLM Gateway

In this guide, we’ll show you how to use AssemblyAI’s API to transcribe an audio file and then use LLM Gateway to automatically classify the sentiment of a customer call as “positive”, “negative”, or “neutral”. In addition, we will glean additional insights beyond these three sentiments and learn the reasoning behind the detected sentiments.

Quickstart

import requests
import time
import json

API_KEY = "YOUR_API_KEY"
audio_file_path = "./meeting.mp3"

# ------------------------------------------
# Step 1: Upload the audio file
# ------------------------------------------
def upload_file(filename):
    """Upload a local audio file to AssemblyAI and return its hosted URL."""
    with open(filename, "rb") as f:
        upload_url = "https://api.assemblyai.com/v2/upload"
        headers = {"authorization": API_KEY}
        response = requests.post(upload_url, headers=headers, data=f)
        response.raise_for_status()
        return response.json()["upload_url"]

audio_url = upload_file(audio_file_path)
print(f"Uploaded audio file. URL: {audio_url}")

# ------------------------------------------
# Step 2: Request transcription
# ------------------------------------------
transcript_request = requests.post(
    "https://api.assemblyai.com/v2/transcript",
    headers={"authorization": API_KEY, "content-type": "application/json"},
    json={"audio_url": audio_url},
)
transcript_request.raise_for_status()  # fail fast if the request was rejected

transcript_id = transcript_request.json()["id"]

# Poll until the transcript reaches a terminal status ("completed" or "error")
while True:
    polling_response = requests.get(
        f"https://api.assemblyai.com/v2/transcript/{transcript_id}",
        headers={"authorization": API_KEY},
    )
    polling_response.raise_for_status()
    status = polling_response.json()["status"]

    if status == "completed":
        transcript_text = polling_response.json()["text"]
        break
    elif status == "error":
        raise RuntimeError(f"Transcription failed: {polling_response.json()['error']}")
    else:
        print(f"Transcription status: {status}")
        time.sleep(3)

print("\nTranscription complete.\n")

# ------------------------------------------
# Step 3: Define questions
# ------------------------------------------
agent_context = "The agent is trying to get the customer to go through with the update to their car."
customer_context = "The customer is calling to check how much it would cost to update the map in his car."

answer_format = "<answer in one sentence> <reason in one sentence>"

questions = [
    {
        "question": "What was the overall sentiment of the call?",
        "context": customer_context,
        "answer_format": answer_format,
    },
    {
        "question": "What was the sentiment of the agent in this call?",
        "context": agent_context,
        "answer_format": answer_format,
    },
    {
        "question": "What was the sentiment of the customer in this call?",
        "context": customer_context,
        "answer_format": answer_format,
    },
    {
        "question": "What quote best demonstrates the customer's level of interest?",
        "context": customer_context,
        "answer_format": answer_format,
    },
    {
        "question": "Provide a quote from the agent that demonstrates their level of enthusiasm.",
        "context": agent_context,
        "answer_format": answer_format,
    },
]

# ------------------------------------------
# Step 4: Build prompt for the LLM
# ------------------------------------------
# Each question becomes a "Question / Context / Answer Format" paragraph;
# context and answer_format are optional per question.
question_strs = []
for q in questions:
    q_str = f"Question: {q['question']}"
    if q.get("context"):
        q_str += f"\nContext: {q['context']}"
    if q.get("answer_format"):
        q_str += f"\nAnswer Format: {q['answer_format']}"
    question_strs.append(q_str)

questions_prompt = "\n\n".join(question_strs)

prompt = f"""
You are an expert at analyzing call transcripts.
Given the series of questions below, answer them accurately and concisely.
When context or answer format is provided, use it to guide your answers.

Transcript:
{transcript_text}

Questions:
{questions_prompt}
"""

# ------------------------------------------
# Step 5: Query the LLM Gateway
# ------------------------------------------
headers = {"authorization": API_KEY}

response = requests.post(
    "https://llm-gateway.assemblyai.com/v1/chat/completions",
    headers=headers,
    json={
        "model": "claude-sonnet-4-5-20250929",
        "messages": [{"role": "user", "content": prompt}],
        "max_tokens": 2000,
    },
)
response.raise_for_status()  # surface auth/quota/model errors before parsing

response_json = response.json()
llm_output = response_json["choices"][0]["message"]["content"]

# ------------------------------------------
# Step 6: Parse and display the results
# ------------------------------------------
print("\n--- LLM Responses ---\n")
print(llm_output)

Get Started

Before we begin, make sure you have an AssemblyAI account and an API key. You can sign up for an AssemblyAI account and get your API key from your dashboard.

See our pricing page for LLM Gateway pricing rates.

Step-by-Step Instructions

In this guide, we will ask five questions to learn about the sentiment of the customer and agent. You can adjust the questions to suit your project’s needs.

Start by importing the required libraries and setting your AssemblyAI API key.

import requests
import time
import json

API_KEY = "YOUR_API_KEY"

Next, you’ll upload your audio file to AssemblyAI’s servers. Once the upload is complete, the API will return a temporary URL that can be used to start the transcription.

After submitting the transcription request, your script will poll the API until the transcription is finished.

audio_file_path = "./meeting.mp3"

# ------------------------------------------
# Step 1: Upload the audio file
# ------------------------------------------
def upload_file(filename):
    """Upload a local audio file to AssemblyAI and return its hosted URL."""
    with open(filename, "rb") as f:
        upload_url = "https://api.assemblyai.com/v2/upload"
        headers = {"authorization": API_KEY}
        response = requests.post(upload_url, headers=headers, data=f)
        response.raise_for_status()
        return response.json()["upload_url"]

audio_url = upload_file(audio_file_path)
print(f"Uploaded audio file. URL: {audio_url}")

# ------------------------------------------
# Step 2: Request transcription
# ------------------------------------------
transcript_request = requests.post(
    "https://api.assemblyai.com/v2/transcript",
    headers={"authorization": API_KEY, "content-type": "application/json"},
    json={"audio_url": audio_url},
)
transcript_request.raise_for_status()  # fail fast if the request was rejected

transcript_id = transcript_request.json()["id"]

# Poll until the transcript reaches a terminal status ("completed" or "error")
while True:
    polling_response = requests.get(
        f"https://api.assemblyai.com/v2/transcript/{transcript_id}",
        headers={"authorization": API_KEY},
    )
    polling_response.raise_for_status()
    status = polling_response.json()["status"]

    if status == "completed":
        transcript_text = polling_response.json()["text"]
        break
    elif status == "error":
        raise RuntimeError(f"Transcription failed: {polling_response.json()['error']}")
    else:
        print(f"Transcription status: {status}")
        time.sleep(3)

print("\nTranscription complete.\n")

Once you have the transcript, you’ll define short context strings for both the agent and the customer. These will help the model better understand the roles and perspectives in the conversation.

agent_context = "The agent is trying to get the customer to go through with the update to their car."
customer_context = "The customer is calling to check how much it would cost to update the map in his car."

You can now specify the exact questions you want the LLM Gateway to answer. Each question can include optional context and an answer format that tells the model how to structure its response.

answer_format = "<answer in one sentence> <reason in one sentence>"

questions = [
    {
        "question": "What was the overall sentiment of the call?",
        "context": customer_context,
        "answer_format": answer_format,
    },
    {
        "question": "What was the sentiment of the agent in this call?",
        "context": agent_context,
        "answer_format": answer_format,
    },
    {
        "question": "What was the sentiment of the customer in this call?",
        "context": customer_context,
        "answer_format": answer_format,
    },
    {
        "question": "What quote best demonstrates the customer's level of interest?",
        "context": customer_context,
        "answer_format": answer_format,
    },
    {
        "question": "Provide a quote from the agent that demonstrates their level of enthusiasm.",
        "context": agent_context,
        "answer_format": answer_format,
    },
]

Now that the questions are defined, combine them into a single formatted prompt. This prompt includes both the call transcript and the questions you want the model to address. The model will use these details to generate accurate and concise responses.

# ------------------------------------------
# Step 4: Build prompt for the LLM
# ------------------------------------------
# Each question becomes a "Question / Context / Answer Format" paragraph;
# context and answer_format are optional per question.
question_strs = []
for q in questions:
    q_str = f"Question: {q['question']}"
    if q.get("context"):
        q_str += f"\nContext: {q['context']}"
    if q.get("answer_format"):
        q_str += f"\nAnswer Format: {q['answer_format']}"
    question_strs.append(q_str)

questions_prompt = "\n\n".join(question_strs)

prompt = f"""
You are an expert at analyzing call transcripts.
Given the series of questions below, answer them accurately and concisely.
When context or answer format is provided, use it to guide your answers.

Transcript:
{transcript_text}

Questions:
{questions_prompt}
"""

With the prompt prepared, query the LLM Gateway, then extract and print the answers it returns. This step displays the model’s assessment of each question, including the identified sentiments and the reasoning behind them.

# ------------------------------------------
# Step 5: Query the LLM Gateway
# ------------------------------------------
headers = {"authorization": API_KEY}

response = requests.post(
    "https://llm-gateway.assemblyai.com/v1/chat/completions",
    headers=headers,
    json={
        "model": "claude-sonnet-4-5-20250929",
        "messages": [{"role": "user", "content": prompt}],
        "max_tokens": 2000,
    },
)
response.raise_for_status()  # surface auth/quota/model errors before parsing

response_json = response.json()
llm_output = response_json["choices"][0]["message"]["content"]

# ------------------------------------------
# Step 6: Parse and display the results
# ------------------------------------------
print("\n--- LLM Responses ---\n")
print(llm_output)