Here is my code. I am unable to see any console log messages, yet a file is being produced and charging me a few cents. Trying to iterate a prompt over a file with 300 rows, testing it on just 2 rows for now. Working on an independent project, changed the names of the fields for the sake of security for my idea. I’m confused why I don’t see any console logs, and why I keep getting errors back for the two fields I want Perplexity to produce: Date and Summary. Any advice? Thank you so much for your time, this is my first ever community post. Tried following documentation, even using the prompt in the Getting Started doesn’t work. I pip installed OpenAI by the way.
from openai import OpenAI
import pandas as pd
import time
import sys
API setup
YOUR_API_KEY = “nuhUhUh!” # Replace with your API key
client = OpenAI(api_key=YOUR_API_KEY, base_url=“https://api.perplexity.ai”)
File paths
input_file = r"fileWithDataForPrompts"
output_file = r"OutputFile"
Load the Excel file
try:
data = pd.read_excel(input_file)
print(f"Loaded {len(data)} rows from the input file.“)
except Exception as e:
print(f"Failed to load input file: {e}”)
sys.exit(1)
Validate headers
required_headers = [“Enitity”, “EntityId”, “Name”, “Amount”, “Date”]
if not all(header in data.columns for header in required_headers):
raise ValueError(f"The input file must contain the headers: {required_headers}")
Results list
results =
Testing with the first 2 rows
data_sample = data.head(2)
Process rows
for index, row in data_sample.iterrows():
try:
print(f"Processing row {index + 1}/{len(data_sample)}…", flush=True)
# Construct messages
messages = [
{"role": "system", "content": "You are a helpful assistant for summarizing details."},
{
"role": "user",
"content": (
f"Please summarize the following contract details:\n"
f"Entity: {row['Entity']}, "
f"EntityId: {row['EntityId']}, "
f"Name: {row['Name']}, "
f"Amount: {row['Amount']}, "
f"Date: {row['Date']}. "
"Include any details such as scope, completion date, and key features."
),
},
]
# API request with streaming
response_content = ""
response_stream = client.chat.completions.create(
model="llama-3.1-sonar-large-128k-online",
messages=messages,
stream=True,
)
for chunk in response_stream:
print(f"Chunk received: {chunk}", flush=True)
response_content += chunk["choices"][0]["delta"].get("content", "")
# Extract summary and completion date
summary = response_content.split("\n")[0]
completion_date = "Not available"
for line in response_content.split("\n"):
if "completion" in line.lower() or "expiration" in line.lower():
completion_date = line.strip()
break
# Append results
results.append({
"Entity": row["Entity"],
"EntityId": row["EntityId"],
"Name": row["Name"],
"Amount": row["Amount"],
"Date": row["Date"],
"Summary": summary,
"Completion/Expiration Date": completion_date,
})
print(f"Row {index + 1} processed: Summary and date extracted.", flush=True)
except Exception as e:
print(f"Error for row {index + 1}: {e}", flush=True)
results.append({
"Agency": row["Agency"],
"Contract Number": row["Contract Number"],
"Contractor": row["Contractor"],
"Award Amount": row["Award Amount"],
"Award Date": row["Award Date"],
"Summary": "Error",
"Completion/Expiration Date": "Error",
})
time.sleep(2) # Prevent API rate limits
Save results
try:
pd.DataFrame(results).to_excel(output_file, index=False)
print(f"Results saved to {output_file}“, flush=True)
except Exception as e:
print(f"Failed to save results: {e}”, flush=True)