-
Notifications
You must be signed in to change notification settings - Fork 128
Expand file tree
/
Copy pathagent-evaluation.py
More file actions
325 lines (272 loc) · 10.7 KB
/
agent-evaluation.py
File metadata and controls
325 lines (272 loc) · 10.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
#!/usr/bin/env python3
"""
Comprehensive evaluation comparing direct models, agents, and multi-agent systems.
"""
import asyncio
import os
from pathlib import Path
import matplotlib.pyplot as plt
import pandas as pd
from picoagents import Agent
from picoagents.eval import (
AgentEvalTarget,
EvalRunner,
LLMEvalJudge,
ModelEvalTarget,
OrchestratorEvalTarget,
)
from picoagents.llm import AzureOpenAIChatCompletionClient
from picoagents.orchestration import RoundRobinOrchestrator
from picoagents.termination import MaxMessageTermination, TextMentionTermination
from picoagents.types import EvalTask
def create_tasks():
    """Build the writing-focused evaluation task suite.

    Returns:
        list[EvalTask]: three tasks (report writing, research analysis,
        quantitative reasoning), each with a reference expected output.
    """
    # (name, prompt, expected output) triples, expanded into EvalTask below.
    task_specs = [
        (
            "Report",
            "Write a report on the origins of artificial intelligence and its early development.",
            "Well-structured historical report",
        ),
        (
            "Research",
            "Research and write a brief analysis of renewable energy trends in 2024.",
            "Well-researched analysis",
        ),
        (
            "Reasoning",
            "How long would it take Eliud Kipchoge to run across the earth 10 times?",
            "If Eliud Kipchoge could maintain his marathon world record pace nonstop without rest, it would take him about 19,200 hours ≈ 800 days ≈ 2.2 years to run around the Earth 10 times.",
        ),
    ]
    return [
        EvalTask(name=name, input=prompt, expected_output=expected)
        for name, prompt, expected in task_specs
    ]
async def create_configurations(client):
    """Create the three system configurations to compare.

    Args:
        client: Chat-completion client shared by every target under test.

    Returns:
        list: ``[ModelEvalTarget, AgentEvalTarget, OrchestratorEvalTarget]``
        in increasing order of system complexity.

    NOTE(review): declared ``async`` to match the caller's ``await`` even
    though it currently performs no awaiting itself.
    """
    # 1. Direct model call: plain system message, no agent loop.
    model_target = ModelEvalTarget(
        client=client,
        name="Direct-Model",
        system_message="You are a helpful assistant. Give clear, accurate responses.",
    )

    # 2. Single agent wrapping the same client.
    agent = Agent(
        name="assistant",
        description="A helpful assistant for various tasks",
        instructions="You are a knowledgeable assistant. Provide accurate, helpful responses with clear explanations.",
        model_client=client,
    )
    agent_target = AgentEvalTarget(agent, name="Single-Agent")

    # 3. Multi-agent system: the assistant writes, the critic reviews.
    #    Prompt typos fixed ("help the create" -> "help create", "Dont" -> "Don't").
    critic = Agent(
        name="critic",
        description="Critical reviewer who provides constructive feedback",
        instructions="""You are a critical reviewer of tasks and must explore opportunities for improvement. Your goal is to evaluate work that has been done and provide constructive feedback to help improve the response. If the task is already polished and complete, simply respond with 'APPROVED'.
Be constructive and specific in your feedback to help create the best possible final product. Don't be unreasonably critical.""",
        model_client=client,
    )
    orchestrator = RoundRobinOrchestrator(
        agents=[agent, critic],
        # Stop on explicit approval, or after 10 messages as a safety cap.
        termination=MaxMessageTermination(max_messages=10)
        | TextMentionTermination(text="APPROVED"),
        max_iterations=7,
    )
    multiagent_target = OrchestratorEvalTarget(
        orchestrator, name="Multi-Agent-Writer-Critic"
    )

    return [model_target, agent_target, multiagent_target]
def create_visualizations(results_df, output_dir):
    """Render the two-panel comparison chart and return summary statistics.

    Args:
        results_df: Per-(system, task) results; must contain the columns
            ``system``, ``overall_score``, ``accuracy``, ``helpfulness``,
            ``clarity``, ``tokens_total``, ``duration_ms`` and may contain
            ``cost``.
        output_dir: Directory (``Path``) where ``evaluation_results.png``
            is written.

    Returns:
        pandas.DataFrame: mean/std summary grouped by system.
    """
    plt.style.use("default")
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 6))
    fig.suptitle(
        "Agent System Evaluation: Performance vs Resource Investment",
        fontsize=16,
        fontweight="bold",
        y=0.98,
    )

    # Mean metrics per system; row order drives bar order in both panels.
    metrics = (
        results_df.groupby("system")
        .agg(
            {
                "overall_score": "mean",
                "accuracy": "mean",
                "helpfulness": "mean",
                "clarity": "mean",
                "tokens_total": "mean",
                "duration_ms": "mean",
            }
        )
        .round(2)
    )

    # Display names for the known systems; unknown names fall back to the
    # raw key (the original raised KeyError on anything unlisted).
    system_names = {
        "Direct-Model": "Direct\nModel",
        "Single-Agent": "Single\nAgent",
        "Multi-Agent-Writer-Critic": "Multi-Agent\nSystem",
    }

    # Panel 1: grouped bars for the four quality metrics.
    x_pos = range(len(metrics))
    width = 0.2
    performance_metrics = ["overall_score", "accuracy", "helpfulness", "clarity"]
    colors = ["#3498db", "#2ecc71", "#e74c3c", "#f39c12"]
    labels = ["Overall Score", "Accuracy", "Helpfulness", "Clarity"]

    for i, (metric, color, label) in enumerate(
        zip(performance_metrics, colors, labels)
    ):
        values = [metrics.loc[sys, metric] for sys in metrics.index]
        ax1.bar(
            [x + i * width for x in x_pos],
            values,
            width,
            label=label,
            color=color,
            alpha=0.8,
        )

    ax1.set_xlabel("System Type", fontweight="bold")
    ax1.set_ylabel("Score (0-10)", fontweight="bold")
    ax1.set_title("Performance Quality Metrics", fontweight="bold", pad=20)
    # Center the tick under the middle of each 4-bar group.
    ax1.set_xticks([x + width * 1.5 for x in x_pos])
    ax1.set_xticklabels([system_names.get(sys, sys) for sys in metrics.index])
    ax1.legend(bbox_to_anchor=(1.05, 1), loc="upper left")
    ax1.set_ylim(0, 10)
    ax1.grid(axis="y", alpha=0.3)

    # Panel 2: token usage with score and efficiency annotations.
    systems = [system_names.get(sys, sys) for sys in metrics.index]
    tokens = [metrics.loc[sys, "tokens_total"] for sys in metrics.index]
    scores = [metrics.loc[sys, "overall_score"] for sys in metrics.index]

    bars = ax2.bar(systems, tokens, color=["#3498db", "#2ecc71", "#e74c3c"], alpha=0.7)

    # Loop-invariant annotation offset, hoisted (was recomputed per bar).
    annotation_offset = max(tokens) * 0.02 if tokens else 0.0
    for bar, score in zip(bars, scores):
        height = bar.get_height()
        # Score annotation above the bar.
        ax2.text(
            bar.get_x() + bar.get_width() / 2.0,
            height + annotation_offset,
            f"Score: {score:.1f}",
            ha="center",
            va="bottom",
            fontweight="bold",
        )
        # Efficiency (points per 1K tokens) inside the bar; guard the
        # zero-token case, which was a ZeroDivisionError in the original.
        if height > 0:
            efficiency = score / (height / 1000)
            ax2.text(
                bar.get_x() + bar.get_width() / 2.0,
                height / 2,
                f"{efficiency:.1f}\npts/1K tokens",
                ha="center",
                va="center",
                fontsize=10,
                color="white",
                fontweight="bold",
            )

    ax2.set_xlabel("System Type", fontweight="bold")
    ax2.set_ylabel("Average Token Usage", fontweight="bold")
    ax2.set_title("Resource Investment & Efficiency", fontweight="bold", pad=20)
    ax2.grid(axis="y", alpha=0.3)

    plt.tight_layout()
    plt.savefig(output_dir / "evaluation_results.png", dpi=300, bbox_inches="tight")
    plt.close()

    # Summary statistics (mean/std); cost is optional in the input frame.
    summary = (
        results_df.groupby("system")
        .agg(
            {
                "overall_score": ["mean", "std"],
                "tokens_total": ["mean", "std"],
                "duration_ms": ["mean", "std"],
                "cost": ["mean", "std"] if "cost" in results_df.columns else ["mean"],
            }
        )
        .round(3)
    )
    return summary
async def main():
    """Run the comparative evaluation end to end.

    Reads Azure OpenAI credentials from the environment, evaluates each
    system configuration on every task with an LLM judge, prints summary
    tables, and writes a chart (PNG) plus raw results (CSV) next to this
    script. Returns early with a message if credentials are missing.
    """
    # Mojibake residue of lost emoji removed from status strings;
    # placeholder-free f-strings demoted to plain strings.
    print("Multi-Agent System Evaluation")
    print("=" * 50)

    # Azure OpenAI configuration comes from the environment.
    azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT")
    api_key = os.getenv("AZURE_OPENAI_API_KEY")
    deployment = os.getenv("AZURE_OPENAI_DEPLOYMENT", "gpt-4.1-mini")

    if not azure_endpoint or not api_key:
        print(
            "❌ Please set AZURE_OPENAI_ENDPOINT and AZURE_OPENAI_API_KEY environment variables"
        )
        return

    client = AzureOpenAIChatCompletionClient(
        model="gpt-4.1-mini",
        azure_endpoint=azure_endpoint,
        api_key=api_key,
        azure_deployment=deployment,
    )

    # Outputs land next to this script.
    output_dir = Path(__file__).parent

    # Create evaluation components.
    tasks = create_tasks()
    configurations = await create_configurations(client)

    # Separate client instance for the judge so its usage is not mixed
    # into the systems under test.
    judge_client = AzureOpenAIChatCompletionClient(
        model="gpt-4.1-mini",
        azure_endpoint=azure_endpoint,
        api_key=api_key,
        azure_deployment=deployment,
    )
    judge = LLMEvalJudge(
        judge_client,
        name="gpt-4.1-mini-judge",
        default_criteria=["accuracy", "helpfulness", "clarity"],
    )
    runner = EvalRunner(judge=judge, parallel=False)  # Sequential for stability

    print(f"Evaluating {len(configurations)} systems on {len(tasks)} tasks")

    # Run evaluations; flatten scores into one row per (system, task).
    all_results = []
    for i, config in enumerate(configurations):
        print(f"\nEvaluating {config.name} ({i + 1}/{len(configurations)})")
        scores = await runner.evaluate(config, tasks)
        for task, score in zip(tasks, scores):
            # Scores without trajectory/usage data are skipped silently.
            if score.trajectory and score.trajectory.usage:
                usage = score.trajectory.usage
                all_results.append(
                    {
                        "system": config.name,
                        "task": task.name,
                        "overall_score": score.overall,
                        "accuracy": score.dimensions.get("accuracy", 0),
                        "helpfulness": score.dimensions.get("helpfulness", 0),
                        "clarity": score.dimensions.get("clarity", 0),
                        "tokens_total": usage.tokens_input + usage.tokens_output,
                        "duration_ms": usage.duration_ms,
                        "llm_calls": usage.llm_calls,
                        "cost": usage.cost_estimate or 0,
                    }
                )

    # Analysis.
    results_df = pd.DataFrame(all_results)

    print("\nRESULTS SUMMARY")
    print("=" * 50)
    system_avg = results_df.groupby("system")["overall_score"].mean().round(1)
    for system, avg_score in system_avg.items():
        print(f"{system:20} Average Score: {avg_score}/10")

    # Best system overall.
    best_system = system_avg.idxmax()
    print(f"\nBest Overall: {best_system} ({system_avg[best_system]}/10)")

    # Task-specific analysis (tasks as rows, systems as columns).
    print("\nTask-Specific Performance:")
    task_performance = results_df.pivot(
        index="task", columns="system", values="overall_score"
    ).round(1)
    print(task_performance.to_string())

    # Efficiency metrics.
    print("\nEfficiency Metrics:")
    efficiency_df = (
        results_df.groupby("system")
        .agg({"tokens_total": "mean", "duration_ms": "mean", "cost": "mean"})
        .round(3)
    )
    print(efficiency_df.to_string())

    # Create visualizations (unused `summary` return dropped) and save
    # the detailed per-task results.
    create_visualizations(results_df, output_dir)
    results_df.to_csv(output_dir / "evaluation_results.csv", index=False)

    print("\nEvaluation completed!")
    print(f"Results saved to: {output_dir}")
    print(" - evaluation_results.png (charts)")
    print(" - evaluation_results.csv (raw data)")
# Script entry point: drive the async evaluation pipeline.
if __name__ == "__main__":
    asyncio.run(main())