Skip to content
1 change: 0 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,6 @@
*.so
*.tar.gz
/release/
sample-app

# Test binary, built with `go test -c`
*.test
Expand Down
293 changes: 293 additions & 0 deletions sample-app/generate_joke_workflow_example.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,293 @@
package main

import (
"context"
"fmt"
"os"

"github.com/sashabaranov/go-openai"
sdk "github.com/traceloop/go-openllmetry/traceloop-sdk"
)

// createJoke asks gpt-3.5-turbo for a joke about opentelemetry inside a
// "joke_creation" task span and returns the joke text.
//
// The prompt is logged before the API call (so the span records it even on
// failure) and the completion plus token usage is logged afterwards. Returns
// an error if the OpenAI call fails or yields no choices.
func createJoke(ctx context.Context, workflow *sdk.Workflow, client *openai.Client) (string, error) {
	task := workflow.NewTask("joke_creation")
	defer task.End()

	const jokePrompt = "Tell me a joke about opentelemetry"

	// Log prompt
	llmSpan := task.LogPrompt(sdk.Prompt{
		Vendor: "openai",
		Mode:   "chat",
		Model:  "gpt-3.5-turbo",
		Messages: []sdk.Message{
			{
				Index:   0,
				Role:    "user",
				Content: jokePrompt,
			},
		},
	})

	// Make API call
	resp, err := client.CreateChatCompletion(ctx, openai.ChatCompletionRequest{
		Model: "gpt-3.5-turbo",
		Messages: []openai.ChatCompletionMessage{
			{
				Role:    "user",
				Content: jokePrompt,
			},
		},
	})
	if err != nil {
		return "", fmt.Errorf("CreateChatCompletion error: %w", err)
	}
	// Guard: indexing resp.Choices[0] below would panic on an empty response.
	if len(resp.Choices) == 0 {
		return "", fmt.Errorf("CreateChatCompletion returned no choices")
	}

	// Log completion
	completionMsgs := make([]sdk.Message, 0, len(resp.Choices))
	for _, choice := range resp.Choices {
		completionMsgs = append(completionMsgs, sdk.Message{
			Index:   choice.Index,
			Content: choice.Message.Content,
			Role:    choice.Message.Role,
		})
	}

	llmSpan.LogCompletion(ctx, sdk.Completion{
		Model:    resp.Model,
		Messages: completionMsgs,
	}, sdk.Usage{
		TotalTokens:      resp.Usage.TotalTokens,
		CompletionTokens: resp.Usage.CompletionTokens,
		PromptTokens:     resp.Usage.PromptTokens,
	})

	return resp.Choices[0].Message.Content, nil
}

// translateJokeToPirate rewrites joke in pirate-like English inside a
// "joke_translation" agent span and returns the translated text.
//
// After a successful translation it also invokes historyJokesTool as a child
// tool of the agent; a tool failure is logged as a warning but does not fail
// the translation. Returns an error if the OpenAI call fails or yields no
// choices.
func translateJokeToPirate(ctx context.Context, workflow *sdk.Workflow, client *openai.Client, joke string) (string, error) {
	piratePrompt := fmt.Sprintf("Translate the below joke to pirate-like english:\n\n%s", joke)

	agent := workflow.NewAgent("joke_translation")
	defer agent.End()

	// Log prompt
	llmSpan := agent.LogPrompt(sdk.Prompt{
		Vendor: "openai",
		Mode:   "chat",
		Model:  "gpt-3.5-turbo",
		Messages: []sdk.Message{
			{
				Index:   0,
				Role:    "user",
				Content: piratePrompt,
			},
		},
	})

	// Make API call
	resp, err := client.CreateChatCompletion(ctx, openai.ChatCompletionRequest{
		Model: "gpt-3.5-turbo",
		Messages: []openai.ChatCompletionMessage{
			{
				Role:    "user",
				Content: piratePrompt,
			},
		},
	})
	if err != nil {
		return "", fmt.Errorf("CreateChatCompletion error: %w", err)
	}
	// Guard: indexing resp.Choices[0] below would panic on an empty response.
	if len(resp.Choices) == 0 {
		return "", fmt.Errorf("CreateChatCompletion returned no choices")
	}

	// Log completion
	completionMsgs := make([]sdk.Message, 0, len(resp.Choices))
	for _, choice := range resp.Choices {
		completionMsgs = append(completionMsgs, sdk.Message{
			Index:   choice.Index,
			Content: choice.Message.Content,
			Role:    choice.Message.Role,
		})
	}

	llmSpan.LogCompletion(ctx, sdk.Completion{
		Model:    resp.Model,
		Messages: completionMsgs,
	}, sdk.Usage{
		TotalTokens:      resp.Usage.TotalTokens,
		CompletionTokens: resp.Usage.CompletionTokens,
		PromptTokens:     resp.Usage.PromptTokens,
	})

	// Call history jokes tool; best-effort, so only warn on failure.
	if _, err := historyJokesTool(ctx, agent, client); err != nil {
		fmt.Printf("Warning: history_jokes_tool error: %v\n", err)
	}

	return resp.Choices[0].Message.Content, nil
}

// historyJokesTool asks the model for history jokes inside a "history_jokes"
// tool span attached to agent, and returns the model's reply.
//
// The tool span wraps both the prompt and completion logging. Returns an
// error if the OpenAI call fails or yields no choices.
func historyJokesTool(ctx context.Context, agent *sdk.Agent, client *openai.Client) (string, error) {
	const toolPrompt = "get some history jokes"

	tool := agent.NewTool("history_jokes", "function", sdk.ToolFunction{
		Name:        "history_jokes",
		Description: "Get some history jokes",
		Parameters:  map[string]interface{}{},
	})
	defer tool.End()

	// Log prompt
	llmSpan := tool.LogPrompt(sdk.Prompt{
		Vendor: "openai",
		Mode:   "chat",
		Model:  "gpt-3.5-turbo",
		Messages: []sdk.Message{
			{
				Index:   0,
				Role:    "user",
				Content: toolPrompt,
			},
		},
	})

	// Make API call
	resp, err := client.CreateChatCompletion(ctx, openai.ChatCompletionRequest{
		Model: "gpt-3.5-turbo",
		Messages: []openai.ChatCompletionMessage{
			{
				Role:    "user",
				Content: toolPrompt,
			},
		},
	})
	if err != nil {
		return "", fmt.Errorf("CreateChatCompletion error: %w", err)
	}
	// Guard: indexing resp.Choices[0] below would panic on an empty response.
	if len(resp.Choices) == 0 {
		return "", fmt.Errorf("CreateChatCompletion returned no choices")
	}

	// Log completion
	completionMsgs := make([]sdk.Message, 0, len(resp.Choices))
	for _, choice := range resp.Choices {
		completionMsgs = append(completionMsgs, sdk.Message{
			Index:   choice.Index,
			Content: choice.Message.Content,
			Role:    choice.Message.Role,
		})
	}

	llmSpan.LogCompletion(ctx, sdk.Completion{
		Model:    resp.Model,
		Messages: completionMsgs,
	}, sdk.Usage{
		TotalTokens:      resp.Usage.TotalTokens,
		CompletionTokens: resp.Usage.CompletionTokens,
		PromptTokens:     resp.Usage.PromptTokens,
	})

	return resp.Choices[0].Message.Content, nil
}

// generateSignature asks the davinci-002 legacy completion endpoint to append
// a signature to joke, inside a "signature_generation" task span, and returns
// the generated signature text.
//
// Note this uses the completion API (Prompt string), not the chat API, so the
// logged sdk.Prompt uses Mode "completion". Returns an error if the OpenAI
// call fails or yields no choices.
func generateSignature(ctx context.Context, workflow *sdk.Workflow, client *openai.Client, joke string) (string, error) {
	task := workflow.NewTask("signature_generation")
	defer task.End()

	signaturePrompt := "add a signature to the joke:\n\n" + joke

	// Log prompt
	llmSpan := task.LogPrompt(sdk.Prompt{
		Vendor: "openai",
		Mode:   "completion",
		Model:  "davinci-002",
		Messages: []sdk.Message{
			{
				Index:   0,
				Role:    "user",
				Content: signaturePrompt,
			},
		},
	})

	// Make API call
	resp, err := client.CreateCompletion(ctx, openai.CompletionRequest{
		Model:  "davinci-002",
		Prompt: signaturePrompt,
	})
	if err != nil {
		return "", fmt.Errorf("CreateCompletion error: %w", err)
	}
	// Guard: indexing resp.Choices[0] below would panic on an empty response.
	if len(resp.Choices) == 0 {
		return "", fmt.Errorf("CreateCompletion returned no choices")
	}

	// Log completion
	llmSpan.LogCompletion(ctx, sdk.Completion{
		Model: resp.Model,
		Messages: []sdk.Message{
			{
				Index:   0,
				Role:    "assistant",
				Content: resp.Choices[0].Text,
			},
		},
	}, sdk.Usage{
		TotalTokens:      resp.Usage.TotalTokens,
		CompletionTokens: resp.Usage.CompletionTokens,
		PromptTokens:     resp.Usage.PromptTokens,
	})

	return resp.Choices[0].Text, nil
}

// runJokeWorkflow drives the three-step joke pipeline under a single
// Traceloop workflow span: create an English joke, translate it to pirate
// speak, then generate a signature, printing each intermediate result and
// the combined final output. Errors at any step are printed and abort the
// pipeline.
func runJokeWorkflow() {
	ctx := context.Background()

	// Set up the Traceloop SDK client; flush spans on exit.
	tl, err := sdk.NewClient(ctx, sdk.Config{
		APIKey: os.Getenv("TRACELOOP_API_KEY"),
	})
	if err != nil {
		fmt.Printf("NewClient error: %v\n", err)
		return
	}
	defer func() { tl.Shutdown(ctx) }()

	oai := openai.NewClient(os.Getenv("OPENAI_API_KEY"))

	// Root workflow span with association metadata for this session.
	workflow := tl.NewWorkflow(ctx, sdk.WorkflowAttributes{
		Name: "go-joke_generator",
		AssociationProperties: map[string]string{
			"user_id": "user_12345",
			"chat_id": "chat_1234",
		},
	})
	defer workflow.End()

	// Step 1: create the base joke.
	fmt.Println("Creating joke...")
	engJoke, err := createJoke(ctx, workflow, oai)
	if err != nil {
		fmt.Printf("Error creating joke: %v\n", err)
		return
	}
	fmt.Printf("\nEnglish joke:\n%s\n\n", engJoke)

	// Step 2: translate it to pirate speak.
	fmt.Println("Translating to pirate...")
	pirateJoke, err := translateJokeToPirate(ctx, workflow, oai, engJoke)
	if err != nil {
		fmt.Printf("Error translating joke: %v\n", err)
		return
	}
	fmt.Printf("\nPirate joke:\n%s\n\n", pirateJoke)

	// Step 3: append a generated signature.
	fmt.Println("Generating signature...")
	signature, err := generateSignature(ctx, workflow, oai, pirateJoke)
	if err != nil {
		fmt.Printf("Error generating signature: %v\n", err)
		return
	}

	// Combine and print the final result.
	fmt.Printf("\n=== Final Result ===\n%s\n", pirateJoke+"\n\n"+signature)
}
11 changes: 4 additions & 7 deletions sample-app/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,8 @@ func main() {
}

// Default to workflow example using prompt registry
workflowExample()
// workflowExample()
runJokeWorkflow()
}

func workflowExample() {
Expand Down Expand Up @@ -61,7 +62,7 @@ func workflowExample() {
}

// Log the prompt
llmSpan, err := traceloop.LogPrompt(
llmSpan := traceloop.LogPrompt(
ctx,
sdk.Prompt{
Vendor: "openai",
Expand Down Expand Up @@ -103,18 +104,14 @@ func workflowExample() {
}

// Log the completion
err = llmSpan.LogCompletion(ctx, sdk.Completion{
llmSpan.LogCompletion(ctx, sdk.Completion{
Model: resp.Model,
Messages: completionMsgs,
}, sdk.Usage{
TotalTokens: resp.Usage.TotalTokens,
CompletionTokens: resp.Usage.CompletionTokens,
PromptTokens: resp.Usage.PromptTokens,
})
if err != nil {
fmt.Printf("LogCompletion error: %v\n", err)
return
}

fmt.Println(resp.Choices[0].Message.Content)
}
12 changes: 2 additions & 10 deletions sample-app/tool_calling.go
Original file line number Diff line number Diff line change
Expand Up @@ -91,11 +91,7 @@ func runToolCallingExample() {
fmt.Printf("User: %s\n", userPrompt)

// Log the prompt
llmSpan, err := traceloop.LogPrompt(ctx, prompt, workflowAttrs)
if err != nil {
fmt.Printf("Error logging prompt: %v\n", err)
return
}
llmSpan := traceloop.LogPrompt(ctx, prompt, workflowAttrs)

// Make API call to OpenAI
startTime := time.Now()
Expand Down Expand Up @@ -175,11 +171,7 @@ func runToolCallingExample() {
PromptTokens: int(resp.Usage.PromptTokens),
}

err = llmSpan.LogCompletion(ctx, completion, usage)
if err != nil {
fmt.Printf("Error logging completion: %v\n", err)
return
}
llmSpan.LogCompletion(ctx, completion, usage)

// If tool calls were made, execute them
if len(resp.Choices[0].Message.ToolCalls) > 0 {
Expand Down
8 changes: 2 additions & 6 deletions sample-app/workflow_example.go
Original file line number Diff line number Diff line change
Expand Up @@ -47,18 +47,14 @@ func workflowMain() {
})
}

llmSpan, err := factGenTask.LogPrompt(
llmSpan := factGenTask.LogPrompt(
tlp.Prompt{
Vendor: "openai",
Mode: "chat",
Model: request.Model,
Messages: promptMsgs,
},
)
if err != nil {
fmt.Printf("LogPrompt error: %v\n", err)
return
}

client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))
resp, err := client.CreateChatCompletion(
Expand Down Expand Up @@ -91,7 +87,7 @@ func workflowMain() {
someOtherTask := wf.NewTask("some_other_task")
defer someOtherTask.End()

otherPrompt, _ := someOtherTask.LogPrompt(tlp.Prompt{
otherPrompt := someOtherTask.LogPrompt(tlp.Prompt{
Vendor: "openai",
Mode: "chat",
Model: request.Model,
Expand Down
Loading
Loading