diff --git a/product_comparison/baml_src/clients.baml b/product_comparison/baml_src/clients.baml
new file mode 100644
index 0000000..f12893b
--- /dev/null
+++ b/product_comparison/baml_src/clients.baml
@@ -0,0 +1,75 @@
+// Learn more about clients at https://docs.boundaryml.com/docs/snippets/clients/overview
+
+client CustomGPT4o {
+  provider openai
+  options {
+    model "gpt-4o"
+    api_key env.OPENAI_API_KEY
+  }
+}
+
+client CustomGPT4oMini {
+  provider openai
+  retry_policy Exponential
+  options {
+    model "gpt-4o-mini"
+    api_key env.OPENAI_API_KEY
+  }
+}
+
+client CustomSonnet {
+  provider anthropic
+  options {
+    model "claude-3-5-sonnet-20241022"
+    api_key env.ANTHROPIC_API_KEY
+  }
+}
+
+
+client CustomHaiku {
+  provider anthropic
+  retry_policy Constant
+  options {
+    model "claude-3-haiku-20240307"
+    api_key env.ANTHROPIC_API_KEY
+  }
+}
+
+// https://docs.boundaryml.com/docs/snippets/clients/round-robin
+client CustomFast {
+  provider round-robin
+  options {
+    // This will alternate between the two clients
+    strategy [CustomGPT4oMini, CustomHaiku]
+  }
+}
+
+// https://docs.boundaryml.com/docs/snippets/clients/fallback
+client OpenaiFallback {
+  provider fallback
+  options {
+    // This will try the clients in order until one succeeds
+    strategy [CustomGPT4oMini, CustomGPT4o]
+  }
+}
+
+// https://docs.boundaryml.com/docs/snippets/clients/retry
+retry_policy Constant {
+  max_retries 3
+  // Strategy is optional
+  strategy {
+    type constant_delay
+    delay_ms 200
+  }
+}
+
+retry_policy Exponential {
+  max_retries 2
+  // Strategy is optional
+  strategy {
+    type exponential_backoff
+    delay_ms 300
+    multiplier 1.5
+    max_delay_ms 10000
+  }
+}
\ No newline at end of file
diff --git a/product_comparison/baml_src/example.baml b/product_comparison/baml_src/example.baml
new file mode 100644
index 0000000..4784bab
--- /dev/null
+++ b/product_comparison/baml_src/example.baml
@@ -0,0 +1,35 @@
+class MyOutputType {
+  prompt string
+  completion string
+  tokens int
+  model string
+  time float
+  cost float
+  usage int
+  error string
+  result string
+}
+
+
+function FunctionName(arg: string) -> MyOutputType {
+  client CustomGPT4o
+  prompt #"
+    Your prompt here in jinja format
+
+    {{ arg }}
+    {{ ctx.output_format }}
+  "#
+
+
+
+}
+
+
+test test_function {
+  functions [FunctionName]
+  args {
+    arg #"
+      what is the capital of France?
+    "#
+  }
+}
\ No newline at end of file
diff --git a/product_comparison/baml_src/generators.baml b/product_comparison/baml_src/generators.baml
new file mode 100644
index 0000000..ed7bcf7
--- /dev/null
+++ b/product_comparison/baml_src/generators.baml
@@ -0,0 +1,18 @@
+// This helps us auto-generate libraries you can use in the language of
+// your choice. You can have multiple generators if you use multiple languages.
+// Just ensure that the output_dir is different for each generator.
+generator target {
+    // Valid values: "python/pydantic", "typescript", "ruby/sorbet", "rest/openapi"
+    output_type "python/pydantic"
+
+    // Where the generated code will be saved (relative to baml_src/)
+    output_dir "../"
+
+    // The version of the BAML package you have installed (e.g. same version as your baml-py or @boundaryml/baml).
+    // The BAML VSCode extension version should also match this version.
+    version "0.89.0"
+
+    // Valid values: "sync", "async"
+    // This controls what `b.FunctionName()` will be (sync or async).
+    default_client_mode sync
+}
diff --git a/product_comparison/baml_src/product_comparison.baml b/product_comparison/baml_src/product_comparison.baml
new file mode 100644
index 0000000..77ad8ca
--- /dev/null
+++ b/product_comparison/baml_src/product_comparison.baml
@@ -0,0 +1,47 @@
+class Product {
+  name string
+  description string
+  brand string
+  price string
+  features string[]
+}
+
+class Product_comparison_response {
+  product1 Product
+  product2 Product
+  comparison string
+  recommendation string
+
+}
+
+
+function CompareProducts(product1: string, product2: string) -> Product_comparison_response {
+  client "openai/gpt-4o"
+  prompt #"
+    Compare the following two products:
+    Product 1: {{ product1 }}
+    Product 2: {{ product2 }}
+
+    {{ ctx.output_format }}
+  "#
+}
+test product_comparison {
+  functions [CompareProducts]
+  args {
+    product1 #"
+      Name: iPhone 14
+      Description: Latest Apple smartphone with A15 Bionic chip.
+      Brand: Apple
+      Price: $999
+      Features: 5G, Face ID, Dual Camera
+    "#
+
+    product2 #"
+      Name: Samsung Galaxy S22
+      Description: Flagship Samsung smartphone with Exynos 2200.
+      Brand: Samsung
+      Price: $799
+      Features: 5G, In-display fingerprint sensor, Triple Camera
+    "#
+  }
+}
\ No newline at end of file
diff --git a/product_comparison/baml_src/resume.baml b/product_comparison/baml_src/resume.baml
new file mode 100644
index 0000000..cfad6c1
--- /dev/null
+++ b/product_comparison/baml_src/resume.baml
@@ -0,0 +1,42 @@
+// Defining a data model.
+class Resume {
+  name string
+  email string
+  experience string[]
+  skills string[]
+}
+
+// Create a function to extract the resume from a string.
+function ExtractResume(resume: string) -> Resume {
+  // Specify a client as provider/model-name
+  // you can use custom LLM params with a custom client name from clients.baml like "client CustomHaiku"
+  client "openai/gpt-4o" // Set OPENAI_API_KEY to use this client.
+  prompt #"
+    Extract from this content:
+    {{ resume }}
+
+    {{ ctx.output_format }}
+  "#
+}
+
+
+
+// Test the function with a sample resume. Open the VSCode playground to run this.
+test vaibhav_resume {
+  functions [ExtractResume]
+  args {
+    resume #"
+      Vaibhav Gupta
+      vbv@boundaryml.com
+
+      Experience:
+      - Founder at BoundaryML
+      - CV Engineer at Google
+      - CV Engineer at Microsoft
+
+      Skills:
+      - Rust
+      - C++
+    "#
+  }
+}
diff --git a/product_comparison/main.py b/product_comparison/main.py
new file mode 100644
index 0000000..5c9970a
--- /dev/null
+++ b/product_comparison/main.py
@@ -0,0 +1,18 @@
+from baml_client.sync_client import b
+from baml_client.types import Resume
+
+def example(raw_resume: str) -> Resume:
+    # BAML's internal parser guarantees ExtractResume
+    # to always return a Resume type
+    response = b.ExtractResume(raw_resume)
+    return response
+
+def example_stream(raw_resume: str) -> Resume:
+    stream = b.stream.ExtractResume(raw_resume)
+    for msg in stream:
+        print(msg) # This will be a PartialResume type
+
+    # This will be a Resume type
+    final = stream.get_final_response()
+
+    return final
diff --git a/product_comparison/product_comparison.py b/product_comparison/product_comparison.py
new file mode 100644
index 0000000..3f1146c
--- /dev/null
+++ b/product_comparison/product_comparison.py
@@ -0,0 +1,45 @@
+import os
+from dotenv import load_dotenv
+from baml_client.sync_client import b
+from baml_client.types import Product_comparison_response
+
+# Load environment variables from .env file
+load_dotenv()
+
+def compare_products(product1: str, product2: str) -> Product_comparison_response:
+    """
+    Compare two products and return a structured comparison.
+
+    Args:
+        product1 (str): Description of the first product
+        product2 (str): Description of the second product
+
+    Returns:
+        Product_comparison_response: Structured comparison with details of both products
+    """
+    # BAML's internal parser guarantees CompareProducts
+    # to always return a Product_comparison_response type
+    response = b.CompareProducts(product1, product2)
+    return response
+
+def compare_products_stream(product1: str, product2: str) -> Product_comparison_response:
+    """
+    Compare two products with streaming response.
+
+    Args:
+        product1 (str): Description of the first product
+        product2 (str): Description of the second product
+
+    Returns:
+        Product_comparison_response: Final structured comparison with details of both products
+    """
+    stream = b.stream.CompareProducts(product1, product2)
+    for msg in stream:
+        print(msg) # This will be a partial response
+
+    # This will be a Product_comparison_response type
+    final = stream.get_final_response()
+
+    return final
+
+
diff --git a/product_comparison/product_comparison_example.py b/product_comparison/product_comparison_example.py
new file mode 100644
index 0000000..d6d30bd
--- /dev/null
+++ b/product_comparison/product_comparison_example.py
@@ -0,0 +1,90 @@
+from product_comparison import compare_products, compare_products_stream
+
+# Example 1: Smartphones
+smartphone1 = """
+Name: iPhone 14 Pro
+Description: Apple's premium smartphone with the latest A16 Bionic chip, featuring a Dynamic Island display and always-on screen.
+Brand: Apple
+Price: $999
+Features: 48MP main camera, Dynamic Island, Always-on display, A16 Bionic chip, ProMotion 120Hz display, Emergency SOS via satellite
+"""
+
+smartphone2 = """
+Name: Samsung Galaxy S23 Ultra
+Description: Samsung's flagship smartphone with the Snapdragon 8 Gen 2 processor and an integrated S Pen.
+Brand: Samsung
+Price: $1,199
+Features: 200MP main camera, 100x Space Zoom, S Pen included, 5000mAh battery, 6.8-inch Dynamic AMOLED 2X display, 45W fast charging
+"""
+
+# Example 2: Laptops
+laptop1 = """
+Name: MacBook Pro 16"
+Description: Professional-grade laptop with Apple Silicon M2 Pro/Max chip offering exceptional performance and battery life.
+Brand: Apple
+Price: $2,499
+Features: M2 Pro/Max chip, Liquid Retina XDR display, 16GB-96GB RAM, Up to 8TB storage, 22-hour battery life, MagSafe charging
+"""
+
+laptop2 = """
+Name: Dell XPS 17
+Description: Premium Windows laptop with large display and powerful Intel processors for creative professionals.
+Brand: Dell
+Price: $1,949
+Features: 12th Gen Intel Core i7/i9, NVIDIA RTX 3060, 17-inch 4K UHD+ display, 16GB-64GB RAM, Up to 2TB SSD, Windows 11 Pro
+"""
+
+# Example 3: Wireless Earbuds
+earbuds1 = """
+Name: Apple AirPods Pro 2
+Description: Apple's premium noise-cancelling earbuds with improved sound quality and battery life.
+Brand: Apple
+Price: $249
+Features: Active Noise Cancellation, Transparency mode, Adaptive EQ, Spatial Audio, H2 chip, 6 hours battery (30 with case), Water resistant
+"""
+
+earbuds2 = """
+Name: Sony WF-1000XM5
+Description: Sony's flagship noise-cancelling earbuds with industry-leading sound quality and noise reduction.
+Brand: Sony
+Price: $299
+Features: Best-in-class noise cancellation, LDAC Hi-Res Audio, 8 hours battery (24 with case), Speak-to-chat, Wireless charging, Multipoint connection
+"""
+
+def run_comparison_examples():
+    print("EXAMPLE 1: SMARTPHONE COMPARISON")
+    print("-" * 50)
+    smartphone_result = compare_products(smartphone1, smartphone2)
+    print(f"Product 1: {smartphone_result.product1.name}")
+    print(f"Product 2: {smartphone_result.product2.name}")
+    print("\nCOMPARISON:")
+    print(smartphone_result.comparison)
+    print("\nRECOMMENDATION:")
+    print(smartphone_result.recommendation)
+    print("\n" + "=" * 80 + "\n")
+
+    print("EXAMPLE 2: LAPTOP COMPARISON")
+    print("-" * 50)
+    laptop_result = compare_products(laptop1, laptop2)
+    print(f"Product 1: {laptop_result.product1.name}")
+    print(f"Product 2: {laptop_result.product2.name}")
+    print("\nCOMPARISON:")
+    print(laptop_result.comparison)
+    print("\nRECOMMENDATION:")
+    print(laptop_result.recommendation)
+    print("\n" + "=" * 80 + "\n")
+
+    print("EXAMPLE 3: EARBUDS COMPARISON (STREAMING)")
+    print("-" * 50)
+    print("Streaming response:")
+    earbuds_result = compare_products_stream(earbuds1, earbuds2)
+    print("\nFINAL RESULT:")
+    print(f"Product 1: {earbuds_result.product1.name}")
+    print(f"Product 2: {earbuds_result.product2.name}")
+    print("\nCOMPARISON:")
+    print(earbuds_result.comparison)
+    print("\nRECOMMENDATION:")
+    print(earbuds_result.recommendation)
+
+if __name__ == "__main__":
+    run_comparison_examples()