|
344 | 344 | "source": [ |
345 | 345 | "import openai\n", |
346 | 346 | "\n", |
347 | | - "raw_llm_response, validated_response = guard(\n", |
348 | | - " openai.Completion.create,\n", |
| 347 | + "raw_llm_response, validated_response, *rest = guard(\n", |
| 348 | + " openai.completions.create,\n", |
349 | 349 | " prompt_params={'document': document},\n", |
350 | | - " engine='text-davinci-003',\n", |
| 350 | + " model='text-davinci-003',\n", |
351 | 351 | " max_tokens=2048,\n", |
352 | 352 | " temperature=0\n", |
353 | 353 | ")\n", |
|
565 | 565 | } |
566 | 566 | ], |
567 | 567 | "source": [ |
568 | | - "print(guard.state.most_recent_call.tree)" |
| 568 | + "print(guard.history.last.tree)" |
569 | 569 | ] |
570 | 570 | }, |
571 | 571 | { |
|
598 | 598 | } |
599 | 599 | ], |
600 | 600 | "source": [ |
601 | | - "raw_llm_response, validated_response = guard(\n", |
602 | | - " openai.Completion.create,\n", |
| 601 | + "raw_llm_response, validated_response, *rest = guard(\n", |
| 602 | + " openai.completions.create,\n", |
603 | 603 | " prompt_params={'document': open(\"data/article1.txt\", \"r\").read()},\n", |
604 | | - " engine='text-ada-001',\n", |
| 604 | + " model='text-ada-001',\n", |
605 | 605 | " max_tokens=512,\n", |
606 | 606 | " temperature=0\n", |
607 | 607 | ")\n", |
|
927 | 927 | } |
928 | 928 | ], |
929 | 929 | "source": [ |
930 | | - "print(guard.state.most_recent_call.tree)" |
| 930 | + "print(guard.history.last.tree)" |
931 | 931 | ] |
932 | 932 | } |
933 | 933 | ], |
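
For reference, here is the updated call pattern from this diff written as plain Python rather than notebook JSON. This is a minimal sketch, assuming a Guard built from a hypothetical getting_started.rail spec; the argument names and the history access mirror the changed lines above, not a verified run.

import openai
import guardrails as gd

# Hypothetical rail spec name, for illustration only.
guard = gd.Guard.from_rail("getting_started.rail")

# openai>=1.0 renames openai.Completion.create to openai.completions.create
# and replaces the deprecated `engine` argument with `model`.
raw_llm_response, validated_response, *rest = guard(
    openai.completions.create,
    prompt_params={"document": open("data/article1.txt", "r").read()},
    model="text-davinci-003",
    max_tokens=2048,
    temperature=0,
)

# Call history is now read from guard.history instead of guard.state.
print(guard.history.last.tree)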
|