diff --git a/README.md b/README.md index 84b784a..811b7b7 100644 --- a/README.md +++ b/README.md @@ -2,17 +2,15 @@ `gpt-json` is a wrapper around GPT that allows for declarative definition of expected output format. Set up a schema, write a prompt, and get results back as beautiful typehinted objects. -Specifically this library: - -- Utilizes Pydantic schema definitions for type casting and validations -- Adds typehinting for both the API and the output schema -- Allows GPT to respond with both single-objects and lists of objects -- Includes some lightweight transformations of the output to remove superfluous context and fix broken json -- Includes retry logic for the most common API failures -- Formats the JSON schema as a flexible prompt that can be added into any message -- Supports templating of prompts to allow for dynamic content -- Validate typehinted function calls in the new GPT models, to better support agent creation -- Lightweight dependencies: only OpenAI, pydantic, and backoff +This library introduces the following features: + +- 🏗️ Pydantic schema definitions for type casting and validations +- 🧵 Templating of prompts to allow for dynamic content +- 🔎 Supports Vision API, Function Calling, and standard chat prompts +- 🚕 Lightweight transformations of the output to fix broken json +- ♻️ Retry logic for the most common API failures +- 📋 Predict single-objects and lists of objects +- ✈️ Lightweight dependencies: only OpenAI, pydantic, and backoff ## Getting Started diff --git a/gpt_json/gpt.py b/gpt_json/gpt.py index 80396fc..f7fce1c 100644 --- a/gpt_json/gpt.py +++ b/gpt_json/gpt.py @@ -60,6 +60,9 @@ handler.setFormatter(formatter) logger.addHandler(handler) +# https://github.com/openai/openai-python/issues/1306 +ChatCompletionChunk.model_rebuild() + def handle_backoff(details): logger.warning( @@ -261,11 +264,11 @@ async def run( except (ValueError, ValidationError): raise InvalidFunctionParameters(function_name, 
function_args_string) - raw_response = GPTMessage.model_validate_json(response_message.model_dump_json()) + raw_response = GPTMessage.model_validate(response_message.model_dump()) raw_response.allow_templating = False extracted_json, fixed_payload = self.extract_json( - response_message, self.extract_type + raw_response, self.extract_type ) # Cast to schema model @@ -372,12 +375,12 @@ async def stream( yield partial_response previous_partial = partial_response - def extract_json(self, response_message, extract_type: ResponseType): + def extract_json(self, response_message: GPTMessage, extract_type: ResponseType): """ Assumes one main block of results, either list of dictionary """ - full_response = response_message.content + full_response = self.get_content_text(response_message.get_content_payloads()) if not full_response: return None, None