"""Ollama chat models."""importjsonfromoperatorimportitemgetterfromtypingimport(Any,AsyncIterator,Callable,Dict,Iterator,List,Literal,Mapping,Optional,Sequence,Type,Union,cast,)fromuuidimportuuid4fromlangchain_core.callbacksimport(CallbackManagerForLLMRun,)fromlangchain_core.callbacks.managerimportAsyncCallbackManagerForLLMRunfromlangchain_core.exceptionsimportOutputParserExceptionfromlangchain_core.language_modelsimportLanguageModelInputfromlangchain_core.language_models.chat_modelsimportBaseChatModel,LangSmithParamsfromlangchain_core.messagesimport(AIMessage,AIMessageChunk,BaseMessage,HumanMessage,SystemMessage,ToolCall,ToolMessage,)fromlangchain_core.messages.aiimportUsageMetadatafromlangchain_core.messages.toolimporttool_callfromlangchain_core.output_parsersimport(JsonOutputKeyToolsParser,JsonOutputParser,PydanticOutputParser,PydanticToolsParser,)fromlangchain_core.outputsimportChatGeneration,ChatGenerationChunk,ChatResultfromlangchain_core.runnablesimportRunnable,RunnableMap,RunnablePassthroughfromlangchain_core.toolsimportBaseToolfromlangchain_core.utils.function_callingimport(_convert_any_typed_dicts_to_pydanticasconvert_any_typed_dicts_to_pydantic,)fromlangchain_core.utils.function_callingimportconvert_to_openai_toolfromlangchain_core.utils.pydanticimportTypeBaseModel,is_basemodel_subclassfromollamaimportAsyncClient,Client,Message,OptionsfrompydanticimportBaseModel,PrivateAttr,model_validatorfrompydantic.json_schemaimportJsonSchemaValuefromtyping_extensionsimportSelf,is_typeddictdef_get_usage_metadata_from_generation_info(generation_info:Optional[Mapping[str,Any]],)->Optional[UsageMetadata]:"""Get usage metadata from ollama generation info mapping."""ifgeneration_infoisNone:returnNoneinput_tokens:Optional[int]=generation_info.get("prompt_eval_count")output_tokens:Optional[int]=generation_info.get("eval_count")ifinput_tokensisnotNoneandoutput_tokensisnotNone:returnUsageMetadata(input_tokens=input_tokens,output_tokens=output_tokens,total_tokens=input_tokens+output_tokens,)returnNonedef_parse_json_string(json_string:str,raw_tool_call:dict[str,Any],skip:bool)->Any:"""Attempt to parse a JSON string for tool calling. Args: json_string: JSON string to parse. skip: Whether to ignore parsing errors and return the value anyways. raw_tool_call: Raw tool call to include in error message. Returns: The parsed JSON string. Raises: OutputParserException: If the JSON string wrong invalid and skip=False. """try:returnjson.loads(json_string)exceptjson.JSONDecodeErrorase:ifskip:returnjson_stringmsg=(f"Function {raw_tool_call['function']['name']} arguments:\n\n"f"{raw_tool_call['function']['arguments']}\n\nare not valid JSON. "f"Received JSONDecodeError {e}")raiseOutputParserException(msg)fromeexceptTypeErrorase:ifskip:returnjson_stringmsg=(f"Function {raw_tool_call['function']['name']} arguments:\n\n"f"{raw_tool_call['function']['arguments']}\n\nare not a string or a "f"dictionary. Received TypeError {e}")raiseOutputParserException(msg)fromedef_parse_arguments_from_tool_call(raw_tool_call:dict[str,Any],)->Optional[dict[str,Any]]:"""Parse arguments by trying to parse any shallowly nested string-encoded JSON. Band-aid fix for issue in Ollama with inconsistent tool call argument structure. Should be removed/changed if fixed upstream. 
    See https://github.com/ollama/ollama/issues/6155
    """
    if "function" not in raw_tool_call:
        return None
    arguments = raw_tool_call["function"]["arguments"]
    parsed_arguments = {}
    if isinstance(arguments, dict):
        for key, value in arguments.items():
            if isinstance(value, str):
                parsed_arguments[key] = _parse_json_string(
                    value, skip=True, raw_tool_call=raw_tool_call
                )
            else:
                parsed_arguments[key] = value
    else:
        parsed_arguments = _parse_json_string(
            arguments, skip=False, raw_tool_call=raw_tool_call
        )
    return parsed_arguments


def _get_tool_calls_from_response(
    response: Mapping[str, Any],
) -> List[ToolCall]:
    """Get tool calls from ollama response."""
    tool_calls = []
    if "message" in response:
        if raw_tool_calls := response["message"].get("tool_calls"):
            for tc in raw_tool_calls:
                tool_calls.append(
                    tool_call(
                        id=str(uuid4()),
                        name=tc["function"]["name"],
                        args=_parse_arguments_from_tool_call(tc) or {},
                    )
                )
    return tool_calls


def _lc_tool_call_to_openai_tool_call(tool_call: ToolCall) -> dict:
    return {
        "type": "function",
        "id": tool_call["id"],
        "function": {
            "name": tool_call["name"],
            "arguments": tool_call["args"],
        },
    }


def _is_pydantic_class(obj: Any) -> bool:
    return isinstance(obj, type) and is_basemodel_subclass(obj)
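
# Illustrative sketch only (not part of the module's API): how the band-aid
# parser above handles Ollama's inconsistent argument encodings. The payloads
# below are hypothetical examples of the shapes the helper accepts.
#
#     _parse_arguments_from_tool_call(
#         {"function": {"name": "add", "arguments": {"x": "1", "y": 2}}}
#     )
#     # -> {"x": 1, "y": 2}  (string-encoded "1" is decoded; 2 passes through;
#     #    a non-JSON string value like "Paris" is kept as-is via skip=True)
#
#     _parse_arguments_from_tool_call(
#         {"function": {"name": "add", "arguments": '{"x": 1, "y": 2}'}}
#     )
#     # -> {"x": 1, "y": 2}  (a fully string-encoded arguments dict is decoded)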


class ChatOllama(BaseChatModel):
    """Ollama chat model integration."""

    def bind_tools(
        self,
        tools: Sequence[Union[Dict[str, Any], Type, Callable, BaseTool]],
        *,
        tool_choice: Optional[Union[dict, str, Literal["auto", "any"], bool]] = None,
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, BaseMessage]:
        """Bind tool-like objects to this chat model.

        Assumes model is compatible with OpenAI tool-calling API.

        Args:
            tools: A list of tool definitions to bind to this chat model.
                Supports any tool definition handled by
                :meth:`langchain_core.utils.function_calling.convert_to_openai_tool`.
            tool_choice: If provided, which tool the model should call.
                **This parameter is currently ignored as it is not supported by
                Ollama.**
            kwargs: Any additional parameters are passed directly to
                ``self.bind(**kwargs)``.
        """  # noqa: E501
        formatted_tools = [convert_to_openai_tool(tool) for tool in tools]
        return super().bind(tools=formatted_tools, **kwargs)
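
    # Minimal usage sketch (assumes a running local Ollama server with a
    # tool-capable model such as "llama3.1" pulled; ``get_weather`` is a
    # hypothetical example tool, not part of this module):
    #
    #     from langchain_core.tools import tool
    #
    #     @tool
    #     def get_weather(city: str) -> str:
    #         """Get the current weather for a city."""
    #         ...
    #
    #     llm = ChatOllama(model="llama3.1").bind_tools([get_weather])
    #     ai_msg = llm.invoke("What is the weather in Paris?")
    #     ai_msg.tool_calls
    #     # -> [{"name": "get_weather", "args": {"city": "Paris"}, "id": "..."}]
    #
    # Note that ``tool_choice`` is accepted for signature compatibility but is
    # currently ignored, so the model itself decides whether to call a tool.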
    def with_structured_output(
        self,
        schema: Union[Dict, type],
        *,
        method: Literal[
            "function_calling", "json_mode", "json_schema"
        ] = "function_calling",
        include_raw: bool = False,
        **kwargs: Any,
    ) -> Runnable[LanguageModelInput, Union[Dict, BaseModel]]:
        """Model wrapper that returns outputs formatted to match the given schema.

        Args:
            schema: The output schema. Can be passed in as:

                - a Pydantic class,
                - a JSON schema,
                - a TypedDict class, or
                - an OpenAI function/tool schema.

                If ``schema`` is a Pydantic class then the model output will be a
                Pydantic instance of that class, and the model-generated fields will
                be validated by the Pydantic class. Otherwise the model output will
                be a dict and will not be validated. See
                :meth:`langchain_core.utils.function_calling.convert_to_openai_tool`
                for more on how to properly specify types and descriptions of schema
                fields when specifying a Pydantic or TypedDict class.
            method: The method for steering model generation, one of:

                - "function_calling": Uses Ollama's tool-calling API
                - "json_schema": Uses Ollama's structured output API:
                  https://ollama.com/blog/structured-outputs
                - "json_mode": Specifies ``format="json"``. Note that if using JSON
                  mode then you must include instructions for formatting the output
                  into the desired schema in the model call.
            include_raw: If False then only the parsed structured output is returned.
                If an error occurs during model output parsing it will be raised. If
                True then both the raw model response (a BaseMessage) and the parsed
                model response will be returned. If an error occurs during output
                parsing it will be caught and returned as well. The final output is
                always a dict with keys "raw", "parsed", and "parsing_error".
            kwargs: Additional keyword args are not supported.

        Returns:
            A Runnable that takes the same inputs as a
            :class:`langchain_core.language_models.chat.BaseChatModel`.

            If ``include_raw`` is False and ``schema`` is a Pydantic class, the
            Runnable outputs an instance of ``schema`` (i.e., a Pydantic object).
            Otherwise, if ``include_raw`` is False then the Runnable outputs a dict.

            If ``include_raw`` is True, then the Runnable outputs a dict with keys:

            - "raw": BaseMessage
            - "parsed": None if there was a parsing error, otherwise the type
              depends on the ``schema`` as described above.
            - "parsing_error": Optional[BaseException]

        .. versionchanged:: 0.2.2

            Added support for structured output API via ``format`` parameter.

        .. dropdown:: Example: schema=Pydantic class, method="function_calling", include_raw=False

            .. code-block:: python

                from typing import Optional

                from langchain_ollama import ChatOllama
                from pydantic import BaseModel, Field


                class AnswerWithJustification(BaseModel):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: Optional[str] = Field(
                        default=..., description="A justification for the answer."
                    )


                llm = ChatOllama(model="llama3.1", temperature=0)
                structured_llm = llm.with_structured_output(AnswerWithJustification)

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )

                # -> AnswerWithJustification(
                #     answer='They weigh the same',
                #     justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
                # )

        .. dropdown:: Example: schema=Pydantic class, method="function_calling", include_raw=True
            .. code-block:: python

                from langchain_ollama import ChatOllama
                from pydantic import BaseModel


                class AnswerWithJustification(BaseModel):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: str


                llm = ChatOllama(model="llama3.1", temperature=0)
                structured_llm = llm.with_structured_output(
                    AnswerWithJustification, include_raw=True
                )

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )
                # -> {
                #     'raw': AIMessage(content='', additional_kwargs={'tool_calls': [{'id': 'call_Ao02pnFYXD6GN1yzc0uXPsvF', 'function': {'arguments': '{"answer":"They weigh the same.","justification":"Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ."}', 'name': 'AnswerWithJustification'}, 'type': 'function'}]}),
                #     'parsed': AnswerWithJustification(answer='They weigh the same.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'),
                #     'parsing_error': None
                # }

        .. dropdown:: Example: schema=Pydantic class, method="json_schema", include_raw=False

            .. code-block:: python

                from typing import Optional

                from langchain_ollama import ChatOllama
                from pydantic import BaseModel, Field


                class AnswerWithJustification(BaseModel):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: Optional[str] = Field(
                        default=..., description="A justification for the answer."
                    )


                llm = ChatOllama(model="llama3.1", temperature=0)
                structured_llm = llm.with_structured_output(
                    AnswerWithJustification, method="json_schema"
                )

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )

                # -> AnswerWithJustification(
                #     answer='They weigh the same',
                #     justification='Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume or density of the objects may differ.'
                # )

        .. dropdown:: Example: schema=TypedDict class, method="function_calling", include_raw=False

            .. code-block:: python

                # IMPORTANT: If you are using Python <=3.8, you need to import Annotated
                # from typing_extensions, not from typing.
                from typing import Optional

                from typing_extensions import Annotated, TypedDict

                from langchain_ollama import ChatOllama


                class AnswerWithJustification(TypedDict):
                    '''An answer to the user question along with justification for the answer.'''

                    answer: str
                    justification: Annotated[
                        Optional[str], None, "A justification for the answer."
                    ]


                llm = ChatOllama(model="llama3.1", temperature=0)
                structured_llm = llm.with_structured_output(AnswerWithJustification)

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )
                # -> {
                #     'answer': 'They weigh the same',
                #     'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
                # }

        .. dropdown:: Example: schema=OpenAI function schema, method="function_calling", include_raw=False
            .. code-block:: python

                from langchain_ollama import ChatOllama

                oai_schema = {
                    'name': 'AnswerWithJustification',
                    'description': 'An answer to the user question along with justification for the answer.',
                    'parameters': {
                        'type': 'object',
                        'properties': {
                            'answer': {'type': 'string'},
                            'justification': {'description': 'A justification for the answer.', 'type': 'string'}
                        },
                        'required': ['answer']
                    }
                }

                llm = ChatOllama(model="llama3.1", temperature=0)
                structured_llm = llm.with_structured_output(oai_schema)

                structured_llm.invoke(
                    "What weighs more a pound of bricks or a pound of feathers"
                )
                # -> {
                #     'answer': 'They weigh the same',
                #     'justification': 'Both a pound of bricks and a pound of feathers weigh one pound. The weight is the same, but the volume and density of the two substances differ.'
                # }

        .. dropdown:: Example: schema=Pydantic class, method="json_mode", include_raw=True

            .. code-block:: python

                from langchain_ollama import ChatOllama
                from pydantic import BaseModel


                class AnswerWithJustification(BaseModel):
                    answer: str
                    justification: str


                llm = ChatOllama(model="llama3.1", temperature=0)
                structured_llm = llm.with_structured_output(
                    AnswerWithJustification, method="json_mode", include_raw=True
                )

                structured_llm.invoke(
                    "Answer the following question. "
                    "Make sure to return a JSON blob with keys 'answer' and 'justification'.\\n\\n"
                    "What's heavier a pound of bricks or a pound of feathers?"
                )
                # -> {
                #     'raw': AIMessage(content='{\\n    "answer": "They are both the same weight.",\\n    "justification": "Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight." \\n}'),
                #     'parsed': AnswerWithJustification(answer='They are both the same weight.', justification='Both a pound of bricks and a pound of feathers weigh one pound. The difference lies in the volume and density of the materials, not the weight.'),
                #     'parsing_error': None
                # }
        """  # noqa: E501, D301
        if kwargs:
            raise ValueError(f"Received unsupported arguments {kwargs}")
        is_pydantic_schema = _is_pydantic_class(schema)
        if method == "function_calling":
            if schema is None:
                raise ValueError(
                    "schema must be specified when method is not 'json_mode'. "
                    "Received None."
                )
            tool_name = convert_to_openai_tool(schema)["function"]["name"]
            llm = self.bind_tools([schema], tool_choice=tool_name)
            if is_pydantic_schema:
                output_parser: Runnable = PydanticToolsParser(
                    tools=[schema],  # type: ignore[list-item]
                    first_tool_only=True,
                )
            else:
                output_parser = JsonOutputKeyToolsParser(
                    key_name=tool_name, first_tool_only=True
                )
        elif method == "json_mode":
            llm = self.bind(format="json")
            output_parser = (
                PydanticOutputParser(pydantic_object=schema)  # type: ignore[arg-type]
                if is_pydantic_schema
                else JsonOutputParser()
            )
        elif method == "json_schema":
            if schema is None:
                raise ValueError(
                    "schema must be specified when method is not 'json_mode'. "
                    "Received None."
                )
            if is_pydantic_schema:
                schema = cast(TypeBaseModel, schema)
                llm = self.bind(format=schema.model_json_schema())
                output_parser = PydanticOutputParser(pydantic_object=schema)
            else:
                if is_typeddict(schema):
                    schema = cast(type, schema)
                    response_format = convert_any_typed_dicts_to_pydantic(
                        schema, visited={}
                    ).schema()  # type: ignore[attr-defined]
                    if "required" not in response_format:
                        response_format["required"] = list(
                            response_format["properties"].keys()
                        )
                else:
                    # is JSON schema
                    response_format = schema
                llm = self.bind(format=response_format)
                output_parser = JsonOutputParser()
        else:
            raise ValueError(
                f"Unrecognized method argument. Expected one of "
                f"'function_calling', 'json_schema', or 'json_mode'. "
Received: '{method}'")ifinclude_raw:parser_assign=RunnablePassthrough.assign(parsed=itemgetter("raw")|output_parser,parsing_error=lambda_:None)parser_none=RunnablePassthrough.assign(parsed=lambda_:None)parser_with_fallback=parser_assign.with_fallbacks([parser_none],exception_key="parsing_error")returnRunnableMap(raw=llm)|parser_with_fallbackelse:returnllm|output_parser