
from __future__ import annotations

from typing import Dict, Union, Iterable, Optional
from typing_extensions import Literal, overload

import httpx

from .. import _legacy_response
from ..types import completion_create_params
from .._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
from .._utils import required_args, maybe_transform, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
from .._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
from .._streaming import Stream, AsyncStream
from .._base_client import make_request_options
from ..types.completion import Completion
from ..types.chat.chat_completion_stream_options_param import ChatCompletionStreamOptionsParam

__all__ = ["Completions", "AsyncCompletions"]


class Completions(SyncAPIResource):
    @cached_property
    def with_raw_response(self) -> CompletionsWithRawResponse:
        """
        This property can be used as a prefix for any HTTP method call to return
        the raw response object instead of the parsed content.

        For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
        """
        return CompletionsWithRawResponse(self)

    @cached_property
    def with_streaming_response(self) -> CompletionsWithStreamingResponse:
        """
        An alternative to `.with_raw_response` that doesn't eagerly read the response body.

        For more information, see https://www.github.com/openai/openai-python#with_streaming_response
        """
        return CompletionsWithStreamingResponse(self)

    @overload
    def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None],
        best_of: Optional[int] | Omit = omit,
        echo: Optional[bool] | Omit = omit,
        frequency_penalty: Optional[float] | Omit = omit,
        logit_bias: Optional[Dict[str, int]] | Omit = omit,
        logprobs: Optional[int] | Omit = omit,
        max_tokens: Optional[int] | Omit = omit,
        n: Optional[int] | Omit = omit,
        presence_penalty: Optional[float] | Omit = omit,
        seed: Optional[int] | Omit = omit,
        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
        stream: Optional[Literal[False]] | Omit = omit,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
        suffix: Optional[str] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        user: str | Omit = omit,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Completion:
        """
        Creates a completion for the provided prompt and parameters.

        Args:
          model: ID of the model to use. You can use the
              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
              see all of your available models, or see our
              [Model overview](https://platform.openai.com/docs/models) for descriptions of
              them.

          prompt: The prompt(s) to generate completions for, encoded as a string, array of
              strings, array of tokens, or array of token arrays.

              Note that <|endoftext|> is the document separator that the model sees during
              training, so if a prompt is not specified the model will generate as if from the
              beginning of a new document.

          best_of: Generates `best_of` completions server-side and returns the "best" (the one with
              the highest log probability per token). Results cannot be streamed.

              When used with `n`, `best_of` controls the number of candidate completions and
              `n` specifies how many to return – `best_of` must be greater than `n`.

              **Note:** Because this parameter generates many completions, it can quickly
              consume your token quota. Use carefully and ensure that you have reasonable
              settings for `max_tokens` and `stop`.

          echo: Echo back the prompt in addition to the completion

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
              existing frequency in the text so far, decreasing the model's likelihood to
              repeat the same line verbatim.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)

          logit_bias: Modify the likelihood of specified tokens appearing in the completion.

              Accepts a JSON object that maps tokens (specified by their token ID in the GPT
              tokenizer) to an associated bias value from -100 to 100. You can use this
              [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
              Mathematically, the bias is added to the logits generated by the model prior to
              sampling. The exact effect will vary per model, but values between -1 and 1
              should decrease or increase likelihood of selection; values like -100 or 100
              should result in a ban or exclusive selection of the relevant token.

              As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
              from being generated.

          logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as
              well the chosen tokens. For example, if `logprobs` is 5, the API will return a
              list of the 5 most likely tokens. The API will always return the `logprob` of
              the sampled token, so there may be up to `logprobs+1` elements in the response.

              The maximum value for `logprobs` is 5.

          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the
              completion.

              The token count of your prompt plus `max_tokens` cannot exceed the model's
              context length.
              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
              for counting tokens.

          n: How many completions to generate for each prompt.

              **Note:** Because this parameter generates many completions, it can quickly
              consume your token quota. Use carefully and ensure that you have reasonable
              settings for `max_tokens` and `stop`.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)

          seed: If specified, our system will make a best effort to sample deterministically,
              such that repeated requests with the same `seed` and parameters should return
              the same result.

              Determinism is not guaranteed, and you should refer to the `system_fingerprint`
              response parameter to monitor changes in the backend.

          stop: Not supported with latest reasoning models `o3` and `o4-mini`.

              Up to 4 sequences where the API will stop generating further tokens. The
              returned text will not contain the stop sequence.

          stream: Whether to stream back partial progress. If set, tokens will be sent as
              data-only
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
              as they become available, with the stream terminated by a `data: [DONE]`
              message.
              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).

          stream_options: Options for streaming response. Only set this when you set `stream: true`.

          suffix: The suffix that comes after a completion of inserted text.

              This parameter is only supported for `gpt-3.5-turbo-instruct`.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

              We generally recommend altering this or `top_p` but not both.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...

    @overload
    def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None],
        stream: Literal[True],
        best_of: Optional[int] | Omit = omit,
        echo: Optional[bool] | Omit = omit,
        frequency_penalty: Optional[float] | Omit = omit,
        logit_bias: Optional[Dict[str, int]] | Omit = omit,
        logprobs: Optional[int] | Omit = omit,
        max_tokens: Optional[int] | Omit = omit,
        n: Optional[int] | Omit = omit,
        presence_penalty: Optional[float] | Omit = omit,
        seed: Optional[int] | Omit = omit,
        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
        suffix: Optional[str] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        user: str | Omit = omit,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Stream[Completion]:
        """
        Creates a completion for the provided prompt and parameters.

        Args:
          model: ID of the model to use. You can use the
              [List models](https://platform.openai.com/docs/api-reference/models/list) API to
              see all of your available models, or see our
              [Model overview](https://platform.openai.com/docs/models) for descriptions of
              them.

          prompt: The prompt(s) to generate completions for, encoded as a string, array of
              strings, array of tokens, or array of token arrays.

              Note that <|endoftext|> is the document separator that the model sees during
              training, so if a prompt is not specified the model will generate as if from the
              beginning of a new document.

          stream: Whether to stream back partial progress. If set, tokens will be sent as
              data-only
              [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format)
              as they become available, with the stream terminated by a `data: [DONE]`
              message.
              [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).

          best_of: Generates `best_of` completions server-side and returns the "best" (the one with
              the highest log probability per token). Results cannot be streamed.

              When used with `n`, `best_of` controls the number of candidate completions and
              `n` specifies how many to return – `best_of` must be greater than `n`.

              **Note:** Because this parameter generates many completions, it can quickly
              consume your token quota. Use carefully and ensure that you have reasonable
              settings for `max_tokens` and `stop`.

          echo: Echo back the prompt in addition to the completion

          frequency_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on their
              existing frequency in the text so far, decreasing the model's likelihood to
              repeat the same line verbatim.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)

          logit_bias: Modify the likelihood of specified tokens appearing in the completion.

              Accepts a JSON object that maps tokens (specified by their token ID in the GPT
              tokenizer) to an associated bias value from -100 to 100. You can use this
              [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
              Mathematically, the bias is added to the logits generated by the model prior to
              sampling. The exact effect will vary per model, but values between -1 and 1
              should decrease or increase likelihood of selection; values like -100 or 100
              should result in a ban or exclusive selection of the relevant token.

              As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
              from being generated.

          logprobs: Include the log probabilities on the `logprobs` most likely output tokens, as
              well the chosen tokens. For example, if `logprobs` is 5, the API will return a
              list of the 5 most likely tokens. The API will always return the `logprob` of
              the sampled token, so there may be up to `logprobs+1` elements in the response.

              The maximum value for `logprobs` is 5.

          max_tokens: The maximum number of [tokens](/tokenizer) that can be generated in the
              completion.

              The token count of your prompt plus `max_tokens` cannot exceed the model's
              context length.
              [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
              for counting tokens.

          n: How many completions to generate for each prompt.

              **Note:** Because this parameter generates many completions, it can quickly
              consume your token quota. Use carefully and ensure that you have reasonable
              settings for `max_tokens` and `stop`.

          presence_penalty: Number between -2.0 and 2.0. Positive values penalize new tokens based on
              whether they appear in the text so far, increasing the model's likelihood to
              talk about new topics.

              [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)

          seed: If specified, our system will make a best effort to sample deterministically,
              such that repeated requests with the same `seed` and parameters should return
              the same result.

              Determinism is not guaranteed, and you should refer to the `system_fingerprint`
              response parameter to monitor changes in the backend.

          stop: Not supported with latest reasoning models `o3` and `o4-mini`.

              Up to 4 sequences where the API will stop generating further tokens. The
              returned text will not contain the stop sequence.

          stream_options: Options for streaming response. Only set this when you set `stream: true`.

          suffix: The suffix that comes after a completion of inserted text.

              This parameter is only supported for `gpt-3.5-turbo-instruct`.

          temperature: What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
              make the output more random, while lower values like 0.2 will make it more
              focused and deterministic.

              We generally recommend altering this or `top_p` but not both.

          top_p: An alternative to sampling with temperature, called nucleus sampling, where the
              model considers the results of the tokens with top_p probability mass. So 0.1
              means only the tokens comprising the top 10% probability mass are considered.

              We generally recommend altering this or `temperature` but not both.

          user: A unique identifier representing your end-user, which can help OpenAI to monitor
              and detect abuse.
              [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).

          extra_headers: Send extra headers

          extra_query: Add additional query parameters to the request

          extra_body: Add additional JSON properties to the request

          timeout: Override the client-level default timeout for this request, in seconds
        """
        ...

    @overload
    def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None],
        stream: bool,
        best_of: Optional[int] | Omit = omit,
        echo: Optional[bool] | Omit = omit,
        frequency_penalty: Optional[float] | Omit = omit,
        logit_bias: Optional[Dict[str, int]] | Omit = omit,
        logprobs: Optional[int] | Omit = omit,
        max_tokens: Optional[int] | Omit = omit,
        n: Optional[int] | Omit = omit,
        presence_penalty: Optional[float] | Omit = omit,
        seed: Optional[int] | Omit = omit,
        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
        suffix: Optional[str] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        user: str | Omit = omit,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Completion | Stream[Completion]:
        """Creates a completion for the provided prompt and parameters."""
        ...

    @required_args(["model", "prompt"], ["model", "prompt", "stream"])
    def create(
        self,
        *,
        model: Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]],
        prompt: Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None],
        best_of: Optional[int] | Omit = omit,
        echo: Optional[bool] | Omit = omit,
        frequency_penalty: Optional[float] | Omit = omit,
        logit_bias: Optional[Dict[str, int]] | Omit = omit,
        logprobs: Optional[int] | Omit = omit,
        max_tokens: Optional[int] | Omit = omit,
        n: Optional[int] | Omit = omit,
        presence_penalty: Optional[float] | Omit = omit,
        seed: Optional[int] | Omit = omit,
        stop: Union[Optional[str], SequenceNotStr[str], None] | Omit = omit,
        stream: Optional[Literal[False]] | Literal[True] | Omit = omit,
        stream_options: Optional[ChatCompletionStreamOptionsParam] | Omit = omit,
        suffix: Optional[str] | Omit = omit,
        temperature: Optional[float] | Omit = omit,
        top_p: Optional[float] | Omit = omit,
        user: str | Omit = omit,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | httpx.Timeout | None | NotGiven = not_given,
    ) -> Completion | Stream[Completion]:
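        # Concrete implementation behind the three typed overloads above: the
        # keyword arguments are transformed with the TypedDict that matches the
        # requested mode (streaming vs. non-streaming) and POSTed to
        # /completions; the response comes back either as a parsed `Completion`
        # or, when `stream=True`, as a `Stream[Completion]` of server-sent
        # events.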
     2   | j                  dt        i d|d|d|d|d|d|d|d	|d
|	d|
d|d|d|d|d|d|d|d|i|rt        j                  nt        j                        t        ||||      t        |xs dt        t                 S Nz/completionsrF   rG   r2   r3   r4   r5   r6   r7   r8   r9   r:   r;   r<   r=   r>   r?   r@   rA   )rB   rC   rD   rE   F)bodyoptionscast_tor<   
stream_cls)_postr   r   CompletionCreateParamsStreaming"CompletionCreateParamsNonStreamingr   r    r   rK   s                          r*   rL   zCompletions.create   sC   : zz Uf w D	
 (): !*  !*  '(8 D D f %n f  ";!" U#$ D%*  )HH-PP/2 )+Q[el ?Uj)A  !
 !	
r,   )returnr'   )r]   r/   .rF   KUnion[str, Literal['gpt-3.5-turbo-instruct', 'davinci-002', 'babbage-002']]rG   MUnion[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None]r2   Optional[int] | Omitr3   Optional[bool] | Omitr4   Optional[float] | Omitr5   Optional[Dict[str, int]] | Omitr6   ra   r7   ra   r8   ra   r9   rc   r:   ra   r;   6Union[Optional[str], SequenceNotStr[str], None] | Omitr<   zOptional[Literal[False]] | Omitr=   1Optional[ChatCompletionStreamOptionsParam] | Omitr>   Optional[str] | Omitr?   rc   r@   rc   rA   
str | OmitrB   Headers | NonerC   Query | NonerD   Body | NonerE   'float | httpx.Timeout | None | NotGivenr]   r    ).rF   r_   rG   r`   r<   Literal[True]r2   ra   r3   rb   r4   rc   r5   rd   r6   ra   r7   ra   r8   ra   r9   rc   r:   ra   r;   re   r=   rf   r>   rg   r?   rc   r@   rc   rA   rh   rB   ri   rC   rj   rD   rk   rE   rl   r]   zStream[Completion]).rF   r_   rG   r`   r<   boolr2   ra   r3   rb   r4   rc   r5   rd   r6   ra   r7   ra   r8   ra   r9   rc   r:   ra   r;   re   r=   rf   r>   rg   r?   rc   r@   rc   rA   rh   rB   ri   rC   rj   rD   rk   rE   rl   r]   Completion | Stream[Completion]).rF   r_   rG   r`   r2   ra   r3   rb   r4   rc   r5   rd   r6   ra   r7   ra   r8   ra   r9   rc   r:   ra   r;   re   r<   /Optional[Literal[False]] | Literal[True] | Omitr=   rf   r>   rg   r?   rc   r@   rc   rA   rh   rB   ri   rC   rj   rD   rk   rE   rl   r]   ro   __name__
__module____qualname__r   r+   r0   r	   r   r   rL   r   rJ   r,   r*   r"   r"      sw   0 0 6 6  )-&*486:)-+/"&37%)GK26LP'+.2(, )-$("&;D5X [X ^	X
 &X $X 2X 4X 'X )X  X 1X #X EX 0X  J!X" %#X$ ,%X& &'X( )X. &/X0 "1X2  3X4 95X6 
7X Xt  )-&*486:)-+/"&37%)GKLP'+.2(, )-$("&;D5X [X ^	X
 X &X $X 2X 4X 'X )X  X 1X #X EX  J!X" %#X$ ,%X& &'X( )X. &/X0 "1X2  3X4 95X6 
7X Xt  )-&*486:)-+/"&37%)GKLP'+.2(, )-$("&;D5X [X ^	X
 X &X $X 2X 4X 'X )X  X 1X #X EX  J!X" %#X$ ,%X& &'X( )X. &/X0 "1X2  3X4 95X6 
)7X Xt GX&(EF )-&*486:)-+/"&37%)GKBFLP'+.2(, )-$("&;D5=
 [=
 ^	=

 &=
 $=
 2=
 4=
 '=
 )=
  =
 1=
 #=
 E=
 @=
  J!=
" %#=
$ ,%=
& &'=
( )=
. &/=
0 "1=
2  3=
4 95=
6 
)7=
 G=
r,   c                     e Zd Zedd       Zedd       Zeeeeeeeeeeeeeeeeeddded	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 dd       Z	eeeeeeeeeeeeeeeeddded	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 dd	       Z	eeeeeeeeeeeeeeeeddded	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 dd
       Z	 e
ddgg d      eeeeeeeeeeeeeeeeddded	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 	 dd       Z	y)r#   c                    t        |       S r&   )AsyncCompletionsWithRawResponser(   s    r*   r+   z"AsyncCompletions.with_raw_responseB  s     /t44r,   c                    t        |       S r.   )%AsyncCompletionsWithStreamingResponser(   s    r*   r0   z(AsyncCompletions.with_streaming_responseL  s     5T::r,   Nr1   rF   rG   c                  K   ywrI   rJ   rK   s                          r*   rL   zAsyncCompletions.createU       r 	   rN   c                  K   ywrP   rJ   rQ   s                          r*   rL   zAsyncCompletions.create  r{   r|   c                  K   ywrP   rJ   rQ   s                          r*   rL   zAsyncCompletions.create  r{   r|   rS   c          
     b  K   | j                  dt        i d|d|d|d|d|d|d|d	|d
|	d|
d|d|d|d|d|d|d|d|i|rt        j                  nt        j                         d {   t        ||||      t        |xs dt        t                  d {   S 7 57 wrU   )rZ   r   r   r[   r\   r   r    r   rK   s                          r*   rL   zAsyncCompletions.create&  s\    : ZZ,Uf w D	
 (): !*  !*  '(8 D D f %n f  ";!" U#$ D%*  )HH-PP/ 2 )+Q[el ?U":.A   !
 !
 !	
!
s$   A3B/5B+
60B/&B-'B/-B/)r]   rw   )r]   ry   r^   ).rF   r_   rG   r`   r<   rm   r2   ra   r3   rb   r4   rc   r5   rd   r6   ra   r7   ra   r8   ra   r9   rc   r:   ra   r;   re   r=   rf   r>   rg   r?   rc   r@   rc   rA   rh   rB   ri   rC   rj   rD   rk   rE   rl   r]   zAsyncStream[Completion]).rF   r_   rG   r`   r<   rn   r2   ra   r3   rb   r4   rc   r5   rd   r6   ra   r7   ra   r8   ra   r9   rc   r:   ra   r;   re   r=   rf   r>   rg   r?   rc   r@   rc   rA   rh   rB   ri   rC   rj   rD   rk   rE   rl   r]   $Completion | AsyncStream[Completion]).rF   r_   rG   r`   r2   ra   r3   rb   r4   rc   r5   rd   r6   ra   r7   ra   r8   ra   r9   rc   r:   ra   r;   re   r<   rp   r=   rf   r>   rg   r?   rc   r@   rc   rA   rh   rB   ri   rC   rj   rD   rk   rE   rl   r]   r   rq   rJ   r,   r*   r#   r#   A  sw   5 5 ; ;  )-&*486:)-+/"&37%)GK26LP'+.2(, )-$("&;D5X [X ^	X
 &X $X 2X 4X 'X )X  X 1X #X EX 0X  J!X" %#X$ ,%X& &'X( )X. &/X0 "1X2  3X4 95X6 
7X Xt  )-&*486:)-+/"&37%)GKLP'+.2(, )-$("&;D5X [X ^	X
 X &X $X 2X 4X 'X )X  X 1X #X EX  J!X" %#X$ ,%X& &'X( )X. &/X0 "1X2  3X4 95X6 
!7X Xt  )-&*486:)-+/"&37%)GKLP'+.2(, )-$("&;D5X [X ^	X
 X &X $X 2X 4X 'X )X  X 1X #X EX  J!X" %#X$ ,%X& &'X( )X. &/X0 "1X2  3X4 95X6 
.7X Xt GX&(EF )-&*486:)-+/"&37%)GKBFLP'+.2(, )-$("&;D5=
 [=
 ^	=

 &=
 $=
 2=
 4=
 '=
 )=
  =
 1=
 #=
 E=
 @=
  J!=
" %#=
$ ,%=
& &'=
( )=
. &/=
0 "1=
2  3=
4 95=
6 
.7=
 G=
r,   c                      e Zd ZddZy)r'   c                Z    || _         t        j                  |j                        | _        y N)_completionsr   to_raw_response_wrapperrL   r)   completionss     r*   __init__z#CompletionsWithRawResponse.__init__h  s%    '&>>
r,   Nr   r"   r]   Nonerr   rs   rt   r   rJ   r,   r*   r'   r'   g      
r,   r'   c                      e Zd ZddZy)rw   c                Z    || _         t        j                  |j                        | _        y r   )r   r   async_to_raw_response_wrapperrL   r   s     r*   r   z(AsyncCompletionsWithRawResponse.__init__q  s%    '&DD
r,   Nr   r#   r]   r   r   rJ   r,   r*   rw   rw   p  r   r,   rw   c                      e Zd ZddZy)r/   c                F    || _         t        |j                        | _        y r   )r   r   rL   r   s     r*   r   z)CompletionsWithStreamingResponse.__init__z  s    '2
r,   Nr   r   rJ   r,   r*   r/   r/   y  r   r,   r/   c                      e Zd ZddZy)ry   c                F    || _         t        |j                        | _        y r   )r   r   rL   r   s     r*   r   z.AsyncCompletionsWithStreamingResponse.__init__  s    '8
r,   Nr   r   rJ   r,   r*   ry   ry     r   r,   ry   )4
__future__r   typingr   r   r   r   typing_extensionsr   r	   httpx r   typesr   _typesr   r   r   r   r   r   r   r   _utilsr   r   r   _compatr   	_resourcer   r   	_responser   r   
_streamingr   r   _base_clientr   types.completionr    /types.chat.chat_completion_stream_options_paramr!   __all__r"   r#   r'   rw   r/   ry   rJ   r,   r*   <module>r      s    # 2 2 /   , Z Z Z J J % 9 X , * ^,
-c
/ c
Lc
' c
L
 

 

 

 
r,   
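# Usage sketch (illustrative): these resource classes are normally reached
# through a configured client rather than instantiated directly. Assuming an
# `OpenAI` client with an API key available in the environment, a
# non-streaming call and a streaming call look roughly like:
#
#     from openai import OpenAI
#
#     client = OpenAI()
#
#     completion = client.completions.create(
#         model="gpt-3.5-turbo-instruct",
#         prompt="Write a one-line haiku about type hints.",
#         max_tokens=32,
#     )
#     print(completion.choices[0].text)
#
#     for chunk in client.completions.create(
#         model="gpt-3.5-turbo-instruct",
#         prompt="Count to five.",
#         stream=True,
#     ):
#         print(chunk.choices[0].text, end="")
#
# The async variants behave the same way with `AsyncOpenAI`: `create` is
# awaited and the streaming form is consumed with `async for`.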