openai package — github.com/sashabaranov/go-openai — Go Packages

client := openai.NewClient(os.Getenv("OPENAI_API_KEY")) resp, err := client.CreateChatCompletion( context.Background(), openai.ChatCompletionRequest{ Model: openai.GPT3Dot5Turbo, Messages: []openai.ChatCompletionMessage{ { Role: openai.ChatMessageRoleUser, Content: "Hello!", }, }, }, ) if err != nil { fmt.Printf("ChatCompletion error: %v\n", err) return }

fmt.Println(resp.Choices[0].Message.Content)

client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))

req := openai.ChatCompletionRequest{ Model: openai.GPT3Dot5Turbo, Messages: []openai.ChatCompletionMessage{ { Role: openai.ChatMessageRoleSystem, Content: "you are a helpful chatbot", }, }, } fmt.Println("Conversation") fmt.Println("---------------------") fmt.Print("> ") s := bufio.NewScanner(os.Stdin) for s.Scan() { req.Messages = append(req.Messages, openai.ChatCompletionMessage{ Role: openai.ChatMessageRoleUser, Content: s.Text(), }) resp, err := client.CreateChatCompletion(context.Background(), req) if err != nil { fmt.Printf("ChatCompletion error: %v\n", err) continue } fmt.Printf("%s\n\n", resp.Choices[0].Message.Content) req.Messages = append(req.Messages, resp.Choices[0].Message) fmt.Print("> ") }

View Source

const ( ChatMessageRoleSystem = "system" ChatMessageRoleUser = "user" ChatMessageRoleAssistant = "assistant" ChatMessageRoleFunction = "function" ChatMessageRoleTool = "tool" ChatMessageRoleDeveloper = "developer" )

Chat message role defined by the OpenAI API.

View Source

const ( O1Mini = "o1-mini" O1Mini20240912 = "o1-mini-2024-09-12" O1Preview = "o1-preview" O1Preview20240912 = "o1-preview-2024-09-12" O1 = "o1" O120241217 = "o1-2024-12-17" O3 = "o3" O320250416 = "o3-2025-04-16" O3Mini = "o3-mini" O3Mini20250131 = "o3-mini-2025-01-31" O4Mini = "o4-mini" O4Mini20250416 = "o4-mini-2025-04-16" GPT432K0613 = "gpt-4-32k-0613" GPT432K0314 = "gpt-4-32k-0314" GPT432K = "gpt-4-32k" GPT40613 = "gpt-4-0613" GPT40314 = "gpt-4-0314" GPT4o = "gpt-4o" GPT4o20240513 = "gpt-4o-2024-05-13" GPT4o20240806 = "gpt-4o-2024-08-06" GPT4o20241120 = "gpt-4o-2024-11-20" GPT4oLatest = "chatgpt-4o-latest" GPT4oMini = "gpt-4o-mini" GPT4oMini20240718 = "gpt-4o-mini-2024-07-18" GPT4Turbo = "gpt-4-turbo" GPT4Turbo20240409 = "gpt-4-turbo-2024-04-09" GPT4Turbo0125 = "gpt-4-0125-preview" GPT4Turbo1106 = "gpt-4-1106-preview" GPT4TurboPreview = "gpt-4-turbo-preview" GPT4VisionPreview = "gpt-4-vision-preview" GPT4 = "gpt-4" GPT4Dot1 = "gpt-4.1" GPT4Dot120250414 = "gpt-4.1-2025-04-14" GPT4Dot1Mini = "gpt-4.1-mini" GPT4Dot1Mini20250414 = "gpt-4.1-mini-2025-04-14" GPT4Dot1Nano = "gpt-4.1-nano" GPT4Dot1Nano20250414 = "gpt-4.1-nano-2025-04-14" GPT4Dot5Preview = "gpt-4.5-preview" GPT4Dot5Preview20250227 = "gpt-4.5-preview-2025-02-27" GPT5 = "gpt-5" GPT5Mini = "gpt-5-mini" GPT5Nano = "gpt-5-nano" GPT5ChatLatest = "gpt-5-chat-latest" GPT3Dot5Turbo0125 = "gpt-3.5-turbo-0125" GPT3Dot5Turbo1106 = "gpt-3.5-turbo-1106" GPT3Dot5Turbo0613 = "gpt-3.5-turbo-0613" GPT3Dot5Turbo0301 = "gpt-3.5-turbo-0301" GPT3Dot5Turbo16K = "gpt-3.5-turbo-16k" GPT3Dot5Turbo16K0613 = "gpt-3.5-turbo-16k-0613" GPT3Dot5Turbo = "gpt-3.5-turbo" GPT3Dot5TurboInstruct = "gpt-3.5-turbo-instruct"

GPT3TextDavinci003 = "text-davinci-003"

GPT3TextDavinci002 = "text-davinci-002"

GPT3TextCurie001 = "text-curie-001"

GPT3TextBabbage001 = "text-babbage-001"

GPT3TextAda001 = "text-ada-001"

GPT3TextDavinci001 = "text-davinci-001"

GPT3DavinciInstructBeta = "davinci-instruct-beta"

GPT3Davinci    = "davinci"
GPT3Davinci002 = "davinci-002"

GPT3CurieInstructBeta = "curie-instruct-beta"
GPT3Curie             = "curie"
GPT3Curie002          = "curie-002"

GPT3Ada    = "ada"
GPT3Ada002 = "ada-002"

GPT3Babbage    = "babbage"
GPT3Babbage002 = "babbage-002"

)

GPT3 defines the models provided by OpenAI for generating completions. GPT3 models are designed for text-based tasks; for code-specific tasks, refer to the Codex series of models.

View Source

const ( CodexCodeDavinci002 = "code-davinci-002" CodexCodeCushman001 = "code-cushman-001" CodexCodeDavinci001 = "code-davinci-001" )

Codex Defines the models provided by OpenAI. These models are designed for code-specific tasks, and use a different tokenizer which optimizes for whitespace.

View Source

const ( CreateImageSize256x256 = "256x256" CreateImageSize512x512 = "512x512" CreateImageSize1024x1024 = "1024x1024"

CreateImageSize1792x1024 = "1792x1024"
CreateImageSize1024x1792 = "1024x1792"


CreateImageSize1536x1024 = "1536x1024" 
CreateImageSize1024x1536 = "1024x1536" 

)

Image sizes defined by the OpenAI API.

View Source

const (

CreateImageResponseFormatB64JSON = "b64_json"
CreateImageResponseFormatURL     = "url"

)

View Source

const ( CreateImageModelDallE2 = "dall-e-2" CreateImageModelDallE3 = "dall-e-3" CreateImageModelGptImage1 = "gpt-image-1" )

View Source

const ( CreateImageQualityHD = "hd" CreateImageQualityStandard = "standard"

CreateImageQualityHigh   = "high"
CreateImageQualityMedium = "medium"
CreateImageQualityLow    = "low"

)

View Source

const (

CreateImageStyleVivid   = "vivid"
CreateImageStyleNatural = "natural"

)

View Source

const (

CreateImageBackgroundTransparent = "transparent"
CreateImageBackgroundOpaque      = "opaque"

)

View Source

const (

CreateImageOutputFormatPNG  = "png"
CreateImageOutputFormatJPEG = "jpeg"
CreateImageOutputFormatWEBP = "webp"

)

View Source

const ( ModerationOmniLatest = "omni-moderation-latest" ModerationOmni20240926 = "omni-moderation-2024-09-26" ModerationTextStable = "text-moderation-stable" ModerationTextLatest = "text-moderation-latest"

ModerationText001 = "text-moderation-001"

)

The default is text-moderation-latest which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use text-moderation-stable, we will provide advanced notice before updating the model. Accuracy of text-moderation-stable may be slightly lower than for text-moderation-latest.

View Source

const (

TruncationStrategyAuto = [TruncationStrategy](#TruncationStrategy)("auto")

TruncationStrategyLastMessages = [TruncationStrategy](#TruncationStrategy)("last_messages")

)

View Source

const ( AnthropicAPIVersion = "2023-06-01" )

AnthropicAPIVersion is the API version header value used when the client is configured with APITypeAnthropic.

View Source

var ( ErrChatCompletionInvalidModel = errors.New("this model is not supported with this method, please use CreateCompletion client method instead") ErrChatCompletionStreamNotSupported = errors.New("streaming is not supported with this method, please use CreateChatCompletionStream")
ErrContentFieldsMisused = errors.New("can't use both Content and MultiContent properties simultaneously") )

View Source

var (

ErrO1MaxTokensDeprecated                   = [errors](/errors).[New](/errors#New)("this model is not supported MaxTokens, please use MaxCompletionTokens")                               
ErrCompletionUnsupportedModel              = [errors](/errors).[New](/errors#New)("this model is not supported with this method, please use CreateChatCompletion client method instead") 
ErrCompletionStreamNotSupported            = [errors](/errors).[New](/errors#New)("streaming is not supported with this method, please use CreateCompletionStream")                      
ErrCompletionRequestPromptTypeNotSupported = [errors](/errors).[New](/errors#New)("the type of CompletionRequest.Prompt only supports string and []string")                              

)

View Source

var ( ErrO1BetaLimitationsMessageTypes = errors.New("this model has beta-limitations, user and assistant messages only, system messages are not supported")
ErrO1BetaLimitationsTools = errors.New("this model has beta-limitations, tools, function calling, and response format parameters are not supported")

ErrO1BetaLimitationsLogprobs = [errors](/errors).[New](/errors#New)("this model has beta-limitations, logprobs not supported")                                                                               
ErrO1BetaLimitationsOther    = [errors](/errors).[New](/errors#New)("this model has beta-limitations, temperature, top_p and n are fixed at 1, while presence_penalty and frequency_penalty are fixed at 0") 

)

View Source

var (

ErrReasoningModelMaxTokensDeprecated = [errors](/errors).[New](/errors#New)("this model is not supported MaxTokens, please use MaxCompletionTokens")
ErrReasoningModelLimitationsLogprobs = [errors](/errors).[New](/errors#New)("this model has beta-limitations, logprobs not supported")                                                                               
ErrReasoningModelLimitationsOther    = [errors](/errors).[New](/errors#New)("this model has beta-limitations, temperature, top_p and n are fixed at 1, while presence_penalty and frequency_penalty are fixed at 0") 

)

View Source

var ( ErrModerationInvalidModel = errors.New("this model is not supported with moderation, please use text-moderation-stable or text-moderation-latest instead") )

View Source

var ( ErrTooManyEmptyStreamMessages = errors.New("stream has sent too many empty messages") )

WrapReader wraps an io.Reader with filename and Content-type.

type APIError struct { Code any json:"code,omitempty" Message string json:"message" Param *string json:"param,omitempty" Type string json:"type" HTTPStatus string json:"-" HTTPStatusCode int json:"-" InnerError *InnerError json:"innererror,omitempty" }

APIError provides error information returned by the OpenAI API. InnerError struct is only valid for Azure OpenAI Service.

Open-AI maintains clear documentation on how to handle API errors.

see: https://platform.openai.com/docs/guides/error-codes/api-errors

var err error // Assume this is the error you are checking. e := &openai.APIError{} if errors.As(err, &e) { switch e.HTTPStatusCode { case 401: // invalid auth or key (do not retry) case 429: // rate limiting or engine overload (wait and retry) case 500: // openai server error (retry) default: // unhandled } }

func (e *APIError) UnmarshalJSON(data []byte) (err error)

const ( APITypeOpenAI APIType = "OPEN_AI" APITypeAzure APIType = "AZURE" APITypeAzureAD APIType = "AZURE_AD" APITypeCloudflareAzure APIType = "CLOUDFLARE_AZURE" APITypeAnthropic APIType = "ANTHROPIC" )

type Assistant struct { ID string json:"id" Object string json:"object" CreatedAt int64 json:"created_at" Name *string json:"name,omitempty" Description *string json:"description,omitempty" Model string json:"model" Instructions *string json:"instructions,omitempty" Tools []AssistantTool json:"tools" ToolResources *AssistantToolResource json:"tool_resources,omitempty" FileIDs []string json:"file_ids,omitempty" Metadata map[string]any json:"metadata,omitempty" Temperature *float32 json:"temperature,omitempty" TopP *float32 json:"top_p,omitempty" ResponseFormat any json:"response_format,omitempty"

}

func (h *Assistant) GetRateLimitHeaders() RateLimitHeaders

type AssistantDeleteResponse struct { ID string json:"id" Object string json:"object" Deleted bool json:"deleted"

}

func (h *AssistantDeleteResponse) GetRateLimitHeaders() RateLimitHeaders

func (h *AssistantDeleteResponse) Header() http.Header

func (h *AssistantDeleteResponse) SetHeader(header http.Header)

type AssistantFile struct { ID string json:"id" Object string json:"object" CreatedAt int64 json:"created_at" AssistantID string json:"assistant_id"

}

func (h *AssistantFile) GetRateLimitHeaders() RateLimitHeaders

func (h *AssistantFile) SetHeader(header http.Header)

type AssistantFileRequest struct { FileID string json:"file_id" }

type AssistantFilesList struct { AssistantFiles []AssistantFile json:"data"

}

func (h *AssistantFilesList) GetRateLimitHeaders() RateLimitHeaders

func (h *AssistantFilesList) SetHeader(header http.Header)

type AssistantRequest struct { Model string json:"model" Name *string json:"name,omitempty" Description *string json:"description,omitempty" Instructions *string json:"instructions,omitempty" Tools []AssistantTool json:"-" FileIDs []string json:"file_ids,omitempty" Metadata map[string]any json:"metadata,omitempty" ToolResources *AssistantToolResource json:"tool_resources,omitempty" ResponseFormat any json:"response_format,omitempty" Temperature *float32 json:"temperature,omitempty" TopP *float32 json:"top_p,omitempty" }

AssistantRequest provides the assistant request parameters. When modifying the tools the API functions as the following: If Tools is undefined, no changes are made to the Assistant's tools. If Tools is empty slice it will effectively delete all of the Assistant's tools. If Tools is populated, it will replace all of the existing Assistant's tools with the provided tools.

MarshalJSON provides a custom marshaller for the assistant request to handle the API use cases If Tools is nil, the field is omitted from the JSON. If Tools is an empty slice, it's included in the JSON as an empty array ([]). If Tools is populated, it's included in the JSON with the elements.

type AssistantTool struct { Type AssistantToolType json:"type" Function *FunctionDefinition json:"function,omitempty" }

type AssistantToolCodeInterpreter struct { FileIDs []string json:"file_ids" }

type AssistantToolFileSearch struct { VectorStoreIDs []string json:"vector_store_ids" }

type AssistantToolResource struct { FileSearch *AssistantToolFileSearch json:"file_search,omitempty" CodeInterpreter *AssistantToolCodeInterpreter json:"code_interpreter,omitempty" }

const ( AssistantToolTypeCodeInterpreter AssistantToolType = "code_interpreter" AssistantToolTypeRetrieval AssistantToolType = "retrieval" AssistantToolTypeFunction AssistantToolType = "function" AssistantToolTypeFileSearch AssistantToolType = "file_search" )

type AssistantsList struct { Assistants []Assistant json:"data" LastID *string json:"last_id" FirstID *string json:"first_id" HasMore bool json:"has_more"

}

AssistantsList is a list of assistants.

func (h *AssistantsList) GetRateLimitHeaders() RateLimitHeaders

func (h *AssistantsList) SetHeader(header http.Header)

AudioRequest represents a request structure for audio API.

func (r AudioRequest) HasJSONResponse() bool

HasJSONResponse returns true if the response format is JSON.

type AudioResponse struct { Task string json:"task" Language string json:"language" Duration float64 json:"duration" Segments []struct { ID int json:"id" Seek int json:"seek" Start float64 json:"start" End float64 json:"end" Text string json:"text" Tokens []int json:"tokens" Temperature float64 json:"temperature" AvgLogprob float64 json:"avg_logprob" CompressionRatio float64 json:"compression_ratio" NoSpeechProb float64 json:"no_speech_prob" Transient bool json:"transient" } json:"segments" Words []struct { Word string json:"word" Start float64 json:"start" End float64 json:"end" } json:"words" Text string json:"text"

}

AudioResponse represents a response structure for audio API.

func (h *AudioResponse) GetRateLimitHeaders() RateLimitHeaders

func (h *AudioResponse) SetHeader(header http.Header)

type AudioResponseFormat string

Response formats; Whisper uses AudioResponseFormatJSON by default.

const ( AudioResponseFormatJSON AudioResponseFormat = "json" AudioResponseFormatText AudioResponseFormat = "text" AudioResponseFormatSRT AudioResponseFormat = "srt" AudioResponseFormatVerboseJSON AudioResponseFormat = "verbose_json" AudioResponseFormatVTT AudioResponseFormat = "vtt" )

type Base64Embedding struct { Object string json:"object" Embedding base64String json:"embedding" Index int json:"index" }

Base64Embedding is a container for base64 encoded embeddings.

type Batch struct { ID string json:"id" Object string json:"object" Endpoint BatchEndpoint json:"endpoint" Errors *struct { Object string json:"object,omitempty" Data []struct { Code string json:"code,omitempty" Message string json:"message,omitempty" Param *string json:"param,omitempty" Line *int json:"line,omitempty" } json:"data" } json:"errors" InputFileID string json:"input_file_id" CompletionWindow string json:"completion_window" Status string json:"status" OutputFileID *string json:"output_file_id" ErrorFileID *string json:"error_file_id" CreatedAt int json:"created_at" InProgressAt *int json:"in_progress_at" ExpiresAt *int json:"expires_at" FinalizingAt *int json:"finalizing_at" CompletedAt *int json:"completed_at" FailedAt *int json:"failed_at" ExpiredAt *int json:"expired_at" CancellingAt *int json:"cancelling_at" CancelledAt *int json:"cancelled_at" RequestCounts BatchRequestCounts json:"request_counts" Metadata map[string]any json:"metadata" }

type BatchChatCompletionRequest struct { CustomID string json:"custom_id" Body ChatCompletionRequest json:"body" Method string json:"method" URL BatchEndpoint json:"url" }

func (r BatchChatCompletionRequest) MarshalBatchLineItem() []byte

type BatchCompletionRequest struct { CustomID string json:"custom_id" Body CompletionRequest json:"body" Method string json:"method" URL BatchEndpoint json:"url" }

func (r BatchCompletionRequest) MarshalBatchLineItem() []byte

type BatchEmbeddingRequest struct { CustomID string json:"custom_id" Body EmbeddingRequest json:"body" Method string json:"method" URL BatchEndpoint json:"url" }

func (r BatchEmbeddingRequest) MarshalBatchLineItem() []byte

const ( BatchEndpointChatCompletions BatchEndpoint = "/v1/chat/completions" BatchEndpointCompletions BatchEndpoint = "/v1/completions" BatchEndpointEmbeddings BatchEndpoint = "/v1/embeddings" )

type BatchLineItem interface { MarshalBatchLineItem() []byte }

type BatchRequestCounts struct { Total int json:"total" Completed int json:"completed" Failed int json:"failed" }

type BatchResponse struct { Batch

}

func (h *BatchResponse) GetRateLimitHeaders() RateLimitHeaders

func (h *BatchResponse) SetHeader(header http.Header)

type ChatCompletionChoice struct { Index int json:"index" Message ChatCompletionMessage json:"message"

FinishReason         [FinishReason](#FinishReason)         `json:"finish_reason"`
LogProbs             *[LogProbs](#LogProbs)            `json:"logprobs,omitempty"`
ContentFilterResults [ContentFilterResults](#ContentFilterResults) `json:"content_filter_results,omitempty"`

}

type ChatCompletionMessage struct { Role string json:"role" Content string json:"content,omitempty" Refusal string json:"refusal,omitempty" MultiContent []ChatMessagePart

Name [string](/builtin#string) `json:"name,omitempty"`


ReasoningContent [string](/builtin#string) `json:"reasoning_content,omitempty"`

FunctionCall *[FunctionCall](#FunctionCall) `json:"function_call,omitempty"`


ToolCalls [][ToolCall](#ToolCall) `json:"tool_calls,omitempty"`


ToolCallID [string](/builtin#string) `json:"tool_call_id,omitempty"`

}

type ChatCompletionRequest struct { Model string json:"model" Messages []ChatCompletionMessage json:"messages"

MaxTokens [int](/builtin#int) `json:"max_tokens,omitempty"`


MaxCompletionTokens [int](/builtin#int)                           `json:"max_completion_tokens,omitempty"`
Temperature         [float32](/builtin#float32)                       `json:"temperature,omitempty"`
TopP                [float32](/builtin#float32)                       `json:"top_p,omitempty"`
N                   [int](/builtin#int)                           `json:"n,omitempty"`
Stream              [bool](/builtin#bool)                          `json:"stream,omitempty"`
Stop                [][string](/builtin#string)                      `json:"stop,omitempty"`
PresencePenalty     [float32](/builtin#float32)                       `json:"presence_penalty,omitempty"`
ResponseFormat      *[ChatCompletionResponseFormat](#ChatCompletionResponseFormat) `json:"response_format,omitempty"`
Seed                *[int](/builtin#int)                          `json:"seed,omitempty"`
FrequencyPenalty    [float32](/builtin#float32)                       `json:"frequency_penalty,omitempty"`


LogitBias map[[string](/builtin#string)][int](/builtin#int) `json:"logit_bias,omitempty"`


LogProbs [bool](/builtin#bool) `json:"logprobs,omitempty"`


TopLogProbs [int](/builtin#int)    `json:"top_logprobs,omitempty"`
User        [string](/builtin#string) `json:"user,omitempty"`

Functions [][FunctionDefinition](#FunctionDefinition) `json:"functions,omitempty"`

FunctionCall [any](/builtin#any)    `json:"function_call,omitempty"`
Tools        [][Tool](#Tool) `json:"tools,omitempty"`

ToolChoice [any](/builtin#any) `json:"tool_choice,omitempty"`

StreamOptions *[StreamOptions](#StreamOptions) `json:"stream_options,omitempty"`

ParallelToolCalls [any](/builtin#any) `json:"parallel_tool_calls,omitempty"`


Store [bool](/builtin#bool) `json:"store,omitempty"`

ReasoningEffort [string](/builtin#string) `json:"reasoning_effort,omitempty"`

Metadata map[[string](/builtin#string)][string](/builtin#string) `json:"metadata,omitempty"`

Prediction *[Prediction](#Prediction) `json:"prediction,omitempty"`


ChatTemplateKwargs map[[string](/builtin#string)][any](/builtin#any) `json:"chat_template_kwargs,omitempty"`

ServiceTier [ServiceTier](#ServiceTier) `json:"service_tier,omitempty"`


Verbosity [string](/builtin#string) `json:"verbosity,omitempty"`


SafetyIdentifier [string](/builtin#string) `json:"safety_identifier,omitempty"`

[ChatCompletionRequestExtensions](#ChatCompletionRequestExtensions)

}

ChatCompletionRequest represents a request structure for chat completion API.

type ChatCompletionRequestExtensions struct {

GuidedChoice [][string](/builtin#string) `json:"guided_choice,omitempty"`

}

ChatCompletionRequestExtensions contains third-party OpenAI API extensions (e.g., vendor-specific implementations like vLLM).

type ChatCompletionResponse struct { ID string json:"id" Object string json:"object" Created int64 json:"created" Model string json:"model" Choices []ChatCompletionChoice json:"choices" Usage Usage json:"usage" SystemFingerprint string json:"system_fingerprint" PromptFilterResults []PromptFilterResult json:"prompt_filter_results,omitempty" ServiceTier ServiceTier json:"service_tier,omitempty"

}

ChatCompletionResponse represents a response structure for chat completion API.

func (h *ChatCompletionResponse) GetRateLimitHeaders() RateLimitHeaders

func (h *ChatCompletionResponse) Header() http.Header

func (h *ChatCompletionResponse) SetHeader(header http.Header)

type ChatCompletionResponseFormat struct { Type ChatCompletionResponseFormatType json:"type,omitempty" JSONSchema *ChatCompletionResponseFormatJSONSchema json:"json_schema,omitempty" }

type ChatCompletionResponseFormatJSONSchema struct { Name string json:"name" Description string json:"description,omitempty" Schema json.Marshaler json:"schema" Strict bool json:"strict" }

type ChatCompletionResponseFormatType string

const ( ChatCompletionResponseFormatTypeJSONObject ChatCompletionResponseFormatType = "json_object" ChatCompletionResponseFormatTypeJSONSchema ChatCompletionResponseFormatType = "json_schema" ChatCompletionResponseFormatTypeText ChatCompletionResponseFormatType = "text" )

type ChatCompletionStream struct {

}

ChatCompletionStream Note: Perhaps it is more elegant to abstract Stream using generics.

func (stream ChatCompletionStream) Close() error

func (stream ChatCompletionStream) Recv() (response T, err error)

func (stream ChatCompletionStream) RecvRaw() ([]byte, error)

type ChatCompletionStreamChoice struct { Index int json:"index" Delta ChatCompletionStreamChoiceDelta json:"delta" Logprobs *ChatCompletionStreamChoiceLogprobs json:"logprobs,omitempty" FinishReason FinishReason json:"finish_reason" ContentFilterResults ContentFilterResults json:"content_filter_results,omitempty" }

type ChatCompletionStreamChoiceDelta struct { Content string json:"content,omitempty" Role string json:"role,omitempty" FunctionCall *FunctionCall json:"function_call,omitempty" ToolCalls []ToolCall json:"tool_calls,omitempty" Refusal string json:"refusal,omitempty"

ReasoningContent [string](/builtin#string) `json:"reasoning_content,omitempty"`

}

type ChatCompletionStreamChoiceLogprobs struct { Content []ChatCompletionTokenLogprob json:"content,omitempty" Refusal []ChatCompletionTokenLogprob json:"refusal,omitempty" }

type ChatCompletionStreamResponse struct { ID string json:"id" Object string json:"object" Created int64 json:"created" Model string json:"model" Choices []ChatCompletionStreamChoice json:"choices" SystemFingerprint string json:"system_fingerprint" PromptAnnotations []PromptAnnotation json:"prompt_annotations,omitempty" PromptFilterResults []PromptFilterResult json:"prompt_filter_results,omitempty"

Usage *[Usage](#Usage) `json:"usage,omitempty"`

}

type ChatCompletionTokenLogprob struct { Token string json:"token" Bytes []int64 json:"bytes,omitempty" Logprob float64 json:"logprob,omitempty" TopLogprobs []ChatCompletionTokenLogprobTopLogprob json:"top_logprobs" }

type ChatCompletionTokenLogprobTopLogprob struct { Token string json:"token" Bytes []int64 json:"bytes" Logprob float64 json:"logprob" }

type ChatMessageImageURL struct { URL string json:"url,omitempty" Detail ImageURLDetail json:"detail,omitempty" }

type ChatMessagePart struct { Type ChatMessagePartType json:"type,omitempty" Text string json:"text,omitempty" ImageURL *ChatMessageImageURL json:"image_url,omitempty" }

type ChatMessagePartType string

const ( ChatMessagePartTypeText ChatMessagePartType = "text" ChatMessagePartTypeImageURL ChatMessagePartType = "image_url" )

type ChunkingStrategy struct { Type ChunkingStrategyType json:"type" Static *StaticChunkingStrategy json:"static,omitempty" }

type ChunkingStrategyType string

const ( ChunkingStrategyTypeAuto ChunkingStrategyType = "auto" ChunkingStrategyTypeStatic ChunkingStrategyType = "static" )

Client is the OpenAI API client.

func NewClient(authToken string) *Client

NewClient creates new OpenAI API client.

func NewClientWithConfig(config ClientConfig) *Client

NewClientWithConfig creates new OpenAI API client for specified config.

func NewOrgClient(authToken, org string) *Client

NewOrgClient creates new OpenAI API client for specified Organization ID.

Deprecated: Please use NewClientWithConfig.

CancelBatch — API call to Cancel batch.

CancelFineTune cancel a fine-tune job. Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. This API will be officially deprecated on January 4th, 2024. OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.

CancelFineTuningJob cancel a fine tuning job.

CancelVectorStoreFileBatch cancel a new vector store file batch.

CreateAssistant creates a new assistant.

CreateAssistantFile creates a new assistant file.

CreateBatch — API call to Create batch.

CreateBatchWithUploadFile — API call to Create batch with upload file.

CreateChatCompletion — API call to Create a completion for the chat message.

CreateChatCompletionStream — API call to create a chat completion w/ streaming support. It sets whether to stream back partial progress. If set, tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message.

client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))

stream, err := client.CreateChatCompletionStream( context.Background(), openai.ChatCompletionRequest{ Model: openai.GPT3Dot5Turbo, MaxTokens: 20, Messages: []openai.ChatCompletionMessage{ { Role: openai.ChatMessageRoleUser, Content: "Lorem ipsum", }, }, Stream: true, }, ) if err != nil { fmt.Printf("ChatCompletionStream error: %v\n", err) return } defer stream.Close()

fmt.Print("Stream response: ") for { var response openai.ChatCompletionStreamResponse response, err = stream.Recv() if errors.Is(err, io.EOF) { fmt.Println("\nStream finished") return }

if err != nil {
    fmt.Printf("\nStream error: %v\n", err)
    return
}

fmt.Println(response.Choices[0].Delta.Content)

}

CreateCompletion — API call to create a completion. This is the main endpoint of the API. Returns new text as well as, if requested, the probabilities over each alternative token at each position.

If using a fine-tuned model, simply provide the model's ID in the CompletionRequest object, and the server will use the model's parameters to generate the completion.

client := openai.NewClient(os.Getenv("OPENAI_API_KEY")) resp, err := client.CreateCompletion( context.Background(), openai.CompletionRequest{ Model: openai.GPT3Babbage002, MaxTokens: 5, Prompt: "Lorem ipsum", }, ) if err != nil { fmt.Printf("Completion error: %v\n", err) return } fmt.Println(resp.Choices[0].Text)

CreateCompletionStream — API call to create a completion w/ streaming support. It sets whether to stream back partial progress. If set, tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message.

client := openai.NewClient(os.Getenv("OPENAI_API_KEY")) stream, err := client.CreateCompletionStream( context.Background(), openai.CompletionRequest{ Model: openai.GPT3Babbage002, MaxTokens: 5, Prompt: "Lorem ipsum", Stream: true, }, ) if err != nil { fmt.Printf("CompletionStream error: %v\n", err) return } defer stream.Close()

for { var response openai.CompletionResponse response, err = stream.Recv() if errors.Is(err, io.EOF) { fmt.Println("Stream finished") return }

if err != nil {
    fmt.Printf("Stream error: %v\n", err)
    return
}

fmt.Printf("Stream response: %#v\n", response)

}

CreateEditImage — API call to edit an image. This is the image-edit endpoint of the DALL-E API.

CreateEmbeddings returns an EmbeddingResponse which will contain an Embedding for every item in |body.Input|. See https://beta.openai.com/docs/api-reference/embeddings/create

Body should be of type EmbeddingRequestStrings for embedding strings or EmbeddingRequestTokens for embedding groups of text already converted to tokens.

CreateFile uploads a jsonl file to GPT3 FilePath must be a local file path.

CreateFileBytes uploads bytes directly to OpenAI without requiring a local file.

Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. This API will be officially deprecated on January 4th, 2024. OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.

CreateFineTuningJob create a fine tuning job.

CreateImage - API call to create an image. This is the main endpoint of the DALL-E API.

client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))

respURL, err := client.CreateImage( context.Background(), openai.ImageRequest{ Prompt: "Parrot on a skateboard performs a trick, cartoon style, natural light, high detail", Size: openai.CreateImageSize256x256, ResponseFormat: openai.CreateImageResponseFormatURL, N: 1, }, ) if err != nil { fmt.Printf("Image creation error: %v\n", err) return } fmt.Println(respURL.Data[0].URL)

client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))

resp, err := client.CreateImage( context.Background(), openai.ImageRequest{ Prompt: "Portrait of a humanoid parrot in a classic costume, high detail, realistic light, unreal engine", Size: openai.CreateImageSize512x512, ResponseFormat: openai.CreateImageResponseFormatB64JSON, N: 1, }, ) if err != nil { fmt.Printf("Image creation error: %v\n", err) return }

b, err := base64.StdEncoding.DecodeString(resp.Data[0].B64JSON) if err != nil { fmt.Printf("Base64 decode error: %v\n", err) return }

f, err := os.Create("example.png") if err != nil { fmt.Printf("File creation error: %v\n", err) return } defer f.Close()

_, err = f.Write(b) if err != nil { fmt.Printf("File write error: %v\n", err) return }

fmt.Println("The image was saved as example.png")

CreateMessage creates a new message.

CreateRun creates a new run.

CreateThread creates a new thread.

func (*Client) CreateThreadAndRun added in v1.17.5

CreateThreadAndRun creates a thread and runs it in a single request.

CreateTranscription — API call to create a transcription. Returns transcribed text.

client := openai.NewClient(os.Getenv("OPENAI_API_KEY")) resp, err := client.CreateTranscription( context.Background(), openai.AudioRequest{ Model: openai.Whisper1, FilePath: "recording.mp3", }, ) if err != nil { fmt.Printf("Transcription error: %v\n", err) return } fmt.Println(resp.Text)

client := openai.NewClient(os.Getenv("OPENAI_API_KEY"))

resp, err := client.CreateTranscription( context.Background(), openai.AudioRequest{ Model: openai.Whisper1, FilePath: os.Args[1], Format: openai.AudioResponseFormatSRT, }, ) if err != nil { fmt.Printf("Transcription error: %v\n", err) return } f, err := os.Create(os.Args[1] + ".srt") if err != nil { fmt.Printf("Could not open file: %v\n", err) return } defer f.Close() if _, err = f.WriteString(resp.Text); err != nil { fmt.Printf("Error writing to file: %v\n", err) return }

CreateTranslation — API call to translate audio into English.

client := openai.NewClient(os.Getenv("OPENAI_API_KEY")) resp, err := client.CreateTranslation( context.Background(), openai.AudioRequest{ Model: openai.Whisper1, FilePath: "recording.mp3", }, ) if err != nil { fmt.Printf("Translation error: %v\n", err) return } fmt.Println(resp.Text)

CreateVariImage - API call to create an image variation. This is the main endpoint of the DALL-E API. Use abbreviations (vari for variation) because ci-lint has a single-line length limit ...

CreateVectorStore creates a new vector store.

CreateVectorStoreFile creates a new vector store file.

CreateVectorStoreFileBatch creates a new vector store file batch.

DeleteAssistant deletes an assistant.

DeleteAssistantFile deletes an existing file.

DeleteFile deletes an existing file.

Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. This API will be officially deprecated on January 4th, 2024. OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.

DeleteFineTuneModel Deletes a fine-tune model. You must have the Owner role in your organization to delete a model.

DeleteMessage deletes a message.

DeleteThread deletes a thread.

DeleteVectorStore deletes a vector store.

DeleteVectorStoreFile deletes an existing file.

Edits Perform an API call to the Edits endpoint.

Deprecated: Users of the Edits API and its associated models (e.g., text-davinci-edit-001 or code-davinci-edit-001)

will need to migrate to GPT-3.5 Turbo by January 4, 2024. You can use CreateChatCompletion or CreateChatCompletionStream instead.

GetEngine Retrieves an engine instance, providing basic information about the engine such as the owner and availability.

GetFile Retrieves a file instance, providing basic information about the file such as the file name and purpose.

Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. This API will be officially deprecated on January 4th, 2024. OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.

GetModel Retrieves a model instance, providing basic information about the model such as the owner and permissioning.

ListAssistantFiles Lists the currently available files for an assistant.

ListAssistants Lists the currently available assistants.

ListBatch API call to List batch.

ListEngines Lists the currently available engines, and provides basic information about each option such as the owner and availability.

ListFiles Lists the currently available files, and provides basic information about each file such as the file name and purpose.

Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. This API will be officially deprecated on January 4th, 2024. OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.

Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. This API will be officially deprecated on January 4th, 2024. OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.

ListFineTuningJobs lists fine tuning jobs.

ListMessage fetches all messages in the thread.

ListMessageFiles fetches all files attached to a message.

ListModels Lists the currently available models, and provides basic information about each model such as the model id and parent.

ListRunSteps lists run steps.

ListVectorStoreFiles Lists the currently available files for a vector store.

ListVectorStoreFiles Lists the currently available files for a vector store.

ListVectorStores Lists the currently available vector stores.

Moderations — perform a moderation api call over a string. Input can be an array or slice but a string will reduce the complexity.

ModifyAssistant modifies an assistant.

ModifyMessage modifies a message.

ModifyRun modifies a run.

ModifyThread modifies a thread.

ModifyVectorStore modifies a vector store.

RetrieveAssistant retrieves an assistant.

RetrieveAssistantFile retrieves an assistant file.

RetrieveBatch — API call to Retrieve batch.

RetrieveFineTuningJob retrieves a fine tuning job.

RetrieveMessage retrieves a Message.

RetrieveMessageFile fetches a message file.

RetrieveRun retrieves a run.

RetrieveRunStep retrieves a run step.

RetrieveThread retrieves a thread.

RetrieveVectorStore retrieves a vector store.

RetrieveVectorStoreFile retrieves a vector store file.

RetrieveVectorStoreFileBatch retrieves a vector store file batch.

SubmitToolOutputs submits tool outputs.

UploadBatchFile — upload batch file.

ClientConfig is a configuration of a client.

config := openai.DefaultConfig(os.Getenv("OPENAI_API_KEY")) port := os.Getenv("OPENAI_PROXY_PORT") proxyURL, err := url.Parse(fmt.Sprintf("http://localhost:%s", port)) if err != nil { panic(err) } transport := &http.Transport{ Proxy: http.ProxyURL(proxyURL), } config.HTTPClient = &http.Client{ Transport: transport, }

client := openai.NewClientWithConfig(config)

client.CreateChatCompletion( //nolint:errcheck // outside of the scope of this example. context.Background(), openai.ChatCompletionRequest{ // etc... }, )

func DefaultAnthropicConfig(apiKey, baseURL string) ClientConfig

func DefaultAzureConfig(apiKey, baseURL string) ClientConfig

azureKey := os.Getenv("AZURE_OPENAI_API_KEY") // Your azure API key azureEndpoint := os.Getenv("AZURE_OPENAI_ENDPOINT") // Your azure OpenAI endpoint config := openai.DefaultAzureConfig(azureKey, azureEndpoint) client := openai.NewClientWithConfig(config) resp, err := client.CreateChatCompletion( context.Background(), openai.ChatCompletionRequest{ Model: openai.GPT3Dot5Turbo, Messages: []openai.ChatCompletionMessage{ { Role: openai.ChatMessageRoleUser, Content: "Hello Azure OpenAI!", }, }, }, ) if err != nil { fmt.Printf("ChatCompletion error: %v\n", err) return }

fmt.Println(resp.Choices[0].Message.Content)

func DefaultConfig(authToken string) ClientConfig

type CodeInterpreterToolResources struct { FileIDs []string json:"file_ids,omitempty" }

type CodeInterpreterToolResourcesRequest struct { FileIDs []string json:"file_ids,omitempty" }

type CompletionChoice struct { Text string json:"text" Index int json:"index" FinishReason string json:"finish_reason" LogProbs LogprobResult json:"logprobs" }

CompletionChoice represents one of possible completions.

type CompletionRequest struct { Model string json:"model" Prompt any json:"prompt,omitempty" BestOf int json:"best_of,omitempty" Echo bool json:"echo,omitempty" FrequencyPenalty float32 json:"frequency_penalty,omitempty"

LogitBias map[[string](/builtin#string)][int](/builtin#int) `json:"logit_bias,omitempty"`


Store [bool](/builtin#bool) `json:"store,omitempty"`

Metadata        map[[string](/builtin#string)][string](/builtin#string) `json:"metadata,omitempty"`
LogProbs        [int](/builtin#int)               `json:"logprobs,omitempty"`
MaxTokens       [int](/builtin#int)               `json:"max_tokens,omitempty"`
N               [int](/builtin#int)               `json:"n,omitempty"`
PresencePenalty [float32](/builtin#float32)           `json:"presence_penalty,omitempty"`
Seed            *[int](/builtin#int)              `json:"seed,omitempty"`
Stop            [][string](/builtin#string)          `json:"stop,omitempty"`
Stream          [bool](/builtin#bool)              `json:"stream,omitempty"`
Suffix          [string](/builtin#string)            `json:"suffix,omitempty"`
Temperature     [float32](/builtin#float32)           `json:"temperature,omitempty"`
TopP            [float32](/builtin#float32)           `json:"top_p,omitempty"`
User            [string](/builtin#string)            `json:"user,omitempty"`

StreamOptions *[StreamOptions](#StreamOptions) `json:"stream_options,omitempty"`

}

CompletionRequest represents a request structure for completion API.

type CompletionResponse struct { ID string json:"id" Object string json:"object" Created int64 json:"created" Model string json:"model" Choices []CompletionChoice json:"choices" Usage *Usage json:"usage,omitempty"

}

CompletionResponse represents a response structure for completion API.

func (h *CompletionResponse) GetRateLimitHeaders() RateLimitHeaders

func (h *CompletionResponse) SetHeader(header http.Header)

type CompletionStream struct {

}

func (stream CompletionStream) Close() error

func (stream CompletionStream) Recv() (response T, err error)

func (stream CompletionStream) RecvRaw() ([]byte, error)

type CompletionTokensDetails struct { AudioTokens int json:"audio_tokens" ReasoningTokens int json:"reasoning_tokens" AcceptedPredictionTokens int json:"accepted_prediction_tokens" RejectedPredictionTokens int json:"rejected_prediction_tokens" }

CompletionTokensDetails Breakdown of tokens used in a completion.

type ContentFilterResults added in v1.14.1

type ContentFilterResults struct { Hate Hate json:"hate,omitempty" SelfHarm SelfHarm json:"self_harm,omitempty" Sexual Sexual json:"sexual,omitempty" Violence Violence json:"violence,omitempty" JailBreak JailBreak json:"jailbreak,omitempty" Profanity Profanity json:"profanity,omitempty" }

type CreateBatchRequest struct { InputFileID string json:"input_file_id" Endpoint BatchEndpoint json:"endpoint" CompletionWindow string json:"completion_window" Metadata map[string]any json:"metadata" }

type CreateBatchWithUploadFileRequest struct { Endpoint BatchEndpoint json:"endpoint" CompletionWindow string json:"completion_window" Metadata map[string]any json:"metadata" UploadBatchFileRequest }

type CreateSpeechRequest struct { Model SpeechModel json:"model" Input string json:"input" Voice SpeechVoice json:"voice" Instructions string json:"instructions,omitempty"
ResponseFormat SpeechResponseFormat json:"response_format,omitempty" Speed float64 json:"speed,omitempty"
}

type EditsChoice struct { Text string json:"text" Index int json:"index" }

EditsChoice represents one of possible edits.

type EditsRequest struct { Model *string json:"model,omitempty" Input string json:"input,omitempty" Instruction string json:"instruction,omitempty" N int json:"n,omitempty" Temperature float32 json:"temperature,omitempty" TopP float32 json:"top_p,omitempty" }

EditsRequest represents a request structure for Edits API.

type EditsResponse struct { Object string json:"object" Created int64 json:"created" Usage Usage json:"usage" Choices []EditsChoice json:"choices"

}

EditsResponse represents a response structure for Edits API.

func (h *EditsResponse) GetRateLimitHeaders() RateLimitHeaders

func (h *EditsResponse) SetHeader(header http.Header)

type Embedding struct { Object string json:"object" Embedding []float32 json:"embedding" Index int json:"index" }

Embedding is a special format of data representation that can be easily utilized by machine learning models and algorithms. The embedding is an information dense representation of the semantic meaning of a piece of text. Each embedding is a vector of floating point numbers, such that the distance between two embeddings in the vector space is correlated with semantic similarity between two inputs in the original format. For example, if two texts are similar, then their vector representations should also be similar.

DotProduct calculates the dot product of the embedding vector with another embedding vector. Both vectors must have the same length; otherwise, an ErrVectorLengthMismatch is returned. The method returns the calculated dot product as a float32 value.

type EmbeddingEncodingFormat string

EmbeddingEncodingFormat is the format of the embeddings data. Currently, only "float" and "base64" are supported, however, "base64" is not officially documented. If not specified OpenAI will use "float".

const ( EmbeddingEncodingFormatFloat EmbeddingEncodingFormat = "float" EmbeddingEncodingFormatBase64 EmbeddingEncodingFormat = "base64" )

EmbeddingModel enumerates the models which can be used to generate Embedding vectors.

const (

AdaSimilarity         [EmbeddingModel](#EmbeddingModel) = "text-similarity-ada-001"
BabbageSimilarity     [EmbeddingModel](#EmbeddingModel) = "text-similarity-babbage-001"
CurieSimilarity       [EmbeddingModel](#EmbeddingModel) = "text-similarity-curie-001"
DavinciSimilarity     [EmbeddingModel](#EmbeddingModel) = "text-similarity-davinci-001"
AdaSearchDocument     [EmbeddingModel](#EmbeddingModel) = "text-search-ada-doc-001"
AdaSearchQuery        [EmbeddingModel](#EmbeddingModel) = "text-search-ada-query-001"
BabbageSearchDocument [EmbeddingModel](#EmbeddingModel) = "text-search-babbage-doc-001"
BabbageSearchQuery    [EmbeddingModel](#EmbeddingModel) = "text-search-babbage-query-001"
CurieSearchDocument   [EmbeddingModel](#EmbeddingModel) = "text-search-curie-doc-001"
CurieSearchQuery      [EmbeddingModel](#EmbeddingModel) = "text-search-curie-query-001"
DavinciSearchDocument [EmbeddingModel](#EmbeddingModel) = "text-search-davinci-doc-001"
DavinciSearchQuery    [EmbeddingModel](#EmbeddingModel) = "text-search-davinci-query-001"
AdaCodeSearchCode     [EmbeddingModel](#EmbeddingModel) = "code-search-ada-code-001"
AdaCodeSearchText     [EmbeddingModel](#EmbeddingModel) = "code-search-ada-text-001"
BabbageCodeSearchCode [EmbeddingModel](#EmbeddingModel) = "code-search-babbage-code-001"
BabbageCodeSearchText [EmbeddingModel](#EmbeddingModel) = "code-search-babbage-text-001"

AdaEmbeddingV2  [EmbeddingModel](#EmbeddingModel) = "text-embedding-ada-002"
SmallEmbedding3 [EmbeddingModel](#EmbeddingModel) = "text-embedding-3-small"
LargeEmbedding3 [EmbeddingModel](#EmbeddingModel) = "text-embedding-3-large"

)

type EmbeddingRequest struct { Input any json:"input" Model EmbeddingModel json:"model" User string json:"user,omitempty" EncodingFormat EmbeddingEncodingFormat json:"encoding_format,omitempty"

Dimensions [int](/builtin#int) `json:"dimensions,omitempty"`


ExtraBody map[[string](/builtin#string)][any](/builtin#any) `json:"extra_body,omitempty"`

}

func (r EmbeddingRequest) Convert() EmbeddingRequest

type EmbeddingRequestConverter interface {

Convert() [EmbeddingRequest](#EmbeddingRequest)

}

type EmbeddingRequestStrings struct {

Input [][string](/builtin#string) `json:"input"`


Model [EmbeddingModel](#EmbeddingModel) `json:"model"`

User [string](/builtin#string) `json:"user"`


EncodingFormat [EmbeddingEncodingFormat](#EmbeddingEncodingFormat) `json:"encoding_format,omitempty"`


Dimensions [int](/builtin#int) `json:"dimensions,omitempty"`


ExtraBody map[[string](/builtin#string)][any](/builtin#any) `json:"extra_body,omitempty"`

}

EmbeddingRequestStrings is the input to a create embeddings request with a slice of strings.

func (r EmbeddingRequestStrings) Convert() EmbeddingRequest

type EmbeddingRequestTokens struct {

Input [][][int](/builtin#int) `json:"input"`


Model [EmbeddingModel](#EmbeddingModel) `json:"model"`

User [string](/builtin#string) `json:"user"`


EncodingFormat [EmbeddingEncodingFormat](#EmbeddingEncodingFormat) `json:"encoding_format,omitempty"`


Dimensions [int](/builtin#int) `json:"dimensions,omitempty"`


ExtraBody map[[string](/builtin#string)][any](/builtin#any) `json:"extra_body,omitempty"`

}

func (r EmbeddingRequestTokens) Convert() EmbeddingRequest

type EmbeddingResponse struct { Object string json:"object" Data []Embedding json:"data" Model EmbeddingModel json:"model" Usage Usage json:"usage"

}

EmbeddingResponse is the response from a Create embeddings request.

func (h *EmbeddingResponse) GetRateLimitHeaders() RateLimitHeaders

func (h *EmbeddingResponse) SetHeader(header http.Header)

type EmbeddingResponseBase64 struct { Object string json:"object" Data []Base64Embedding json:"data" Model EmbeddingModel json:"model" Usage Usage json:"usage"

}

EmbeddingResponseBase64 is the response from a Create embeddings request with base64 encoding format.

func (h *EmbeddingResponseBase64) GetRateLimitHeaders() RateLimitHeaders

func (h *EmbeddingResponseBase64) Header() http.Header

func (h *EmbeddingResponseBase64) SetHeader(header http.Header)

ToEmbeddingResponse converts an embeddingResponseBase64 to an EmbeddingResponse.

type Engine struct { ID string json:"id" Object string json:"object" Owner string json:"owner" Ready bool json:"ready"

}

Engine struct represents an engine from the OpenAI API.

func (h *Engine) GetRateLimitHeaders() RateLimitHeaders

type EnginesList struct { Engines []Engine json:"data"

}

EnginesList is a list of engines.

func (h *EnginesList) GetRateLimitHeaders() RateLimitHeaders

func (h *EnginesList) SetHeader(header http.Header)

type ErrorResponse struct { Error *APIError json:"error,omitempty" }

type File struct { Bytes int json:"bytes" CreatedAt int64 json:"created_at" ID string json:"id" FileName string json:"filename" Object string json:"object" Status string json:"status" Purpose string json:"purpose" StatusDetails string json:"status_details"

}

File struct represents an OpenAI file.

func (h *File) GetRateLimitHeaders() RateLimitHeaders

type FileBytesRequest struct {

Name [string](/builtin#string)

Bytes [][byte](/builtin#byte)

Purpose [PurposeType](#PurposeType)

}

FileBytesRequest represents a file upload request.

type FileRequest struct { FileName string json:"file" FilePath string json:"-" Purpose string json:"purpose" }

type FileSearchToolResources struct { VectorStoreIDs []string json:"vector_store_ids,omitempty" }

type FileSearchToolResourcesRequest struct { VectorStoreIDs []string json:"vector_store_ids,omitempty" VectorStores []VectorStoreToolResources json:"vector_stores,omitempty" }

type FilesList struct { Files []File json:"data"

}

FilesList is a list of files that belong to the user or organization.

func (h *FilesList) GetRateLimitHeaders() RateLimitHeaders

type FineTune struct { ID string json:"id" Object string json:"object" Model string json:"model" CreatedAt int64 json:"created_at" FineTuneEventList []FineTuneEvent json:"events,omitempty" FineTunedModel string json:"fine_tuned_model" HyperParams FineTuneHyperParams json:"hyperparams" OrganizationID string json:"organization_id" ResultFiles []File json:"result_files" Status string json:"status" ValidationFiles []File json:"validation_files" TrainingFiles []File json:"training_files" UpdatedAt int64 json:"updated_at"

}

Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. This API will be officially deprecated on January 4th, 2024. OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.

func (h *FineTune) GetRateLimitHeaders() RateLimitHeaders

type FineTuneDeleteResponse struct { ID string json:"id" Object string json:"object" Deleted bool json:"deleted"

}

Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. This API will be officially deprecated on January 4th, 2024. OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.

func (h *FineTuneDeleteResponse) GetRateLimitHeaders() RateLimitHeaders

func (h *FineTuneDeleteResponse) Header() http.Header

func (h *FineTuneDeleteResponse) SetHeader(header http.Header)

type FineTuneEvent struct { Object string json:"object" CreatedAt int64 json:"created_at" Level string json:"level" Message string json:"message" }

Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. This API will be officially deprecated on January 4th, 2024. OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.

type FineTuneEventList struct { Object string json:"object" Data []FineTuneEvent json:"data"

}

Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. This API will be officially deprecated on January 4th, 2024. OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.

func (h *FineTuneEventList) GetRateLimitHeaders() RateLimitHeaders

func (h *FineTuneEventList) SetHeader(header http.Header)

type FineTuneHyperParams struct { BatchSize int json:"batch_size" LearningRateMultiplier float64 json:"learning_rate_multiplier" Epochs int json:"n_epochs" PromptLossWeight float64 json:"prompt_loss_weight" }

Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. This API will be officially deprecated on January 4th, 2024. OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.

type FineTuneList struct { Object string json:"object" Data []FineTune json:"data"

}

Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. This API will be officially deprecated on January 4th, 2024. OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.

func (h *FineTuneList) GetRateLimitHeaders() RateLimitHeaders

func (h *FineTuneList) SetHeader(header http.Header)

type FineTuneModelDeleteResponse struct { ID string json:"id" Object string json:"object" Deleted bool json:"deleted"

}

FineTuneModelDeleteResponse represents the deletion status of a fine-tuned model.

func (h *FineTuneModelDeleteResponse) GetRateLimitHeaders() RateLimitHeaders

func (h *FineTuneModelDeleteResponse) Header() http.Header

func (h *FineTuneModelDeleteResponse) SetHeader(header http.Header)

type FineTuneRequest struct { TrainingFile string json:"training_file" ValidationFile string json:"validation_file,omitempty" Model string json:"model,omitempty" Epochs int json:"n_epochs,omitempty" BatchSize int json:"batch_size,omitempty" LearningRateMultiplier float32 json:"learning_rate_multiplier,omitempty" PromptLossRate float32 json:"prompt_loss_rate,omitempty" ComputeClassificationMetrics bool json:"compute_classification_metrics,omitempty" ClassificationClasses int json:"classification_n_classes,omitempty" ClassificationPositiveClass string json:"classification_positive_class,omitempty" ClassificationBetas []float32 json:"classification_betas,omitempty" Suffix string json:"suffix,omitempty" }

Deprecated: On August 22nd, 2023, OpenAI announced the deprecation of the /v1/fine-tunes API. This API will be officially deprecated on January 4th, 2024. OpenAI recommends to migrate to the new fine tuning API implemented in fine_tuning_job.go.

type FineTuningJob struct { ID string json:"id" Object string json:"object" CreatedAt int64 json:"created_at" FinishedAt int64 json:"finished_at" Model string json:"model" FineTunedModel string json:"fine_tuned_model,omitempty" OrganizationID string json:"organization_id" Status string json:"status" Hyperparameters Hyperparameters json:"hyperparameters" TrainingFile string json:"training_file" ValidationFile string json:"validation_file,omitempty" ResultFiles []string json:"result_files" TrainedTokens int json:"trained_tokens"

}

func (h *FineTuningJob) GetRateLimitHeaders() RateLimitHeaders

func (h *FineTuningJob) SetHeader(header http.Header)

type FineTuningJobEvent struct { Object string json:"object" ID string json:"id" CreatedAt int json:"created_at" Level string json:"level" Message string json:"message" Data any json:"data" Type string json:"type" }

type FineTuningJobEventList struct { Object string json:"object" Data []FineTuneEvent json:"data" HasMore bool json:"has_more"

}

func (h *FineTuningJobEventList) GetRateLimitHeaders() RateLimitHeaders

func (h *FineTuningJobEventList) Header() http.Header

func (h *FineTuningJobEventList) SetHeader(header http.Header)

type FineTuningJobRequest struct { TrainingFile string json:"training_file" ValidationFile string json:"validation_file,omitempty" Model string json:"model,omitempty" Hyperparameters *Hyperparameters json:"hyperparameters,omitempty" Suffix string json:"suffix,omitempty" }

const ( FinishReasonStop FinishReason = "stop" FinishReasonLength FinishReason = "length" FinishReasonFunctionCall FinishReason = "function_call" FinishReasonToolCalls FinishReason = "tool_calls" FinishReasonContentFilter FinishReason = "content_filter" FinishReasonNull FinishReason = "null" )

type FunctionCall struct { Name string json:"name,omitempty"

Arguments [string](/builtin#string) `json:"arguments,omitempty"`

}

type FunctionDefine = FunctionDefinition

Deprecated: use FunctionDefinition instead.

type FunctionDefinition struct { Name string json:"name" Description string json:"description,omitempty" Strict bool json:"strict,omitempty"

Parameters [any](/builtin#any) `json:"parameters"`

}

type Hate struct { Filtered bool json:"filtered" Severity string json:"severity,omitempty" }

type Hyperparameters struct { Epochs any json:"n_epochs,omitempty" LearningRateMultiplier any json:"learning_rate_multiplier,omitempty" BatchSize any json:"batch_size,omitempty" }

type ImageEditRequest struct { Image io.Reader json:"image,omitempty" Mask io.Reader json:"mask,omitempty" Prompt string json:"prompt,omitempty" Model string json:"model,omitempty" N int json:"n,omitempty" Size string json:"size,omitempty" ResponseFormat string json:"response_format,omitempty" Quality string json:"quality,omitempty" User string json:"user,omitempty" }

ImageEditRequest represents the request structure for the image API. Use WrapReader to wrap an io.Reader with filename and Content-type.

type ImageFile struct { FileID string json:"file_id" }

type ImageRequest struct { Prompt string json:"prompt,omitempty" Model string json:"model,omitempty" N int json:"n,omitempty" Quality string json:"quality,omitempty" Size string json:"size,omitempty" Style string json:"style,omitempty" ResponseFormat string json:"response_format,omitempty" User string json:"user,omitempty" Background string json:"background,omitempty" Moderation string json:"moderation,omitempty" OutputCompression int json:"output_compression,omitempty" OutputFormat string json:"output_format,omitempty" }

ImageRequest represents the request structure for the image API.

type ImageResponse struct { Created int64 json:"created,omitempty" Data []ImageResponseDataInner json:"data,omitempty" Usage ImageResponseUsage json:"usage,omitempty"

}

ImageResponse represents a response structure for image API.

func (h *ImageResponse) GetRateLimitHeaders() RateLimitHeaders

func (h *ImageResponse) SetHeader(header http.Header)

type ImageResponseDataInner struct { URL string json:"url,omitempty" B64JSON string json:"b64_json,omitempty" RevisedPrompt string json:"revised_prompt,omitempty" }

ImageResponseDataInner represents a response data structure for image API.

type ImageResponseInputTokensDetails struct { TextTokens int json:"text_tokens,omitempty" ImageTokens int json:"image_tokens,omitempty" }

ImageResponseInputTokensDetails represents the token breakdown for input tokens.

type ImageResponseUsage struct { TotalTokens int json:"total_tokens,omitempty" InputTokens int json:"input_tokens,omitempty" OutputTokens int json:"output_tokens,omitempty" InputTokensDetails ImageResponseInputTokensDetails json:"input_tokens_details,omitempty" }

ImageResponseUsage represents the token usage information for image API.

type ImageURL struct { URL string json:"url" Detail string json:"detail" }

const ( ImageURLDetailHigh ImageURLDetail = "high" ImageURLDetailLow ImageURLDetail = "low" ImageURLDetailAuto ImageURLDetail = "auto" )

type ImageVariRequest struct { Image io.Reader json:"image,omitempty" Model string json:"model,omitempty" N int json:"n,omitempty" Size string json:"size,omitempty" ResponseFormat string json:"response_format,omitempty" User string json:"user,omitempty" }

ImageVariRequest represents the request structure for the image API. Use WrapReader to wrap an io.Reader with filename and Content-type.

type InnerError struct { Code string json:"code,omitempty" ContentFilterResults ContentFilterResults json:"content_filter_result,omitempty" }

InnerError Azure Content filtering. Only valid for Azure OpenAI Service.

type JailBreak struct { Filtered bool json:"filtered" Detected bool json:"detected" }

type ListBatchResponse struct { Object string json:"object" Data []Batch json:"data" FirstID string json:"first_id" LastID string json:"last_id" HasMore bool json:"has_more"

}

func (h *ListBatchResponse) GetRateLimitHeaders() RateLimitHeaders

func (h *ListBatchResponse) SetHeader(header http.Header)

type ListFineTuningJobEventsParameter func(*listFineTuningJobEventsParameters)

func ListFineTuningJobEventsWithAfter(after string) ListFineTuningJobEventsParameter

func ListFineTuningJobEventsWithLimit(limit int) ListFineTuningJobEventsParameter

type LogProb struct { Token string json:"token" LogProb float64 json:"logprob" Bytes []byte json:"bytes,omitempty"

TopLogProbs [][TopLogProbs](#TopLogProbs) `json:"top_logprobs"`

}

LogProb represents the probability information for a token.

type LogProbs struct {

Content [][LogProb](#LogProb) `json:"content"`

}

LogProbs is the top-level structure containing the log probability information.

type LogprobResult struct { Tokens []string json:"tokens" TokenLogprobs []float32 json:"token_logprobs" TopLogprobs []map[string]float32 json:"top_logprobs" TextOffset []int json:"text_offset" }

LogprobResult represents logprob result of Choice.

type Message struct { ID string json:"id" Object string json:"object" CreatedAt int json:"created_at" ThreadID string json:"thread_id" Role string json:"role" Content []MessageContent json:"content" FileIds []string json:"file_ids" AssistantID *string json:"assistant_id,omitempty" RunID *string json:"run_id,omitempty" Metadata map[string]any json:"metadata"

}

func (h *Message) GetRateLimitHeaders() RateLimitHeaders

type MessageContent added in v1.17.6

type MessageContent struct { Type string json:"type" Text *MessageText json:"text,omitempty" ImageFile *ImageFile json:"image_file,omitempty" ImageURL *ImageURL json:"image_url,omitempty" }

type MessageDeletionStatus struct { ID string json:"id" Object string json:"object" Deleted bool json:"deleted"

}

func (h *MessageDeletionStatus) GetRateLimitHeaders() RateLimitHeaders

func (h *MessageDeletionStatus) Header() http.Header

func (h *MessageDeletionStatus) SetHeader(header http.Header)

type MessageFile struct { ID string json:"id" Object string json:"object" CreatedAt int json:"created_at" MessageID string json:"message_id"

}

func (h *MessageFile) GetRateLimitHeaders() RateLimitHeaders

func (h *MessageFile) SetHeader(header http.Header)

type MessageFilesList struct { MessageFiles []MessageFile json:"data"

}

func (h *MessageFilesList) GetRateLimitHeaders() RateLimitHeaders

func (h *MessageFilesList) SetHeader(header http.Header)

type MessageRequest struct { Role string json:"role" Content string json:"content" FileIds []string json:"file_ids,omitempty" Metadata map[string]any json:"metadata,omitempty" Attachments []ThreadAttachment json:"attachments,omitempty" }

type MessageText struct { Value string json:"value" Annotations []any json:"annotations" }

type MessagesList struct { Messages []Message json:"data"

Object  [string](/builtin#string)  `json:"object"`
FirstID *[string](/builtin#string) `json:"first_id"`
LastID  *[string](/builtin#string) `json:"last_id"`
HasMore [bool](/builtin#bool)    `json:"has_more"`

}

func (h *MessagesList) GetRateLimitHeaders() RateLimitHeaders

func (h *MessagesList) SetHeader(header http.Header)

type Model struct { CreatedAt int64 json:"created" ID string json:"id" Object string json:"object" OwnedBy string json:"owned_by" Permission []Permission json:"permission" Root string json:"root" Parent string json:"parent"

}

Model struct represents an OpenAI model.

func (h *Model) GetRateLimitHeaders() RateLimitHeaders

type ModelsList struct { Models []Model json:"data"

}

ModelsList is a list of models, including those that belong to the user or organization.

func (h *ModelsList) GetRateLimitHeaders() RateLimitHeaders

func (h *ModelsList) SetHeader(header http.Header)

type ModerationRequest struct { Input string json:"input,omitempty" Model string json:"model,omitempty" }

ModerationRequest represents a request structure for moderation API.

type ModerationResponse struct { ID string json:"id" Model string json:"model" Results []Result json:"results"

}

ModerationResponse represents a response structure for moderation API.

func (h *ModerationResponse) GetRateLimitHeaders() RateLimitHeaders

func (h *ModerationResponse) SetHeader(header http.Header)

type ModifyThreadRequest struct { Metadata map[string]any json:"metadata" ToolResources *ToolResources json:"tool_resources,omitempty" }

type Pagination struct { }

type Permission struct { CreatedAt int64 json:"created" ID string json:"id" Object string json:"object" AllowCreateEngine bool json:"allow_create_engine" AllowSampling bool json:"allow_sampling" AllowLogprobs bool json:"allow_logprobs" AllowSearchIndices bool json:"allow_search_indices" AllowView bool json:"allow_view" AllowFineTuning bool json:"allow_fine_tuning" Organization string json:"organization" Group interface{} json:"group" IsBlocking bool json:"is_blocking" }

Permission struct represents an OpenAI permission.

type Prediction struct { Content string json:"content" Type string json:"type" }

type Profanity struct { Filtered bool json:"filtered" Detected bool json:"detected" }

type PromptAnnotation struct { PromptIndex int json:"prompt_index,omitempty" ContentFilterResults ContentFilterResults json:"content_filter_results,omitempty" }

type PromptFilterResult struct { Index int json:"index" ContentFilterResults ContentFilterResults json:"content_filter_results,omitempty" }

type PromptTokensDetails struct { AudioTokens int json:"audio_tokens" CachedTokens int json:"cached_tokens" }

PromptTokensDetails is a breakdown of the tokens used in the prompt.

PurposeType represents the purpose of the file when uploading.

const ( PurposeFineTune PurposeType = "fine-tune" PurposeFineTuneResults PurposeType = "fine-tune-results" PurposeAssistants PurposeType = "assistants" PurposeAssistantsOutput PurposeType = "assistants_output" PurposeBatch PurposeType = "batch" )

type RateLimitHeaders struct { RemainingRequests int json:"x-ratelimit-remaining-requests" RemainingTokens int json:"x-ratelimit-remaining-tokens" }

RateLimitHeaders struct represents OpenAI rate limit headers.

func (h *RawResponse) GetRateLimitHeaders() RateLimitHeaders

func (h *RawResponse) SetHeader(header http.Header)

type ReasoningValidator struct{}

ReasoningValidator handles validation for reasoning model requests.

func NewReasoningValidator() *ReasoningValidator

NewReasoningValidator creates a new validator for reasoning models.

Validate performs all validation checks for reasoning models.

RequestError provides information about generic request errors.

type RequiredActionType string

const ( RequiredActionTypeSubmitToolOutputs RequiredActionType = "submit_tool_outputs" )

type Response interface { }

type Result struct { Categories ResultCategories json:"categories" CategoryScores ResultCategoryScores json:"category_scores" Flagged bool json:"flagged" }

Result represents one of the possible moderation results.

type ResultCategories struct { Hate bool json:"hate" HateThreatening bool json:"hate/threatening" Harassment bool json:"harassment" HarassmentThreatening bool json:"harassment/threatening" SelfHarm bool json:"self-harm" SelfHarmIntent bool json:"self-harm/intent" SelfHarmInstructions bool json:"self-harm/instructions" Sexual bool json:"sexual" SexualMinors bool json:"sexual/minors" Violence bool json:"violence" ViolenceGraphic bool json:"violence/graphic" }

ResultCategories represents Categories of Result.

type ResultCategoryScores struct { Hate float32 json:"hate" HateThreatening float32 json:"hate/threatening" Harassment float32 json:"harassment" HarassmentThreatening float32 json:"harassment/threatening" SelfHarm float32 json:"self-harm" SelfHarmIntent float32 json:"self-harm/intent" SelfHarmInstructions float32 json:"self-harm/instructions" Sexual float32 json:"sexual" SexualMinors float32 json:"sexual/minors" Violence float32 json:"violence" ViolenceGraphic float32 json:"violence/graphic" }

ResultCategoryScores represents CategoryScores of Result.

type Run struct { ID string json:"id" Object string json:"object" CreatedAt int64 json:"created_at" ThreadID string json:"thread_id" AssistantID string json:"assistant_id" Status RunStatus json:"status" RequiredAction *RunRequiredAction json:"required_action,omitempty" LastError *RunLastError json:"last_error,omitempty" ExpiresAt int64 json:"expires_at" StartedAt *int64 json:"started_at,omitempty" CancelledAt *int64 json:"cancelled_at,omitempty" FailedAt *int64 json:"failed_at,omitempty" CompletedAt *int64 json:"completed_at,omitempty" Model string json:"model" Instructions string json:"instructions,omitempty" Tools []Tool json:"tools" FileIDS []string json:"file_ids" Metadata map[string]any json:"metadata" Usage Usage json:"usage,omitempty"

Temperature *[float32](/builtin#float32) `json:"temperature,omitempty"`


MaxPromptTokens [int](/builtin#int) `json:"max_prompt_tokens,omitempty"`


MaxCompletionTokens [int](/builtin#int) `json:"max_completion_tokens,omitempty"`

TruncationStrategy *[ThreadTruncationStrategy](#ThreadTruncationStrategy) `json:"truncation_strategy,omitempty"`

}

func (h *Run) GetRateLimitHeaders() RateLimitHeaders

const ( RunErrorServerError RunError = "server_error" RunErrorRateLimitExceeded RunError = "rate_limit_exceeded" )

type RunLastError struct { Code RunError json:"code" Message string json:"message" }

type RunList struct { Runs []Run json:"data"

}

RunList is a list of runs.

func (h *RunList) GetRateLimitHeaders() RateLimitHeaders

type RunModifyRequest struct { Metadata map[string]any json:"metadata,omitempty" }

type RunRequest struct { AssistantID string json:"assistant_id" Model string json:"model,omitempty" Instructions string json:"instructions,omitempty" AdditionalInstructions string json:"additional_instructions,omitempty" AdditionalMessages []ThreadMessage json:"additional_messages,omitempty" Tools []Tool json:"tools,omitempty" Metadata map[string]any json:"metadata,omitempty"

Temperature *[float32](/builtin#float32) `json:"temperature,omitempty"`
TopP        *[float32](/builtin#float32) `json:"top_p,omitempty"`


MaxPromptTokens [int](/builtin#int) `json:"max_prompt_tokens,omitempty"`


MaxCompletionTokens [int](/builtin#int) `json:"max_completion_tokens,omitempty"`


TruncationStrategy *[ThreadTruncationStrategy](#ThreadTruncationStrategy) `json:"truncation_strategy,omitempty"`


ToolChoice [any](/builtin#any) `json:"tool_choice,omitempty"`

ResponseFormat [any](/builtin#any) `json:"response_format,omitempty"`

ParallelToolCalls [any](/builtin#any) `json:"parallel_tool_calls,omitempty"`

}

type RunRequiredAction struct { Type RequiredActionType json:"type" SubmitToolOutputs *SubmitToolOutputs json:"submit_tool_outputs,omitempty" }

const ( RunStatusQueued RunStatus = "queued" RunStatusInProgress RunStatus = "in_progress" RunStatusRequiresAction RunStatus = "requires_action" RunStatusCancelling RunStatus = "cancelling" RunStatusFailed RunStatus = "failed" RunStatusCompleted RunStatus = "completed" RunStatusIncomplete RunStatus = "incomplete" RunStatusExpired RunStatus = "expired" RunStatusCancelled RunStatus = "cancelled" )

type RunStep struct { ID string json:"id" Object string json:"object" CreatedAt int64 json:"created_at" AssistantID string json:"assistant_id" ThreadID string json:"thread_id" RunID string json:"run_id" Type RunStepType json:"type" Status RunStepStatus json:"status" StepDetails StepDetails json:"step_details" LastError *RunLastError json:"last_error,omitempty" ExpiredAt *int64 json:"expired_at,omitempty" CancelledAt *int64 json:"cancelled_at,omitempty" FailedAt *int64 json:"failed_at,omitempty" CompletedAt *int64 json:"completed_at,omitempty" Metadata map[string]any json:"metadata"

}

func (h *RunStep) GetRateLimitHeaders() RateLimitHeaders

type RunStepList struct { RunSteps []RunStep json:"data"

FirstID [string](/builtin#string) `json:"first_id"`
LastID  [string](/builtin#string) `json:"last_id"`
HasMore [bool](/builtin#bool)   `json:"has_more"`

}

RunStepList is a list of steps.

func (h *RunStepList) GetRateLimitHeaders() RateLimitHeaders

func (h *RunStepList) SetHeader(header http.Header)

const ( RunStepStatusInProgress RunStepStatus = "in_progress" RunStepStatusCancelling RunStepStatus = "cancelled" RunStepStatusFailed RunStepStatus = "failed" RunStepStatusCompleted RunStepStatus = "completed" RunStepStatusExpired RunStepStatus = "expired" )

const ( RunStepTypeMessageCreation RunStepType = "message_creation" RunStepTypeToolCalls RunStepType = "tool_calls" )

type SelfHarm struct { Filtered bool json:"filtered" Severity string json:"severity,omitempty" }

const ( ServiceTierAuto ServiceTier = "auto" ServiceTierDefault ServiceTier = "default" ServiceTierFlex ServiceTier = "flex" ServiceTierPriority ServiceTier = "priority" )

type Sexual struct { Filtered bool json:"filtered" Severity string json:"severity,omitempty" }

const ( TTSModel1 SpeechModel = "tts-1" TTSModel1HD SpeechModel = "tts-1-hd" TTSModelCanary SpeechModel = "canary-tts" TTSModelGPT4oMini SpeechModel = "gpt-4o-mini-tts" )

type SpeechResponseFormat string

const ( SpeechResponseFormatMp3 SpeechResponseFormat = "mp3" SpeechResponseFormatOpus SpeechResponseFormat = "opus" SpeechResponseFormatAac SpeechResponseFormat = "aac" SpeechResponseFormatFlac SpeechResponseFormat = "flac" SpeechResponseFormatWav SpeechResponseFormat = "wav" SpeechResponseFormatPcm SpeechResponseFormat = "pcm" )

const ( VoiceAlloy SpeechVoice = "alloy" VoiceAsh SpeechVoice = "ash" VoiceBallad SpeechVoice = "ballad" VoiceCoral SpeechVoice = "coral" VoiceEcho SpeechVoice = "echo" VoiceFable SpeechVoice = "fable" VoiceOnyx SpeechVoice = "onyx" VoiceNova SpeechVoice = "nova" VoiceShimmer SpeechVoice = "shimmer" VoiceVerse SpeechVoice = "verse" )

type StaticChunkingStrategy struct { MaxChunkSizeTokens int json:"max_chunk_size_tokens" ChunkOverlapTokens int json:"chunk_overlap_tokens" }

type StepDetails struct { Type RunStepType json:"type" MessageCreation *StepDetailsMessageCreation json:"message_creation,omitempty" ToolCalls []ToolCall json:"tool_calls,omitempty" }

type StepDetailsMessageCreation struct { MessageID string json:"message_id" }

type StreamOptions struct {

IncludeUsage [bool](/builtin#bool) `json:"include_usage,omitempty"`

}

type SubmitToolOutputs struct { ToolCalls []ToolCall json:"tool_calls" }

type SubmitToolOutputsRequest struct { ToolOutputs []ToolOutput json:"tool_outputs" }

type Thread struct { ID string json:"id" Object string json:"object" CreatedAt int64 json:"created_at" Metadata map[string]any json:"metadata" ToolResources ToolResources json:"tool_resources,omitempty"

}

func (h *Thread) GetRateLimitHeaders() RateLimitHeaders

type ThreadAttachment struct { FileID string json:"file_id" Tools []ThreadAttachmentTool json:"tools" }

type ThreadAttachmentTool struct { Type string json:"type" }

type ThreadDeleteResponse struct { ID string json:"id" Object string json:"object" Deleted bool json:"deleted"

}

func (h *ThreadDeleteResponse) GetRateLimitHeaders() RateLimitHeaders

func (h *ThreadDeleteResponse) Header() http.Header

func (h *ThreadDeleteResponse) SetHeader(header http.Header)

type ThreadMessage struct { Role ThreadMessageRole json:"role" Content string json:"content" FileIDs []string json:"file_ids,omitempty" Attachments []ThreadAttachment json:"attachments,omitempty" Metadata map[string]any json:"metadata,omitempty" }

const ( ThreadMessageRoleAssistant ThreadMessageRole = "assistant" ThreadMessageRoleUser ThreadMessageRole = "user" )

type ThreadRequest struct { Messages []ThreadMessage json:"messages,omitempty" Metadata map[string]any json:"metadata,omitempty" ToolResources *ToolResourcesRequest json:"tool_resources,omitempty" }

type Tool struct { Type ToolType json:"type" Function *FunctionDefinition json:"function,omitempty" }

type ToolCall struct {

Index    *[int](/builtin#int)         `json:"index,omitempty"`
ID       [string](/builtin#string)       `json:"id,omitempty"`
Type     [ToolType](#ToolType)     `json:"type"`
Function [FunctionCall](#FunctionCall) `json:"function"`

}

type ToolChoice struct { Type ToolType json:"type" Function ToolFunction json:"function,omitempty" }

type ToolFunction struct { Name string json:"name" }

type ToolOutput struct { ToolCallID string json:"tool_call_id" Output any json:"output" }

type ToolResources struct { CodeInterpreter *CodeInterpreterToolResources json:"code_interpreter,omitempty" FileSearch *FileSearchToolResources json:"file_search,omitempty" }

type ToolResourcesRequest struct { CodeInterpreter *CodeInterpreterToolResourcesRequest json:"code_interpreter,omitempty" FileSearch *FileSearchToolResourcesRequest json:"file_search,omitempty" }

const ( ToolTypeFunction ToolType = "function" )

type TopLogProbs struct { Token string json:"token" LogProb float64 json:"logprob" Bytes []byte json:"bytes,omitempty" }

type TranscriptionTimestampGranularity string

const ( TranscriptionTimestampGranularityWord TranscriptionTimestampGranularity = "word" TranscriptionTimestampGranularitySegment TranscriptionTimestampGranularity = "segment" )

type TruncationStrategy string

TruncationStrategy defines the truncation strategies available for thread management in an assistant.

type UploadBatchFileRequest struct { FileName string Lines []BatchLineItem }

func (r *UploadBatchFileRequest) AddChatCompletion(customerID string, body ChatCompletionRequest)

func (r *UploadBatchFileRequest) AddCompletion(customerID string, body CompletionRequest)

func (r *UploadBatchFileRequest) AddEmbedding(customerID string, body EmbeddingRequest)

func (r *UploadBatchFileRequest) MarshalJSONL() []byte

type Usage struct { PromptTokens int json:"prompt_tokens" CompletionTokens int json:"completion_tokens" TotalTokens int json:"total_tokens" PromptTokensDetails *PromptTokensDetails json:"prompt_tokens_details" CompletionTokensDetails *CompletionTokensDetails json:"completion_tokens_details" }

Usage represents the total token usage per request to OpenAI.

type VectorStore struct { ID string json:"id" Object string json:"object" CreatedAt int64 json:"created_at" Name string json:"name" UsageBytes int json:"usage_bytes" FileCounts VectorStoreFileCount json:"file_counts" Status string json:"status" ExpiresAfter *VectorStoreExpires json:"expires_after" ExpiresAt *int json:"expires_at" Metadata map[string]any json:"metadata"

}

func (h *VectorStore) GetRateLimitHeaders() RateLimitHeaders

func (h *VectorStore) SetHeader(header http.Header)

type VectorStoreDeleteResponse struct { ID string json:"id" Object string json:"object" Deleted bool json:"deleted"

}

func (h *VectorStoreDeleteResponse) GetRateLimitHeaders() RateLimitHeaders

func (h *VectorStoreDeleteResponse) Header() http.Header

func (h *VectorStoreDeleteResponse) SetHeader(header http.Header)

type VectorStoreExpires struct { Anchor string json:"anchor" Days int json:"days" }

type VectorStoreFile struct { ID string json:"id" Object string json:"object" CreatedAt int64 json:"created_at" VectorStoreID string json:"vector_store_id" UsageBytes int json:"usage_bytes" Status string json:"status"

}

func (h *VectorStoreFile) GetRateLimitHeaders() RateLimitHeaders

func (h *VectorStoreFile) SetHeader(header http.Header)

type VectorStoreFileBatch struct { ID string json:"id" Object string json:"object" CreatedAt int64 json:"created_at" VectorStoreID string json:"vector_store_id" Status string json:"status" FileCounts VectorStoreFileCount json:"file_counts"

}

func (h *VectorStoreFileBatch) GetRateLimitHeaders() RateLimitHeaders

func (h *VectorStoreFileBatch) Header() http.Header

func (h *VectorStoreFileBatch) SetHeader(header http.Header)

type VectorStoreFileBatchRequest struct { FileIDs []string json:"file_ids" }

type VectorStoreFileCount struct { InProgress int json:"in_progress" Completed int json:"completed" Failed int json:"failed" Cancelled int json:"cancelled" Total int json:"total" }

type VectorStoreFileRequest struct { FileID string json:"file_id" }

type VectorStoreFilesList struct { VectorStoreFiles []VectorStoreFile json:"data" FirstID *string json:"first_id" LastID *string json:"last_id" HasMore bool json:"has_more"

}

func (h *VectorStoreFilesList) GetRateLimitHeaders() RateLimitHeaders

func (h *VectorStoreFilesList) Header() http.Header

func (h *VectorStoreFilesList) SetHeader(header http.Header)

type VectorStoreRequest struct { Name string json:"name,omitempty" FileIDs []string json:"file_ids,omitempty" ExpiresAfter *VectorStoreExpires json:"expires_after,omitempty" Metadata map[string]any json:"metadata,omitempty" }

VectorStoreRequest provides the vector store request parameters.

type VectorStoreToolResources struct { FileIDs []string json:"file_ids,omitempty" ChunkingStrategy *ChunkingStrategy json:"chunking_strategy,omitempty" Metadata map[string]any json:"metadata,omitempty" }

type VectorStoresList struct { VectorStores []VectorStore json:"data" LastID *string json:"last_id" FirstID *string json:"first_id" HasMore bool json:"has_more"

}

VectorStoresList is a list of vector stores.

func (h *VectorStoresList) GetRateLimitHeaders() RateLimitHeaders

func (h *VectorStoresList) SetHeader(header http.Header)

type Violence struct { Filtered bool json:"filtered" Severity string json:"severity,omitempty" }