Skip to content

Instantly share code, notes, and snippets.

Show Gist options
  • Save jonschlinkert/8d5fbe437175497471f73dec6e811b88 to your computer and use it in GitHub Desktop.
[
{
  "name": "openai_chat_completion",
  "title": "Create Chat Completion Schema",
  "description": "Schema to validate requests for OpenAI's createChatCompletion endpoint",
  "type": "object",
  "properties": {
    "frequency_penalty": {
      "type": ["number", "null"],
      "title": "frequency_penalty",
      "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.",
      "minimum": -2,
      "maximum": 2,
      "default": 0
    },
    "logit_bias": {
      "type": ["object", "null"],
      "title": "logit_bias",
      "description": "Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.",
      "additionalProperties": { "type": "number", "minimum": -100, "maximum": 100 }
    },
    "logprobs": {
      "type": ["boolean", "null"],
      "title": "logprobs",
      "description": "Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`."
    },
    "max_tokens": {
      "type": ["integer", "null"],
      "title": "max_tokens",
      "description": "The maximum number of tokens that can be generated in the chat completion. The total length of input tokens and generated tokens is limited by the model's context length.",
      "minimum": 1,
      "default": 2500
    },
    "model": {
      "type": "string",
      "title": "model",
      "description": "ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API.",
      "oneOf": [
        { "const": "gpt-3.5-turbo", "x-max-tokens": 16000 },
        { "const": "gpt-3.5-turbo-instruct", "x-max-tokens": 4000 },
        { "const": "gpt-4o", "x-max-tokens": 128000 },
        { "const": "gpt-4-turbo", "x-max-tokens": 128000 }
      ]
    },
    "messages": {
      "type": "array",
      "title": "Messages",
      "description": "A list of messages comprising the conversation so far.",
      "minItems": 1,
      "items": {
        "type": "object",
        "properties": {
          "role": {
            "type": "string",
            "title": "Role",
            "description": "The role of the message's author.",
            "enum": ["system", "user", "assistant", "tool"]
          },
          "content": {
            "type": ["string", "null", "array"],
            "title": "Content",
            "description": "The contents of the message. `content` is required for all messages, and may be null or an array for specific message types."
          },
          "name": {
            "type": "string",
            "title": "Name",
            "description": "An optional name for the participant. Provides the model information to differentiate between participants of the same role.",
            "maxLength": 64
          },
          "tool_calls": {
            "type": "array",
            "title": "Tool Calls",
            "description": "The tool calls generated by the model, such as function calls."
          },
          "tool_call_id": {
            "type": "string",
            "title": "Tool Call ID",
            "description": "Tool call that this message is responding to."
          }
        },
        "required": ["role", "content"],
        "allOf": [
          {
            "if": { "properties": { "role": { "const": "system" } } },
            "then": {
              "properties": {
                "role": { "enum": ["system"] },
                "name": { "type": "string" },
                "content": { "type": "string" }
              },
              "required": ["role", "content"]
            }
          },
          {
            "if": { "properties": { "role": { "const": "user" } } },
            "then": {
              "properties": {
                "role": { "enum": ["user"] },
                "name": { "type": "string" },
                "content": {
                  "type": ["string", "array"],
                  "items": {
                    "oneOf": [
                      {
                        "type": "object",
                        "properties": {
                          "type": { "type": "string", "enum": ["text"] },
                          "text": { "type": "string" }
                        },
                        "required": ["type", "text"]
                      },
                      {
                        "type": "object",
                        "properties": {
                          "type": { "type": "string", "enum": ["image_url"] },
                          "image_url": {
                            "type": "object",
                            "properties": {
                              "url": { "type": "string" },
                              "detail": { "type": "string" }
                            },
                            "required": ["url"]
                          }
                        },
                        "required": ["type", "image_url"]
                      }
                    ]
                  }
                }
              },
              "required": ["role", "content"]
            }
          },
          {
            "if": { "properties": { "role": { "const": "assistant" } } },
            "then": {
              "properties": {
                "role": { "enum": ["assistant"] },
                "name": { "type": "string" },
                "content": { "type": ["string", "null"] },
                "tool_calls": { "type": "array" }
              },
              "required": ["role"]
            }
          },
          {
            "if": { "properties": { "role": { "const": "tool" } } },
            "then": {
              "properties": {
                "role": { "enum": ["tool"] },
                "name": { "type": "string" },
                "content": { "type": "string" },
                "tool_call_id": { "type": "string" }
              },
              "required": ["role", "content", "tool_call_id"]
            }
          }
        ]
      }
    },
    "n": {
      "type": ["integer", "null"],
      "title": "n",
      "description": "How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.",
      "minimum": 1,
      "default": 1
    },
    "parallel_tool_calls": {
      "type": "boolean",
      "description": "Enable parallel tool calling. Defaults to true.",
      "default": true
    },
    "presence_penalty": {
      "type": ["number", "null"],
      "title": "presence_penalty",
      "description": "Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.",
      "minimum": -2,
      "maximum": 2,
      "default": 0
    },
    "response_format": {
      "type": "string",
      "title": "response_format",
      "description": "The format of the response. If `json`, the response will be a JSON object with a `choices` field containing the completions. If `text`, the response will be a JSON object with a `text` field containing the completions.",
      "enum": ["json", "text"],
      "default": "json"
    },
    "stop": {
      "type": ["string", "array", "null"],
      "description": "Up to 4 sequences where the API will stop generating further tokens.",
      "maxItems": 4,
      "default": null
    },
    "stream": { "type": "boolean", "description": "If set, partial message deltas will be sent.", "default": false },
    "temperature": {
      "type": ["number", "null"],
      "title": "temperature",
      "description": "What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.",
      "minimum": 0,
      "maximum": 2,
      "default": 1
    },
    "tool_choice": {
      "oneOf": [
        {
          "type": "string",
          "description": "Controls whether the model should choose to call a tool, and if so, which one.",
          "enum": ["required", "none", "auto"],
          "default": "required"
        },
        {
          "type": "object",
          "description": "Controls whether the model should choose to call a tool, and if so, which one.",
          "properties": {
            "type": { "type": "string", "const": "function" },
            "function": { "type": "string", "description": "The name of the function to call." }
          },
          "required": ["type", "function"]
        }
      ]
    },
    "tools": {
      "type": "array",
      "description": "A list of tools representing functions that the model may generate arguments for.",
      "items": {
        "type": "object",
        "properties": {
          "type": {
            "type": "string",
            "const": "function",
            "description": "Currently, the only supported tool type is `function`."
          },
          "function": {
            "type": "object",
            "properties": {
              "name": { "type": "string", "description": "The name of the function to be called.", "maxLength": 64 },
              "description": {
                "type": "string",
                "description": "A description of what the function does, which is used by the model to choose when and how to call the function."
              },
              "parameters": {
                "type": "object",
                "description": "A JSON object containing the parameters that the function accepts. Omitting parameters defines a function with an empty parameter list.",
                "additionalProperties": { "type": ["string", "number", "boolean", "object", "array", "null"] }
              }
            },
            "required": ["name"]
          }
        },
        "required": ["type", "function"]
      }
    },
    "top_logprobs": {
      "type": ["integer", "null"],
      "title": "top_logprobs",
      "description": "An integer between `0` and `20` specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to true if this parameter is used.",
      "minimum": 0,
      "maximum": 20
    },
    "top_p": {
      "title": "top_p",
      "description": "An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.",
      "type": ["number", "null"],
      "minimum": 0,
      "maximum": 1,
      "default": 1
    },
    "user": {
      "type": "string",
      "title": "user",
      "description": "A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse."
    }
  },
  "required": ["model", "messages"]
},
{
  "name": "openai_completion_usage",
  "title": "OpenAI/Completion Usage Schema",
  "description": "Usage statistics for an OpenAI completion request",
  "type": "object",
  "properties": {
    "completion_tokens": {
      "title": "Completion Tokens",
      "description": "Number of tokens in the generated completion.",
      "type": "integer",
      "minimum": 0
    },
    "prompt_tokens": {
      "title": "Prompt Tokens",
      "description": "Number of tokens in the prompt.",
      "type": "integer",
      "minimum": 0
    },
    "total_tokens": {
      "title": "Total Tokens",
      "description": "Total number of tokens used in the request (prompt + completion).",
      "type": "integer",
      "minimum": 0
    }
  },
  "required": ["completion_tokens", "prompt_tokens", "total_tokens"]
},
{
  "name": "openai_fine_tuning_hyperparameters",
  "title": "OpenAI/Fine Tuning Hyperparameters Schema",
  "description": "Schema for configuring Hyperparameters for Fine-Tuning.",
  "type": "object",
  "additionalProperties": false,
  "properties": {
    "batch_size": {
      "title": "Batch Size",
      "description": "Number of examples in each batch. A larger batch size means that model parameters are updated less frequently, but with lower variance.",
      "default": "auto",
      "oneOf": [{ "type": "string", "enum": ["auto"] }, { "type": "integer", "minimum": 1 }]
    },
    "learning_rate_multiplier": {
      "title": "Learning Rate Multiplier",
      "description": "Scaling factor for the learning rate. A smaller learning rate may be useful to avoid overfitting.",
      "default": "auto",
      "oneOf": [{ "type": "string", "enum": ["auto"] }, { "type": "number", "minimum": 0 }]
    },
    "n_epochs": {
      "title": "Number of Epochs",
      "description": "The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.",
      "default": "auto",
      "oneOf": [{ "type": "string", "enum": ["auto"] }, { "type": "integer", "minimum": 1 }]
    }
  }
},
{
  "name": "openai_fine_tuning_job_result",
  "title": "OpenAI/Fine Tuning Job Schema",
  "description": "Schema for the fine-tuning.job object, which is returned by OpenAI after a fine-tuning job is completed.",
  "type": "object",
  "properties": {
    "created_at": {
      "type": "integer",
      "title": "Created At",
      "description": "Unix timestamp for when the job was created"
    },
    "error": {
      "type": ["object", "null"],
      "title": "Error",
      "description": "JSON object for job failure details, if any",
      "properties": {
        "code": {
          "type": "string",
          "title": "Code",
          "description": "A machine-readable error code."
        },
        "message": {
          "type": "string",
          "title": "Message",
          "description": "A human-readable error message."
        },
        "param": {
          "type": ["string", "null"],
          "title": "Param",
          "description": "The parameter that was invalid, usually \"training_file\" or \"validation_file\". This field will be null if the failure was not parameter-specific."
        }
      }
    },
    "fine_tuned_model": {
      "type": ["string", "null"],
      "title": "Fine Tuned Model",
      "description": "Name of the fine-tuned model, if applicable"
    },
    "finished_at": {
      "type": ["integer", "null"],
      "title": "Finished At",
      "description": "Unix timestamp for when the job was finished, if applicable"
    },
    "hyperparameters": {
      "type": "object",
      "title": "Hyperparameters",
      "description": "JSON object for hyperparameters",
      "properties": {
        "n_epochs": {
          "type": ["integer", "null"],
          "title": "Number of Epochs",
          "description": "The number of epochs to train the model for"
        }
      }
    },
    "model": {
      "type": "string",
      "title": "Model",
      "description": "Base model that is being fine-tuned"
    },
    "object": {
      "type": "string",
      "title": "Object Type",
      "description": "Type of the object",
      "default": "fine_tuning.job",
      "const": "fine_tuning.job"
    },
    "object_type": {
      "type": "string",
      "title": "Object Type",
      "description": "Type of the object",
      "default": "fine_tuning.job",
      "const": "fine_tuning.job"
    },
    "organization_id": {
      "type": "string",
      "title": "Organization ID",
      "description": "Organization that owns the job"
    },
    "result_files": {
      "type": "array",
      "title": "Result Files",
      "description": "Array of result file IDs",
      "items": { "type": "string" }
    },
    "status": {
      "type": "string",
      "title": "Status",
      "description": "Current status of the job",
      "enum": ["validating_files", "queued", "running", "succeeded", "failed", "cancelled"],
      "default": "queued"
    },
    "trained_tokens": {
      "type": ["integer", "null"],
      "title": "Trained Tokens",
      "description": "Total number of billable tokens processed, if applicable"
    },
    "training_file": {
      "type": "string",
      "title": "Training File",
      "description": "File ID used for training"
    },
    "validation_file": {
      "type": ["string", "null"],
      "title": "Validation File",
      "description": "File ID used for validation, if applicable"
    }
  },
  "required": [
    "created_at",
    "hyperparameters",
    "model",
    "object",
    "object_type",
    "organization_id",
    "result_files",
    "status",
    "training_file"
  ]
},
{
  "name": "openai_transcription_request",
  "title": "Audio Transcription Request",
  "description": "A schema representing the input required to transcribe an audio file",
  "type": "object",
  "properties": {
    "file": {
      "description": "The audio file object to be transcribed.",
      "type": "string",
      "contentMediaType": "application/octet-stream",
      "pattern": ".*\\.(flac|mp3|mp4|mpeg|mpga|m4a|ogg|wav|webm)$",
      "x-ui": false
    },
    "model": { "description": "ID of the transcription model to use.", "type": "string", "enum": ["whisper-1"] },
    "language": {
      "description": "ISO-639-1 language code of the input audio, improves accuracy and latency.",
      "type": "string",
      "default": "en",
      "pattern": "^[a-z]{2}$"
    },
    "prompt": {
      "description": "Optional text to guide the model's style or to continue a previous segment.",
      "type": "string"
    },
    "response_format": {
      "description": "The format of the transcript output.",
      "type": "string",
      "default": "verbose_json",
      "enum": ["json", "text", "srt", "verbose_json", "vtt"],
      "x-ui": false
    },
    "temperature": {
      "description": "Sampling temperature between 0 and 1 to control randomness of the output.",
      "type": "number",
      "default": 0,
      "minimum": 0,
      "maximum": 1
    },
    "timestamp_granularities": {
      "description": "The timestamp granularities to populate in the transcription.",
      "type": "array",
      "default": ["segment"],
      "items": { "type": "string", "enum": ["word", "segment"] },
      "uniqueItems": true
    }
  },
  "required": ["file", "model"],
  "additionalProperties": false
},
{
  "name": "openai_translation_request",
  "title": "Create Translation Request",
  "description": "A request schema for creating a translation of an audio file into English.",
  "type": "object",
  "additionalProperties": false,
  "properties": {
    "file": {
      "type": "string",
      "description": "The audio file object to translate. Must be in one of the supported formats.",
      "format": "binary",
      "contentEncoding": "base64",
      "contentMediaType": "audio/*",
      "pattern": ".*\\.(flac|mp3|mp4|mpeg|mpga|m4a|ogg|wav|webm)$",
      "examples": ["flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, webm"]
    },
    "model": {
      "type": "string",
      "description": "ID of the model to use for translation.",
      "enum": ["whisper-1"]
    },
    "prompt": {
      "type": "string",
      "description": "An optional text to guide the model's style or to continue a previous audio segment."
    },
    "response_format": {
      "type": "string",
      "description": "Format of the transcript output.",
      "enum": ["json", "text", "srt", "verbose_json", "vtt"],
      "default": "json"
    },
    "temperature": {
      "type": "number",
      "description": "Sampling temperature to control randomness in the output.",
      "minimum": 0,
      "maximum": 1,
      "default": 0
    }
  },
  "required": ["file", "model"]
}
]
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment