# ChatGPTUnofficial.psm1
$API_URL = 'https://api.openai.com/v1'

# Generate encrypted API key file in user profile
Function Initialize-ChatGPT {
    if (-not (Test-Path -Path "$($env:USERPROFILE)\ChatGPT")) {
        New-Item -ItemType Directory -Path "$($env:USERPROFILE)\ChatGPT" | Out-Null
    }
    Read-Host "Enter your ChatGPT API Key" |
        ConvertTo-SecureString -AsPlainText -Force |
        ConvertFrom-SecureString |
        Out-File -FilePath "$($env:USERPROFILE)\ChatGPT\api_key" -Encoding ascii
}

# AUTHENTICATION
Function Get-ChatGPTAuthToken {
    $api_file_content = Get-Content -Path "$($env:USERPROFILE)\ChatGPT\api_key" | ConvertTo-SecureString
    $api_file_bstr    = [System.Runtime.InteropServices.Marshal]::SecureStringToBSTR($api_file_content)
    $decoded_api_key  = [System.Runtime.InteropServices.Marshal]::PtrToStringAuto($api_file_bstr)
    # Release the unmanaged copy of the secret once the managed string has been extracted.
    [System.Runtime.InteropServices.Marshal]::ZeroFreeBSTR($api_file_bstr)
    return ($decoded_api_key.ToString().Trim())
}

Function Get-ChatGPTHeaders {
    return @{
        'Authorization' = "Bearer $(Get-ChatGPTAuthToken)"
        'Content-Type'  = 'application/json'
    }
}

# MODELS - List and describe the various models available in the API. You can refer to the Models
# documentation to understand what models are available and the differences between them.
<#
.SYNOPSIS
List models
.DESCRIPTION
GET https://api.openai.com/v1/models
Lists the currently available models, and provides basic information about each one such as the owner and availability.
#>
Function Get-ChatGPTModels {
    $endpoint = '/models'
    $url      = "$($API_URL)$($endpoint)"
    $headers  = Get-ChatGPTHeaders
    return (Invoke-RestMethod -Uri $url -Method Get -Headers $headers)
}

<#
.SYNOPSIS
Retrieve model
.DESCRIPTION
GET https://api.openai.com/v1/models/{model}
Retrieves a model instance, providing basic information about the model such as the owner and permissioning.
.PARAMETER model
The ID of the model to use for this request
#>
Function Get-ChatGPTModel {
    param([Parameter(Mandatory=$true)][System.String]$model)
    $endpoint = "/models/$($model)"
    $url      = "$($API_URL)$($endpoint)"
    $headers  = Get-ChatGPTHeaders
    return (Invoke-RestMethod -Uri $url -Method Get -Headers $headers)
}
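# Illustrative usage (hypothetical calls, not part of the module itself; the model ID is only an example):
#   Initialize-ChatGPT                                       # prompts once for the API key and stores it encrypted under $env:USERPROFILE\ChatGPT
#   (Get-ChatGPTModels).data | Select-Object id, owned_by    # the API returns the model list in a 'data' array
#   Get-ChatGPTModel -model 'text-davinci-003'               # inspect a single model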
# COMPLETIONS - Given a prompt, the model will return one or more predicted completions, and can also
# return the probabilities of alternative tokens at each position.
<#
.SYNOPSIS
Create completion
.DESCRIPTION
POST https://api.openai.com/v1/completions
Creates a completion for the provided prompt and parameters.
.PARAMETER model
ID of the model to use. You can use the List models API to see all of your available models, or see our Model overview for descriptions of them.
.PARAMETER prompt
The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays. Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.
.PARAMETER suffix
The suffix that comes after a completion of inserted text.
.PARAMETER max_tokens
The maximum number of tokens to generate in the completion. The token count of your prompt plus max_tokens cannot exceed the model's context length. Most models have a context length of 2048 tokens (except for the newest models, which support 4096).
.PARAMETER temperature
What sampling temperature to use. Higher values mean the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. We generally recommend altering this or top_p but not both.
.PARAMETER top_p
An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both.
.PARAMETER n
How many completions to generate for each prompt. Note: Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for max_tokens and stop.
.PARAMETER stream
Whether to stream back partial progress. If set, tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message.
.PARAMETER logprobs
Include the log probabilities on the logprobs most likely tokens, as well as the chosen tokens. For example, if logprobs is 5, the API will return a list of the 5 most likely tokens. The API will always return the logprob of the sampled token, so there may be up to logprobs+1 elements in the response. The maximum value for logprobs is 5. If you need more than this, please contact us through our Help center and describe your use case.
.PARAMETER echo
Echo back the prompt in addition to the completion.
.PARAMETER stop
Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence.
.PARAMETER presence_penalty
Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
.PARAMETER frequency_penalty
Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
.PARAMETER best_of
Generates best_of completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed. When used with n, best_of controls the number of candidate completions and n specifies how many to return; best_of must be greater than n. Note: Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for max_tokens and stop.
.PARAMETER logit_bias
Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this tokenizer tool (which works for both GPT-2 and GPT-3) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. As an example, you can pass {"50256": -100} to prevent the <|endoftext|> token from being generated.
.PARAMETER user
A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
#>
Function Create-ChatGPTCompletion {
    param(
        [Parameter(Mandatory=$true)][ValidateSet('text-davinci-003','text-curie-001','text-babbage-001','text-ada-001')][System.String]$model,
        [Parameter(Mandatory=$false)][System.String[]]$prompt = $null,
        [Parameter(Mandatory=$false)][System.String]$suffix = $null,
        [Parameter(Mandatory=$false)][System.Int32]$max_tokens = 4000,
        [Parameter(Mandatory=$false)][System.Decimal]$temperature = 1,
        [Parameter(Mandatory=$false)][System.Decimal]$top_p = 1,
        [Parameter(Mandatory=$false)][System.Int32]$n = 1,
        [Parameter(Mandatory=$false)][System.Boolean]$stream = $false,
        [Parameter(Mandatory=$false)][System.Nullable[System.Int32]]$logprobs = $null,
        [Parameter(Mandatory=$false)][System.Boolean]$echo = $false,
        [Parameter(Mandatory=$false)][System.String[]]$stop = $null,
        [Parameter(Mandatory=$false)][System.Decimal]$presence_penalty = 0,
        [Parameter(Mandatory=$false)][System.Decimal]$frequency_penalty = 0,
        [Parameter(Mandatory=$false)][System.Int32]$best_of = 1,
        [Parameter(Mandatory=$false)][hashtable]$logit_bias = $null,
        [Parameter(Mandatory=$false)][System.String]$user = $null
    )
    $endpoint = '/completions'
    $url      = "$($API_URL)$($endpoint)"
    $headers  = Get-ChatGPTHeaders

    # Build the request body, only including values that differ from the defaults.
    $body = @{}
    $body['model'] = $model
    if ($null -ne $prompt) { $body['prompt'] = $prompt }
    if (-not [string]::IsNullOrEmpty($suffix)) { $body['suffix'] = $suffix }
    # max_tokens is always sent so the module default (4000) applies rather than the API's much
    # smaller default; lower it explicitly for models with shorter context lengths.
    $body['max_tokens'] = $max_tokens
    if (1 -ne $temperature) { $body['temperature'] = $temperature }
    if (1 -ne $top_p) { $body['top_p'] = $top_p }
    if (1 -ne $n) { $body['n'] = $n }
    if ($stream) { $body['stream'] = $true }
    if ($null -ne $logprobs) { $body['logprobs'] = $logprobs }
    if ($echo) { $body['echo'] = $true }
    if ($null -ne $stop) { $body['stop'] = $stop }
    if (0 -ne $presence_penalty) { $body['presence_penalty'] = $presence_penalty }
    if (0 -ne $frequency_penalty) { $body['frequency_penalty'] = $frequency_penalty }
    if (1 -ne $best_of) { $body['best_of'] = $best_of }
    if ($null -ne $logit_bias) { $body['logit_bias'] = $logit_bias }
    if (-not [string]::IsNullOrEmpty($user)) { $body['user'] = $user }
    $body = $body | ConvertTo-Json

    return (Invoke-RestMethod -Uri $url -Method Post -Headers $headers -Body $body)
}
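# Illustrative usage (values are examples; the generated text is exposed via choices[].text):
#   $completion = Create-ChatGPTCompletion -model 'text-davinci-003' -prompt 'Write a one-line haiku about PowerShell' -temperature 0.7 -max_tokens 64
#   $completion.choices[0].text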
# EDITS - Given a prompt and an instruction, the model will return an edited version of the prompt.
<#
.SYNOPSIS
Create edit
.DESCRIPTION
POST https://api.openai.com/v1/edits
Creates a new edit for the provided input, instruction, and parameters.
.PARAMETER model
ID of the model to use. You can use the text-davinci-edit-001 or code-davinci-edit-001 model with this endpoint.
.PARAMETER input
The input text to use as a starting point for the edit.
.PARAMETER instruction
The instruction that tells the model how to edit the prompt.
.PARAMETER n
How many edits to generate for the input and instruction.
.PARAMETER temperature
What sampling temperature to use. Higher values mean the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. We generally recommend altering this or top_p but not both.
.PARAMETER top_p
An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both.
#>
Function Create-ChatGPTEdit {
    param(
        [Parameter(Mandatory=$true)][System.String]$model,
        # Note: $input shadows a PowerShell automatic variable; if the bound value appears empty,
        # consider renaming this parameter.
        [Parameter(Mandatory=$false)][System.String]$input = '',
        [Parameter(Mandatory=$true)][System.String]$instruction,
        [Parameter(Mandatory=$false)][System.Int32]$n = 1,
        [Parameter(Mandatory=$false)][System.Decimal]$temperature = 1,
        [Parameter(Mandatory=$false)][System.Decimal]$top_p = 1
    )
    $endpoint = '/edits'
    $url      = "$($API_URL)$($endpoint)"
    $headers  = Get-ChatGPTHeaders
    $body = @{}
    $body['model']       = $model
    $body['instruction'] = $instruction
    if ('' -ne $input) { $body['input'] = $input }
    if (1 -ne $n) { $body['n'] = $n }
    if (1 -ne $temperature) { $body['temperature'] = $temperature }
    if (1 -ne $top_p) { $body['top_p'] = $top_p }
    $body = $body | ConvertTo-Json
    return (Invoke-RestMethod -Uri $url -Method Post -Headers $headers -Body $body)
}

# IMAGES - Given a prompt and/or an input image, the model will generate a new image.
<#
.SYNOPSIS
Create image
.DESCRIPTION
POST https://api.openai.com/v1/images/generations
Creates an image given a prompt.
.PARAMETER prompt
A text description of the desired image(s). The maximum length is 1000 characters.
.PARAMETER n
The number of images to generate. Must be between 1 and 10.
.PARAMETER size
The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.
.PARAMETER response_format
The format in which the generated images are returned. Must be one of url or b64_json.
.PARAMETER user
A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
#>
Function Create-ChatGPTImage {
    param(
        [Parameter(Mandatory=$true)][System.String]$prompt,
        [Parameter(Mandatory=$false)][System.Int32]$n = 1,
        [Parameter(Mandatory=$false)][System.String]$size = '1024x1024',
        [Parameter(Mandatory=$false)][System.String]$response_format = 'url',
        [Parameter(Mandatory=$false)][System.String]$user
    )
    $endpoint = '/images/generations'
    $url      = "$($API_URL)$($endpoint)"
    $headers  = Get-ChatGPTHeaders
    $body = @{}
    $body['prompt'] = $prompt
    if (1 -ne $n) { $body['n'] = $n }
    if ('1024x1024' -ne $size) { $body['size'] = $size }
    if ('url' -ne $response_format) { $body['response_format'] = $response_format }
    if (-not [string]::IsNullOrEmpty($user)) { $body['user'] = $user }
    $body = $body | ConvertTo-Json
    return (Invoke-RestMethod -Uri $url -Method Post -Headers $headers -Body $body)
}
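# Illustrative usage (example values only; generated image URLs are returned in data[].url):
#   (Create-ChatGPTEdit -model 'text-davinci-edit-001' -input 'What day of the wek is it?' -instruction 'Fix the spelling mistakes').choices[0].text
#   (Create-ChatGPTImage -prompt 'A watercolor fox in a forest' -n 1 -size '512x512').data[0].url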
<#
.SYNOPSIS
Create image edit
.DESCRIPTION
POST https://api.openai.com/v1/images/edits
Creates an edited or extended image given an original image and a prompt.
.PARAMETER image
The image to edit. Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask.
.PARAMETER mask
An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where image should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as image.
.PARAMETER prompt
A text description of the desired image(s). The maximum length is 1000 characters.
.PARAMETER n
The number of images to generate. Must be between 1 and 10.
.PARAMETER size
The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.
.PARAMETER response_format
The format in which the generated images are returned. Must be one of url or b64_json.
.PARAMETER user
A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
#>
Function Create-ChatGPTImageEdit {
    param(
        [Parameter(Mandatory=$true)][System.String]$image,
        [Parameter(Mandatory=$false)][System.String]$mask = $null,
        [Parameter(Mandatory=$true)][System.String]$prompt,
        [Parameter(Mandatory=$false)][System.Int32]$n = 1,
        [Parameter(Mandatory=$false)][System.String]$size = '1024x1024',
        [Parameter(Mandatory=$false)][System.String]$response_format = 'url',
        [Parameter(Mandatory=$false)][System.String]$user = $null
    )
    $endpoint = '/images/edits'
    $url      = "$($API_URL)$($endpoint)"
    $headers  = Get-ChatGPTHeaders
    # NOTE: the /images/edits endpoint expects a multipart/form-data upload of the PNG files;
    # sending file paths in a JSON body is a simplification and may be rejected by the API.
    $body = @{}
    $body['image']  = $image
    $body['prompt'] = $prompt
    if (-not [string]::IsNullOrEmpty($mask)) { $body['mask'] = $mask }
    if (1 -ne $n) { $body['n'] = $n }
    if ('1024x1024' -ne $size) { $body['size'] = $size }
    if ('url' -ne $response_format) { $body['response_format'] = $response_format }
    if (-not [string]::IsNullOrEmpty($user)) { $body['user'] = $user }
    $body = $body | ConvertTo-Json
    return (Invoke-RestMethod -Uri $url -Method Post -Headers $headers -Body $body)
}

<#
.SYNOPSIS
Create image variation
.DESCRIPTION
POST https://api.openai.com/v1/images/variations
Creates a variation of a given image.
.PARAMETER image
The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square.
.PARAMETER n
The number of images to generate. Must be between 1 and 10.
.PARAMETER size
The size of the generated images. Must be one of 256x256, 512x512, or 1024x1024.
.PARAMETER response_format
The format in which the generated images are returned. Must be one of url or b64_json.
.PARAMETER user
A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
#>
Function Create-ChatGPTImageVariation {
    param(
        [Parameter(Mandatory=$true)]$image,
        [Parameter(Mandatory=$false)]$n = 1,
        [Parameter(Mandatory=$false)]$size = '1024x1024',
        [Parameter(Mandatory=$false)]$response_format = 'url',
        [Parameter(Mandatory=$false)]$user = $null
    )
    $endpoint = '/images/variations'
    $url      = "$($API_URL)$($endpoint)"
    $headers  = Get-ChatGPTHeaders
    # NOTE: like /images/edits, this endpoint expects a multipart/form-data upload of the PNG file.
    $body = @{}
    $body['image'] = $image
    if (1 -ne $n) { $body['n'] = $n }
    if ('1024x1024' -ne $size) { $body['size'] = $size }
    if ('url' -ne $response_format) { $body['response_format'] = $response_format }
    if ($null -ne $user) { $body['user'] = $user }
    $body = $body | ConvertTo-Json
    return (Invoke-RestMethod -Uri $url -Method Post -Headers $headers -Body $body)
}
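# Illustrative usage (example values only). On PowerShell 7+ a true multipart upload could instead be
# built with Invoke-RestMethod -Form, should the JSON-body approach above be rejected by the API:
#   Create-ChatGPTImageVariation -image 'C:\images\fox.png' -n 2 -size '512x512'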
# EMBEDDINGS - Get a vector representation of a given input that can be easily consumed by machine
# learning models and algorithms.
<#
.SYNOPSIS
Create embeddings
.DESCRIPTION
POST https://api.openai.com/v1/embeddings
Creates an embedding vector representing the input text.
.PARAMETER model
ID of the model to use. You can use the List models API to see all of your available models, or see our Model overview for descriptions of them.
.PARAMETER input
Input text to get embeddings for, encoded as a string or array of tokens. To get embeddings for multiple inputs in a single request, pass an array of strings or array of token arrays. Each input must not exceed 8192 tokens in length.
.PARAMETER user
A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse.
#>
Function Create-ChatGPTEmbeddings {
    param(
        [Parameter(Mandatory=$true)][System.String]$model,
        [Parameter(Mandatory=$true)][System.String]$input,
        [Parameter(Mandatory=$false)][System.String]$user = $null
    )
    $endpoint = '/embeddings'
    $url      = "$($API_URL)$($endpoint)"
    $headers  = Get-ChatGPTHeaders
    $body = @{}
    $body['model'] = $model
    $body['input'] = $input
    if (-not [string]::IsNullOrEmpty($user)) { $body['user'] = $user }
    $body = $body | ConvertTo-Json
    return (Invoke-RestMethod -Uri $url -Method Post -Headers $headers -Body $body)
}

# FILES - Files are used to upload documents that can be used with features like Fine-tuning.
<#
.SYNOPSIS
List files
.DESCRIPTION
GET https://api.openai.com/v1/files
Returns a list of files that belong to the user's organization.
#>
Function Get-ChatGPTFiles {
    $endpoint = '/files'
    $url      = "$($API_URL)$($endpoint)"
    $headers  = Get-ChatGPTHeaders
    return (Invoke-RestMethod -Uri $url -Method Get -Headers $headers)
}

<#
.SYNOPSIS
Upload file
.DESCRIPTION
POST https://api.openai.com/v1/files
Upload a file that contains document(s) to be used across various endpoints/features. Currently, the size of all the files uploaded by one organization can be up to 1 GB. Please contact us if you need to increase the storage limit.
.PARAMETER file
Name of the JSON Lines file to be uploaded. If the purpose is set to "fine-tune", each line is a JSON record with "prompt" and "completion" fields representing your training examples.
.PARAMETER purpose
The intended purpose of the uploaded documents. Use "fine-tune" for Fine-tuning. This allows us to validate the format of the uploaded file.
#>
Function Upload-ChatGPTFile {
    param(
        [Parameter(Mandatory=$true)][System.String]$file,
        [Parameter(Mandatory=$true)][System.String]$purpose
    )
    $endpoint = '/files'
    $url      = "$($API_URL)$($endpoint)"
    $headers  = Get-ChatGPTHeaders
    # NOTE: the /files endpoint expects a multipart/form-data upload; sending the file name in a
    # JSON body is a simplification and may be rejected by the API.
    $body = @{}
    $body['file']    = $file
    $body['purpose'] = $purpose
    $body = $body | ConvertTo-Json
    return (Invoke-RestMethod -Uri $url -Method Post -Headers $headers -Body $body)
}

<#
.SYNOPSIS
Delete file
.DESCRIPTION
DELETE https://api.openai.com/v1/files/{file_id}
Delete a file.
.PARAMETER file_id
The ID of the file to use for this request
#>
Function Delete-ChatGPTFile {
    param([Parameter(Mandatory=$true)][System.String]$file_id)
    $endpoint = "/files/$($file_id)"
    $url      = "$($API_URL)$($endpoint)"
    $headers  = Get-ChatGPTHeaders
    return (Invoke-RestMethod -Uri $url -Method Delete -Headers $headers)
}

<#
.SYNOPSIS
Retrieve file
.DESCRIPTION
GET https://api.openai.com/v1/files/{file_id}
Returns information about a specific file.
.PARAMETER file_id
The ID of the file to use for this request
#>
Function Get-ChatGPTFile {
    param([Parameter(Mandatory=$true)][System.String]$file_id)
    $endpoint = "/files/$($file_id)"
    $url      = "$($API_URL)$($endpoint)"
    $headers  = Get-ChatGPTHeaders
    return (Invoke-RestMethod -Uri $url -Method Get -Headers $headers)
}

<#
.SYNOPSIS
Retrieve file content
.DESCRIPTION
GET https://api.openai.com/v1/files/{file_id}/content
Returns the contents of the specified file.
.PARAMETER file_id
The ID of the file to use for this request
#>
Function Get-ChatGPTFileContent {
    param([Parameter(Mandatory=$true)][System.String]$file_id)
    $endpoint = "/files/$($file_id)/content"
    $url      = "$($API_URL)$($endpoint)"
    $headers  = Get-ChatGPTHeaders
    return (Invoke-RestMethod -Uri $url -Method Get -Headers $headers)
}
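# Illustrative usage (the model name is only an example; embedding vectors are returned in data[].embedding):
#   $vector = (Create-ChatGPTEmbeddings -model 'text-embedding-ada-002' -input 'The quick brown fox').data[0].embedding
#   (Get-ChatGPTFiles).data | Select-Object id, filename, purpose     # list uploaded files and their IDs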
# FINE-TUNES - Manage fine-tuning jobs to tailor a model to your specific training data.
<#
.SYNOPSIS
Create fine-tune
.DESCRIPTION
POST https://api.openai.com/v1/fine-tunes
Creates a job that fine-tunes a specified model from a given dataset.
Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete.
.PARAMETER training_file
The ID of an uploaded file that contains training data. See upload file for how to upload a file. Your dataset must be formatted as a JSONL file, where each training example is a JSON object with the keys "prompt" and "completion". Additionally, you must upload your file with the purpose fine-tune. See the fine-tuning guide for more details.
.PARAMETER validation_file
The ID of an uploaded file that contains validation data. If you provide this file, the data is used to generate validation metrics periodically during fine-tuning. These metrics can be viewed in the fine-tuning results file. Your train and validation data should be mutually exclusive. Your dataset must be formatted as a JSONL file, where each validation example is a JSON object with the keys "prompt" and "completion". Additionally, you must upload your file with the purpose fine-tune. See the fine-tuning guide for more details.
.PARAMETER model
The name of the base model to fine-tune. You can select one of "ada", "babbage", "curie", "davinci", or a fine-tuned model created after 2022-04-21. To learn more about these models, see the Models documentation.
.PARAMETER n_epochs
The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset.
.PARAMETER batch_size
The batch size to use for training. The batch size is the number of training examples used to train a single forward and backward pass.
.PARAMETER learning_rate_multiplier
The learning rate multiplier to use for training. The fine-tuning learning rate is the original learning rate used for pretraining multiplied by this value. By default, the learning rate multiplier is 0.05, 0.1, or 0.2 depending on final batch_size (larger learning rates tend to perform better with larger batch sizes). We recommend experimenting with values in the range 0.02 to 0.2 to see what produces the best results.
.PARAMETER prompt_loss_weight
The weight to use for loss on the prompt tokens. This controls how much the model tries to learn to generate the prompt (as compared to the completion which always has a weight of 1.0), and can add a stabilizing effect to training when completions are short. If prompts are extremely long (relative to completions), it may make sense to reduce this weight so as to avoid over-prioritizing learning the prompt.
.PARAMETER compute_classification_metrics
If set, we calculate classification-specific metrics such as accuracy and F-1 score using the validation set at the end of every epoch. These metrics can be viewed in the results file. In order to compute classification metrics, you must provide a validation_file. Additionally, you must specify classification_n_classes for multiclass classification or classification_positive_class for binary classification.
.PARAMETER classification_n_classes
The number of classes in a classification task. This parameter is required for multiclass classification.
.PARAMETER classification_positive_class
The positive class in binary classification. This parameter is needed to generate precision, recall, and F1 metrics when doing binary classification.
.PARAMETER classification_betas
If this is provided, we calculate F-beta scores at the specified beta values. The F-beta score is a generalization of F-1 score. This is only used for binary classification. With a beta of 1 (i.e. the F-1 score), precision and recall are given the same weight.
A larger beta score puts more weight on recall and less on precision. A smaller beta score puts more weight on precision and less on recall.
.PARAMETER suffix
A string of up to 40 characters that will be added to your fine-tuned model name. For example, a suffix of "custom-model-name" would produce a model name like ada:ft-your-org:custom-model-name-2022-02-15-04-21-04.
#>
Function Create-ChatGPTFineTune {
    param(
        [Parameter(Mandatory=$true)][System.String]$training_file,
        [Parameter(Mandatory=$false)][System.String]$validation_file = $null,
        [Parameter(Mandatory=$false)][System.String]$model = 'curie',
        [Parameter(Mandatory=$false)][System.Int32]$n_epochs = 4,
        [Parameter(Mandatory=$false)][System.Nullable[System.Int32]]$batch_size = $null,
        [Parameter(Mandatory=$false)][System.Nullable[System.Decimal]]$learning_rate_multiplier = $null,
        [Parameter(Mandatory=$false)][System.Decimal]$prompt_loss_weight = 0.01,
        [Parameter(Mandatory=$false)][System.Boolean]$compute_classification_metrics = $false,
        [Parameter(Mandatory=$false)][System.Nullable[System.Int32]]$classification_n_classes = $null,
        [Parameter(Mandatory=$false)][System.String]$classification_positive_class = $null,
        [Parameter(Mandatory=$false)][System.String[]]$classification_betas = $null,
        [Parameter(Mandatory=$false)][System.String]$suffix = $null
    )
    $endpoint = '/fine-tunes'
    $url      = "$($API_URL)$($endpoint)"
    $headers  = Get-ChatGPTHeaders
    # Only include optional settings that were explicitly provided or differ from the defaults.
    $body = @{}
    $body['training_file'] = $training_file
    if (-not [string]::IsNullOrEmpty($validation_file)) { $body['validation_file'] = $validation_file }
    if ('curie' -ne $model) { $body['model'] = $model }
    if (4 -ne $n_epochs) { $body['n_epochs'] = $n_epochs }
    if ($null -ne $batch_size) { $body['batch_size'] = $batch_size }
    if ($null -ne $learning_rate_multiplier) { $body['learning_rate_multiplier'] = $learning_rate_multiplier }
    if (0.01 -ne $prompt_loss_weight) { $body['prompt_loss_weight'] = $prompt_loss_weight }
    if ($compute_classification_metrics) { $body['compute_classification_metrics'] = $compute_classification_metrics }
    if ($null -ne $classification_n_classes) { $body['classification_n_classes'] = $classification_n_classes }
    if (-not [string]::IsNullOrEmpty($classification_positive_class)) { $body['classification_positive_class'] = $classification_positive_class }
    if ($null -ne $classification_betas) { $body['classification_betas'] = $classification_betas }
    if (-not [string]::IsNullOrEmpty($suffix)) { $body['suffix'] = $suffix }
    $body = $body | ConvertTo-Json
    return (Invoke-RestMethod -Uri $url -Method Post -Headers $headers -Body $body)
}

<#
.SYNOPSIS
List fine-tunes
.DESCRIPTION
GET https://api.openai.com/v1/fine-tunes
List your organization's fine-tuning jobs
#>
Function Get-ChatGPTFineTunes {
    $endpoint = '/fine-tunes'
    $url      = "$($API_URL)$($endpoint)"
    $headers  = Get-ChatGPTHeaders
    return (Invoke-RestMethod -Uri $url -Method Get -Headers $headers)
}

<#
.SYNOPSIS
Retrieve fine-tune
.DESCRIPTION
GET https://api.openai.com/v1/fine-tunes/{fine_tune_id}
Gets info about the fine-tune job.
.PARAMETER fine_tune_id
The ID of the fine-tune job
#>
Function Get-ChatGPTFineTune {
    param([Parameter(Mandatory=$true)][System.String]$fine_tune_id)
    $endpoint = "/fine-tunes/$($fine_tune_id)"
    $url      = "$($API_URL)$($endpoint)"
    $headers  = Get-ChatGPTHeaders
    return (Invoke-RestMethod -Uri $url -Method Get -Headers $headers)
}
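# Illustrative fine-tuning flow (the file ID is a placeholder, not a real value):
#   $job = Create-ChatGPTFineTune -training_file 'file-abc123' -model 'curie' -suffix 'custom-model-name'
#   Get-ChatGPTFineTune -fine_tune_id $job.id          # poll the enqueued job for its status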
<#
.SYNOPSIS
Cancel fine-tune
.DESCRIPTION
POST https://api.openai.com/v1/fine-tunes/{fine_tune_id}/cancel
Immediately cancel a fine-tune job.
.PARAMETER fine_tune_id
The ID of the fine-tune job to cancel
#>
Function Cancel-ChatGPTFineTune {
    param([Parameter(Mandatory=$true)][System.String]$fine_tune_id)
    $endpoint = "/fine-tunes/$($fine_tune_id)/cancel"
    $url      = "$($API_URL)$($endpoint)"
    $headers  = Get-ChatGPTHeaders
    return (Invoke-RestMethod -Uri $url -Method Post -Headers $headers)
}

<#
.SYNOPSIS
List fine-tune events
.DESCRIPTION
GET https://api.openai.com/v1/fine-tunes/{fine_tune_id}/events
Get fine-grained status updates for a fine-tune job.
.PARAMETER fine_tune_id
The ID of the fine-tune job to get events for.
.PARAMETER stream
Whether to stream events for the fine-tune job. If set to true, events will be sent as data-only server-sent events as they become available. The stream will terminate with a data: [DONE] message when the job is finished (succeeded, cancelled, or failed). If set to false, only events generated so far will be returned.
#>
Function Get-ChatGPTFineTuneEvents {
    param(
        [Parameter(Mandatory=$true)][System.String]$fine_tune_id,
        [Parameter(Mandatory=$false)][System.Boolean]$stream = $false
    )
    $endpoint = "/fine-tunes/$($fine_tune_id)/events"
    $url      = "$($API_URL)$($endpoint)"
    $headers  = Get-ChatGPTHeaders
    if ($stream) {
        # stream is passed as a query-string parameter; a GET request does not carry a JSON body.
        $url = "$($url)?stream=true"
    }
    return (Invoke-RestMethod -Uri $url -Method Get -Headers $headers)
}

<#
.SYNOPSIS
Delete fine-tune model
.DESCRIPTION
DELETE https://api.openai.com/v1/models/{model}
Delete a fine-tuned model. You must have the Owner role in your organization.
.PARAMETER model
The model to delete
#>
Function Delete-ChatGPTFineTuneModel {
    param([Parameter(Mandatory=$true)][System.String]$model)
    $endpoint = "/models/$($model)"
    $url      = "$($API_URL)$($endpoint)"
    $headers  = Get-ChatGPTHeaders
    return (Invoke-RestMethod -Uri $url -Method Delete -Headers $headers)
}

# MODERATIONS - Given an input text, outputs whether the model classifies it as violating OpenAI's content policy.
<#
.SYNOPSIS
Create moderation
.DESCRIPTION
POST https://api.openai.com/v1/moderations
Classifies if text violates OpenAI's Content Policy
.PARAMETER input
The input text to classify
.PARAMETER model
The moderation model to use (defaults to text-moderation-latest)
#>
Function Create-ChatGPTModeration {
    param(
        [Parameter(Mandatory=$true)][System.String[]]$input,
        [Parameter(Mandatory=$false)][System.String]$model = 'text-moderation-latest'
    )
    $endpoint = '/moderations'
    $url      = "$($API_URL)$($endpoint)"
    $headers  = Get-ChatGPTHeaders
    $body = @{}
    $body['input'] = $input
    if ('text-moderation-latest' -ne $model) { $body['model'] = $model }
    $body = $body | ConvertTo-Json
    return (Invoke-RestMethod -Uri $url -Method Post -Headers $headers -Body $body)
}
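# Illustrative usage (per-category results are exposed under results[].categories):
#   (Create-ChatGPTModeration -input 'Some text to check').results[0].flagged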
<#
.SYNOPSIS
Invoke ChatGPT
.DESCRIPTION
Prompts an interactive ChatGPT conversation. Type "copy" to copy the most recent answer to the clipboard, and "quit" to stop prompting.
.PARAMETER Prompt
Prompt to send to ChatGPT
.PARAMETER Model
The model to use for the answer
.PARAMETER KeepConversation
If specified, the conversation is logged with Start-Transcript to a dated file under $env:USERPROFILE\ChatGPT.
#>
Function Ask-ChatGPT {
    param(
        [Parameter(Mandatory=$false)][System.String[]]$Prompt = 'help',
        [Parameter(Mandatory=$false)][ValidateSet('text-davinci-003','text-curie-001','text-babbage-001','text-ada-001')][System.String]$Model = 'text-davinci-003',
        [Parameter(Mandatory=$false)][switch]$KeepConversation
    )
    if (-not (Test-Path -Path "$($env:USERPROFILE)\ChatGPT\api_key")) { Initialize-ChatGPT }
    if ($KeepConversation) {
        Start-Transcript -Path "$($env:USERPROFILE)\ChatGPT\conversation_$(Get-Date -Format 'yyyyMMdd').log" -Append -Confirm:$false
    }
    $LastAnswer = ''
    while ($Prompt -ne 'quit') {
        switch ($Prompt) {
            'help' {
                Write-Host @"
-------------------------
# CHAT GPT COMMAND LIST #
-------------------------
- help: prints this help page
- copy: copy the last output from ChatGPT
- quit: exit ChatGPT conversation
"@ -ForegroundColor Cyan
                break
            }
            'copy' {
                if ($results) {
                    Set-Clipboard -Value $results.choices.text
                    Write-Host "Latest result copied to the clipboard.`n" -ForegroundColor Green
                }
                else {
                    Write-Host "Nothing to copy yet.`n" -ForegroundColor Yellow
                }
                break
            }
            default {
                $results = Create-ChatGPTCompletion -model $Model -prompt $Prompt
                Write-Host "$($results.choices.text)`n" -ForegroundColor Yellow
                $LastAnswer = "You said: `"$($results.choices.text)`""
            }
        }
        $Prompt = Read-Host "Ask ChatGPT"
        # Prepend the previous answer so the next completion keeps some conversational context.
        # ($Prompt is type-constrained to [string[]], so stringify it before the membership test.)
        if ("$Prompt" -notin @('help','copy','quit')) { $Prompt = "$($LastAnswer). $($Prompt)" }
    }
    if ($KeepConversation) { Stop-Transcript }
}

Export-ModuleMember -Function Initialize-ChatGPT, Get-ChatGPTModels, Get-ChatGPTModel, `
    Create-ChatGPTCompletion, Create-ChatGPTEdit, Create-ChatGPTImage, `
    Create-ChatGPTImageEdit, Create-ChatGPTImageVariation, Create-ChatGPTEmbeddings, `
    Get-ChatGPTFiles, Upload-ChatGPTFile, Delete-ChatGPTFile, `
    Get-ChatGPTFile, Get-ChatGPTFileContent, Create-ChatGPTFineTune, `
    Get-ChatGPTFineTunes, Get-ChatGPTFineTune, Cancel-ChatGPTFineTune, `
    Get-ChatGPTFineTuneEvents, Delete-ChatGPTFineTuneModel, Create-ChatGPTModeration, `
    Ask-ChatGPT
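# Illustrative interactive session (assumes the module file is in the current directory):
#   Import-Module .\ChatGPTUnofficial.psm1
#   Ask-ChatGPT -Prompt 'Explain PowerShell splatting in two sentences' -KeepConversation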