pwshBedrock.psm1
# This is a locally sourced Imports file for local development. # It can be imported by the psm1 in local development to add script level variables. # It will merged in the build process. This is for local development only. # region script variables # $script:resourcePath = "$PSScriptRoot\Resources" #region model tally variables $Global:pwshBedRockSessionCostEstimate = 0 $Global:pwshBedRockSessionModelTally = @( [PSCustomObject]@{ ModelId = 'Converse' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost = 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'ai21.j2-grande-instruct' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost = 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'ai21.j2-jumbo-instruct' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost = 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'ai21.jamba-instruct-v1:0' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost = 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'ai21.j2-mid-v1' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost = 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'ai21.j2-ultra-v1' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost = 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'amazon.titan-image-generator-v1' ImageCount = 0 ImageCost = 0 } [PSCustomObject]@{ ModelId = 'amazon.titan-text-express-v1' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost = 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'amazon.titan-text-lite-v1' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost = 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'amazon.titan-text-premier-v1:0' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost = 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'amazon.titan-tg1-large' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost = 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'anthropic.claude-v2:1' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost = 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'anthropic.claude-3-haiku-20240307-v1:0' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost = 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'anthropic.claude-3-opus-20240229-v1:0' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost = 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'anthropic.claude-3-sonnet-20240229-v1:0' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost = 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'anthropic.claude-3-5-sonnet-20240620-v1:0' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost = 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'cohere.command-text-v14' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost = 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'cohere.command-light-text-v14' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost = 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'cohere.command-r-v1:0' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost = 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'cohere.command-r-plus-v1:0' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost = 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'meta.llama2-13b-chat-v1' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost 
= 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'meta.llama2-70b-chat-v1' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost = 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'meta.llama3-70b-instruct-v1:0' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost = 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'meta.llama3-8b-instruct-v1:0' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost = 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'meta.llama3-1-8b-instruct-v1:0' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost = 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'meta.llama3-1-70b-instruct-v1:0' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost = 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'mistral.mistral-7b-instruct-v0:2' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost = 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'mistral.mistral-large-2402-v1:0' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost = 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'mistral.mistral-large-2407-v1:0' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost = 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'mistral.mistral-small-2402-v1:0' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost = 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'mistral.mixtral-8x7b-instruct-v0:1' TotalCost = 0 InputTokenCount = 0 OutputTokenCount = 0 InputTokenCost = 0 OutputTokenCost = 0 } [PSCustomObject]@{ ModelId = 'stability.stable-diffusion-xl-v1' ImageCount = 0 ImageCost = 0 } ) #endregion #region model context variables $Global:pwshBedrockModelContext = @( [PSCustomObject]@{ ModelId = 'Converse' Context = New-Object System.Collections.Generic.List[object] } # [PSCustomObject]@{ # ModelId = 'ai21.j2-grande-instruct' # Context = New-Object System.Collections.Generic.List[object] # } # [PSCustomObject]@{ # ModelId = 'ai21.j2-jumbo-instruct' # Context = New-Object System.Collections.Generic.List[object] # } [PSCustomObject]@{ ModelId = 'ai21.jamba-instruct-v1:0' Context = New-Object System.Collections.Generic.List[object] } # [PSCustomObject]@{ # ModelId = 'ai21.j2-mid-v1' # Context = New-Object System.Collections.Generic.List[object] # } # [PSCustomObject]@{ # ModelId = 'ai21.j2-ultra-v1' # Context = New-Object System.Collections.Generic.List[object] # } # [PSCustomObject]@{ # ModelId = 'amazon.titan-image-generator-v1' # Context = New-Object System.Collections.Generic.List[object] # } [PSCustomObject]@{ ModelId = 'amazon.titan-text-express-v1' Context = '' } [PSCustomObject]@{ ModelId = 'amazon.titan-text-lite-v1' Context = '' } [PSCustomObject]@{ ModelId = 'amazon.titan-text-premier-v1:0' Context = '' } [PSCustomObject]@{ ModelId = 'amazon.titan-tg1-large' Context = '' } [PSCustomObject]@{ ModelId = 'anthropic.claude-v2:1' Context = New-Object System.Collections.Generic.List[object] } [PSCustomObject]@{ ModelId = 'anthropic.claude-3-haiku-20240307-v1:0' Context = New-Object System.Collections.Generic.List[object] } [PSCustomObject]@{ ModelId = 'anthropic.claude-3-opus-20240229-v1:0' Context = New-Object System.Collections.Generic.List[object] } [PSCustomObject]@{ ModelId = 'anthropic.claude-3-sonnet-20240229-v1:0' Context = New-Object System.Collections.Generic.List[object] } [PSCustomObject]@{ ModelId = 'anthropic.claude-3-5-sonnet-20240620-v1:0' Context = New-Object 
System.Collections.Generic.List[object] } # [PSCustomObject]@{ # ModelId = 'cohere.command-text-v14' # Context = New-Object System.Collections.Generic.List[object] # } # [PSCustomObject]@{ # ModelId = 'cohere.command-light-text-v14' # Context = New-Object System.Collections.Generic.List[object] # } [PSCustomObject]@{ ModelId = 'cohere.command-r-v1:0' Context = New-Object System.Collections.Generic.List[object] } [PSCustomObject]@{ ModelId = 'cohere.command-r-plus-v1:0' Context = New-Object System.Collections.Generic.List[object] } [PSCustomObject]@{ ModelId = 'meta.llama2-13b-chat-v1' Context = '' } [PSCustomObject]@{ ModelId = 'meta.llama2-70b-chat-v1' Context = '' } [PSCustomObject]@{ ModelId = 'meta.llama3-70b-instruct-v1:0' Context = '' } [PSCustomObject]@{ ModelId = 'meta.llama3-8b-instruct-v1:0' Context = '' } [PSCustomObject]@{ ModelId = 'meta.llama3-1-8b-instruct-v1:0' Context = '' } [PSCustomObject]@{ ModelId = 'meta.llama3-1-70b-instruct-v1:0' Context = '' } [PSCustomObject]@{ ModelId = 'mistral.mistral-7b-instruct-v0:2' Context = '' } [PSCustomObject]@{ ModelId = 'mistral.mistral-large-2402-v1:0' Context = '' } [PSCustomObject]@{ ModelId = 'mistral.mistral-large-2407-v1:0' Context = '' } [PSCustomObject]@{ ModelId = 'mistral.mistral-small-2402-v1:0' Context = '' } [PSCustomObject]@{ ModelId = 'mistral.mixtral-8x7b-instruct-v0:1' Context = '' } [PSCustomObject]@{ ModelId = 'stability.stable-diffusion-xl-v1' Context = New-Object System.Collections.Generic.List[object] } ) #endregion #region model info # https://docs.aws.amazon.com/bedrock/latest/userguide/models-supported.html # https://docs.aws.amazon.com/bedrock/latest/userguide/model-ids.html # https://aws.amazon.com/bedrock/pricing/ # https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html - supported models and model features #region anthropic # https://docs.anthropic.com/en/docs/models-overview#model-comparison # https://docs.anthropic.com/en/api/messages $script:anthropicModelInfo = @( [PSCustomObject]@{ ProviderName = 'Anthropic' ModelName = 'Claude' ModelId = 'anthropic.claude-v2:1' Description = 'Updated version of Claude 2 with improved accuracy' Strength = 'Legacy model - performs less well than Claude 3 models' Multilingual = $true Text = $true Document = $true Vision = $false SystemPrompt = $true ToolUse = $false ResponseStreamingSupported = $true ChatHistorySupported = $true ContextWindow = 200000 MaxOutput = 4096 TrainingCutoff = '01-01-2023' PayloadLimit = '20MB' InputTokenCost = 0.008 OutputTokenCost = 0.024 } [PSCustomObject]@{ ProviderName = 'Anthropic' ModelName = 'Claude 3 Haiku' ModelId = 'anthropic.claude-3-haiku-20240307-v1:0' Description = 'Fastest and most compact model for near-instant responsiveness' Strength = 'Quick and accurate targeted performance' Multilingual = $true Text = $true Document = $true Vision = $true SystemPrompt = $true ToolUse = $true ResponseStreamingSupported = $true ChatHistorySupported = $true ContextWindow = 200000 MaxOutput = 4096 TrainingCutoff = '08-01-2023' PayloadLimit = '20MB' InputTokenCost = 0.00025 OutputTokenCost = 0.00125 } [PSCustomObject]@{ ProviderName = 'Anthropic' ModelName = 'Claude 3 Sonnet' ModelId = 'anthropic.claude-3-sonnet-20240229-v1:0' Description = 'Ideal balance of intelligence and speed for enterprise workloads' Strength = 'Maximum utility at a lower price, dependable, balanced for scaled deployments' Multilingual = $true Text = $true Document = $true Vision = $true SystemPrompt = $true ToolUse = $true 
ResponseStreamingSupported = $true ChatHistorySupported = $true ContextWindow = 200000 MaxOutput = 4096 TrainingCutoff = '08-01-2023' PayloadLimit = '20MB' InputTokenCost = 0.003 OutputTokenCost = 0.015 } [PSCustomObject]@{ ProviderName = 'Anthropic' ModelName = 'Claude 3.5 Sonnet' ModelId = 'anthropic.claude-3-5-sonnet-20240620-v1:0' Description = 'Most intelligent model' Strength = 'Highest level of intelligence and capability' Multilingual = $true Text = $true Document = $false Vision = $true SystemPrompt = $true ToolUse = $true ResponseStreamingSupported = $true ChatHistorySupported = $true ContextWindow = 200000 MaxOutput = 4096 TrainingCutoff = '04-01-2024' PayloadLimit = '20MB' InputTokenCost = 0.003 OutputTokenCost = 0.015 } [PSCustomObject]@{ ProviderName = 'Anthropic' ModelName = 'Claude 3 Opus' ModelId = 'anthropic.claude-3-opus-20240229-v1:0' Description = 'Most powerful model for highly complex tasks' Strength = 'Top-level performance, intelligence, fluency, and understanding' Multilingual = $true Text = $true Document = $true Vision = $true SystemPrompt = $true ToolUse = $true ResponseStreamingSupported = $true ChatHistorySupported = $true ContextWindow = 200000 MaxOutput = 4096 TrainingCutoff = '08-01-2023' PayloadLimit = '20MB' InputTokenCost = 0.015 OutputTokenCost = 0.075 } ) #anthropicModelInfo #endregion #region amazon # https://docs.aws.amazon.com/bedrock/latest/userguide/titan-text-models.html # https://docs.aws.amazon.com/bedrock/latest/userguide/titan-image-models.html # https://aws.amazon.com/machine-learning/responsible-machine-learning/titan-text-premier/ $script:amazonModelInfo = @( [PSCustomObject]@{ ProviderName = 'Amazon' ModelName = 'Amazon Titan Text G1 - Premier' ModelId = 'amazon.titan-text-premier-v1:0' Description = @' Amazon Titan Text G1 - Premier is a large language model for text generation. It is useful for a wide range of tasks including open-ended and context-based question answering, code generation, and summarization. This model is integrated with Amazon Bedrock Knowledge Base and Amazon Bedrock Agents. The model also supports Custom Fine tuning in preview. '@ Strength = '32k context window, open-ended text generation, brainstorming, summarizations, code generation, table creation, data formatting, paraphrasing, chain of thought, rewrite, extraction, QnA, chat, Knowledge Base support, Agents support, Model Customization (preview)' Multilingual = $false Text = $true Document = $false Vision = $false SystemPrompt = $false ToolUse = $false ResponseStreamingSupported = $true ChatHistorySupported = $true ContextWindow = 32000 MaxOutput = 8192 TrainingCutoff = 'UNKNOWN' # ! Could not find this information in the documentation PayloadLimit = '' InputTokenCost = 0.0005 OutputTokenCost = 0.0015 } [PSCustomObject]@{ ProviderName = 'Amazon' ModelName = 'Titan Text G1 - Express' ModelId = 'amazon.titan-text-express-v1' Description = @' Amazon Titan Text G1 - Express is a large language model for text generation. It is useful for a wide range of advanced, general language tasks such as open-ended text generation and conversational chat, as well as support within Retrieval Augmented Generation (RAG). At launch, the model is optimized for English, with multilingual support for more than 30 additional languages available in preview.' '@ Strength = 'Retrieval augmented generation, open-ended text generation, brainstorming, summarizations, code generation, table creation, data formatting, paraphrasing, chain of thought, rewrite, extraction, QnA, and chat.' 
Multilingual = $true Text = $true Document = $true Vision = $false SystemPrompt = $false ToolUse = $false ResponseStreamingSupported = $true ChatHistorySupported = $true ContextWindow = 8000 MaxOutput = 8192 TrainingCutoff = 'UNKNOWN' # ! Could not find this information in the documentation PayloadLimit = '' InputTokenCost = 0.0002 OutputTokenCost = 0.0006 } [PSCustomObject]@{ ProviderName = 'Amazon' ModelName = 'Titan Text G1 - Lite' ModelId = 'amazon.titan-text-lite-v1' Description = @' Amazon Titan Text G1 - Lite is a light weight efficient model, ideal for fine-tuning of English-language tasks, including like summarizations and copy writing, where customers want a smaller, more cost-effective model that is also highly customizable.' '@ Strength = 'Open-ended text generation, brainstorming, summarizations, code generation, table creation, data formatting, paraphrasing, chain of thought, rewrite, extraction, QnA, and chat.' Multilingual = $false Text = $true Document = $true Vision = $false SystemPrompt = $false ToolUse = $false ResponseStreamingSupported = $true ChatHistorySupported = $true ContextWindow = 4000 MaxOutput = 4096 TrainingCutoff = 'UNKNOWN' # ! Could not find this information in the documentation PayloadLimit = '' InputTokenCost = 0.00015 OutputTokenCost = 0.0002 } [PSCustomObject]@{ ProviderName = 'Amazon' ModelName = 'Titan Text Large' ModelId = 'amazon.titan-tg1-large' Description = @' Amazon Titan Text G1 - Premier is a large language model for text generation. It is useful for a wide range of tasks including open-ended and context-based question answering, code generation, and summarization. This model is integrated with Amazon Bedrock Knowledge Base and Amazon Bedrock Agents. The model also supports Custom Fine tuning in preview.' '@ Strength = '32k context window, open-ended text generation, brainstorming, summarizations, code generation, table creation, data formatting, paraphrasing, chain of thought, rewrite, extraction, QnA, chat, Knowledge Base support, Agents support, Model Customization (preview)' Multilingual = $false Text = $true Document = $true Vision = $false SystemPrompt = $false ToolUse = $false ResponseStreamingSupported = $true ChatHistorySupported = $true ContextWindow = 32000 MaxOutput = 3072 TrainingCutoff = 'UNKNOWN' # ! Could not find this information in the documentation PayloadLimit = '' InputTokenCost = 0.0005 OutputTokenCost = 0.0015 } [PSCustomObject]@{ ProviderName = 'Amazon' ModelName = 'Titan Image Generator G1' ModelId = 'amazon.titan-image-generator-v1' Description = @' Amazon Titan Image Generator G1 is an image generation model. It generates images from text, and allows users to upload and edit an existing image. This model can generate images from natural language text and can also be used to edit or generate variations for an existing or a generated image. Users can edit an image with a text prompt (without a mask) or parts of an image with an image mask. You can extend the boundaries of an image with outpainting, and fill in an image with inpainting. It can also generate variations of an image based on an optional text prompt. 
'@ Strength = 'image generation, image editing, image variations' Multilingual = $false Text = $false Document = $false Vision = $true SystemPrompt = $false ToolUse = $false ResponseStreamingSupported = $false ChatHistorySupported = $true ContextWindow = '' MaxOutput = '' TrainingCutoff = '' PayloadLimit = '5MB' ImageCost = 0.012 # InputTokenCost = 0.01 # OutputTokenCost = 0.012 # pricing structure is different for image models } ) #amazonModelInfo #endregion #region AI21 Labs # https://docs.ai21.com/changelog/jurassic-2-and-task-specific-apis-are-now-available # https://docs.ai21.com/docs/jurassic-2-models # https://docs.ai21.com/docs/instruct-models # https://docs.ai21.com/reference/j2-complete-ref # https://docs.ai21.com/docs/choosing-the-right-instance-type-for-amazon-sagemaker-models # https://docs.ai21.com/docs/jamba-models # https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-jamba.html # https://docs.ai21.com/reference/jamba-instruct-api#response-details # https://docs.ai21.com/docs/migrating-from-jurassic-to-jamba # https://docs.ai21.com/docs/prompt-engineering $script:ai21ModelInfo = @( [PSCustomObject]@{ ProviderName = 'AI21 Labs' ModelName = 'Jamba-Instruct' ModelId = 'ai21.jamba-instruct-v1:0' Description = 'Built on top of our flagship base model, Jamba Instruct is tailored for commercial use. It is a chat model with instruction-following capability, and integrates safety features and guardrails. Most importantly, this model is optimized for real-world deployment. Jamba responses can include markdown; if you do not want markdown in any responses, indicate it in your system or initial contextual prompt' Strength = '256K context window, instruction following, chat capabilities, enhanced command comprehension.' Multilingual = $true Text = $true Document = $false Vision = $false SystemPrompt = $true ToolUse = $false ResponseStreamingSupported = $false ChatHistorySupported = $true ContextWindow = 256000 MaxOutput = 4096 TrainingCutoff = '02-01-2024' PayloadLimit = '' InputTokenCost = 0.0005 OutputTokenCost = 0.0007 } [PSCustomObject]@{ ProviderName = 'AI21 Labs' ModelName = 'J2 Grande Instruct' ModelId = 'ai21.j2-grande-instruct' Description = 'Designed specifically for generating text based on minimal context. Highly accurate, and can be fine-tuned to power smart chatbot and other conversational interfaces.' Strength = 'Designed to meticulously follow instructions. Trained specifically to handle instructions-only prompts ("zero-shot") without examples ("few-shot"). It is the most natural way to interact with large language models, and it is the best way to get a sense of the optimal output for your task without any examples.' Multilingual = $true Text = $true Document = $false Vision = $false SystemPrompt = $false ToolUse = $false ResponseStreamingSupported = $false ChatHistorySupported = $false ContextWindow = 8192 MaxOutput = 8191 TrainingCutoff = '03-01-2023' PayloadLimit = '' InputTokenCost = 0.0188 #! this pricing was not available in the documentation. keeping the same as ultra pricing. OutputTokenCost = 0.0188 #! this pricing was not available in the documentation. keeping the same as ultra pricing. } [PSCustomObject]@{ ProviderName = 'AI21 Labs' ModelName = 'J2 Jumbo Instruct' ModelId = 'ai21.j2-jumbo-instruct' Description = 'Similar to Grande-Instruct, but with superior language understanding and response generation capabilities. Ideal for users with more advanced conversational interface needs.' Strength = 'Designed to meticulously follow instructions. 
Trained specifically to handle instructions-only prompts ("zero-shot") without examples ("few-shot"). It is the most natural way to interact with large language models, and it is the best way to get a sense of the optimal output for your task without any examples.' Multilingual = $true Text = $true Document = $false Vision = $false SystemPrompt = $false ToolUse = $false ResponseStreamingSupported = $false ChatHistorySupported = $false ContextWindow = 8192 MaxOutput = 8191 TrainingCutoff = '03-01-2023' PayloadLimit = '' InputTokenCost = 0.0188 #! this pricing was not available in the documentation. keeping the same as ultra pricing. OutputTokenCost = 0.0188 #! this pricing was not available in the documentation. keeping the same as ultra pricing. } [PSCustomObject]@{ ProviderName = 'AI21 Labs' ModelName = 'Jurassic-2 Mid' ModelId = 'ai21.j2-mid-v1' Description = 'This model offers enhanced text generation capabilities, making it well-suited to language tasks with a greater degree of complexity.' Strength = 'Text generation based on prompting, Instruction following, Sentiment analysis, Summarization, Text recommendation including diversifying vocabulary, grammatical error correction, text segmentation, question and answering.' Multilingual = $true Text = $true Document = $false Vision = $false SystemPrompt = $false ToolUse = $false ResponseStreamingSupported = $false ChatHistorySupported = $false ContextWindow = 8192 MaxOutput = 8191 TrainingCutoff = '03-01-2023' PayloadLimit = '' InputTokenCost = 0.0125 OutputTokenCost = 0.0125 } [PSCustomObject]@{ ProviderName = 'AI21 Labs' ModelName = 'Jurassic-2 Ultra' ModelId = 'ai21.j2-ultra-v1' Description = 'As the largest and most powerful model in the Jurassic series, J2-Ultra is an ideal choice for the most complex language processing tasks and generative text applications.' Strength = 'Text generation based on prompting, Instruction following, Sentiment analysis, Summarization, Text recommendation including diversifying vocabulary, grammatical error correction, text segmentation, question and answering.' Multilingual = $true Text = $true Document = $false Vision = $false SystemPrompt = $false ToolUse = $false ResponseStreamingSupported = $false ChatHistorySupported = $false ContextWindow = 8192 MaxOutput = 8191 TrainingCutoff = '03-01-2023' PayloadLimit = '' InputTokenCost = 0.0188 OutputTokenCost = 0.0188 } ) #ai21ModelInfo #endregion #region Cohere # https://docs.cohere.com/docs/the-cohere-platform # https://docs.cohere.com/docs/models # https://docs.cohere.com/docs/command-r-plus # https://docs.cohere.com/docs/command-r # https://docs.cohere.com/docs/command-beta $script:cohereModelInfo = @( [PSCustomObject]@{ ProviderName = 'Cohere' ModelName = 'Command' ModelId = 'cohere.command-text-v14' Description = 'An instruction-following conversational model that performs language tasks with high quality, more reliably and with a longer context than our base generative models.' Strength = 'chat, summarize' Multilingual = $false Text = $true Document = $true Vision = $false SystemPrompt = $false ToolUse = $false ResponseStreamingSupported = $true ChatHistorySupported = $false ContextWindow = 4000 MaxOutput = 4000 TrainingCutoff = 'UNKNOWN' # ! Could not find this information in the documentation PayloadLimit = '' InputTokenCost = 0.0015 OutputTokenCost = 0.0020 } [PSCustomObject]@{ ProviderName = 'Cohere' ModelName = 'Command Light' ModelId = 'cohere.command-light-text-v14' Description = 'A smaller, faster version of command. 
Almost as capable, but a lot faster.' Strength = 'chat, summarize' Multilingual = $false Text = $true Document = $false Vision = $false SystemPrompt = $false ToolUse = $false ResponseStreamingSupported = $true ChatHistorySupported = $false ContextWindow = 4000 MaxOutput = 4000 TrainingCutoff = 'UNKNOWN' # ! Could not find this information in the documentation PayloadLimit = '' InputTokenCost = 0.0003 OutputTokenCost = 0.0006 } [PSCustomObject]@{ ProviderName = 'Cohere' ModelName = 'Command R' ModelId = 'cohere.command-r-v1:0' Description = 'Command R is an instruction-following conversational model that performs language tasks at a higher quality, more reliably, and with a longer context than previous models.' Strength = 'chat, complex workflows like code generation, retrieval augmented generation (RAG), tool use, and agents.' Multilingual = $true Text = $true Document = $true Vision = $false SystemPrompt = $true ToolUse = $true ResponseStreamingSupported = $true ChatHistorySupported = $true ContextWindow = 128000 MaxOutput = 4000 TrainingCutoff = '04-01-2024' PayloadLimit = '' InputTokenCost = 0.0005 OutputTokenCost = 0.0015 } [PSCustomObject]@{ ProviderName = 'Cohere' ModelName = 'Command R+' ModelId = 'cohere.command-r-plus-v1:0' Description = 'Command R+ is an instruction-following conversational model that performs language tasks at a higher quality, more reliably, and with a longer context than previous models.' Strength = 'chat, best suited for complex RAG workflows and multi-step tool use.' Multilingual = $true Text = $true Document = $true Vision = $false SystemPrompt = $true ToolUse = $true ResponseStreamingSupported = $true ChatHistorySupported = $true ContextWindow = 128000 MaxOutput = 4000 TrainingCutoff = '04-01-2024' PayloadLimit = '' InputTokenCost = 0.0030 OutputTokenCost = 0.0150 } ) #cohereModelInfo #endregion #region Meta # https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-meta.html # https://huggingface.co/blog/llama2#how-to-prompt-llama-2 # https://llama.meta.com/docs/model-cards-and-prompt-formats/meta-llama-2/ # https://github.com/meta-llama/llama/blob/main/MODEL_CARD.md # https://llama.meta.com/docs/model-cards-and-prompt-formats/meta-llama-3/ # https://github.com/meta-llama/llama3/blob/main/MODEL_CARD.md # https://llama.meta.com/docs/model-cards-and-prompt-formats/llama3_1 # https://github.com/meta-llama/llama-models/blob/main/models/llama3_1/MODEL_CARD.md $script:metaModelInfo = @( [PSCustomObject]@{ ProviderName = 'Meta' ModelName = 'Llama 2 Chat 13B' ModelId = 'meta.llama2-13b-chat-v1' Description = 'Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM.' Strength = 'Tuned models are intended for assistant-like chat' Multilingual = $false Text = $true Document = $true Vision = $false SystemPrompt = $true ToolUse = $false ResponseStreamingSupported = $true ChatHistorySupported = $true ContextWindow = 4000 MaxOutput = 2048 TrainingCutoff = '07-01-2023' PayloadLimit = '' InputTokenCost = 0.00075 OutputTokenCost = 0.001 } [PSCustomObject]@{ ProviderName = 'Meta' ModelName = 'Llama 2 Chat 70B' ModelId = 'meta.llama2-70b-chat-v1' Description = 'Our fine-tuned LLMs, called Llama-2-Chat, are optimized for dialogue use cases. 
Llama-2-Chat models outperform open-source chat models on most benchmarks we tested, and in our human evaluations for helpfulness and safety, are on par with some popular closed-source models like ChatGPT and PaLM.' Strength = 'Tuned models are intended for assistant-like chat' Multilingual = $false Text = $true Document = $true Vision = $false SystemPrompt = $true ToolUse = $false ResponseStreamingSupported = $true ChatHistorySupported = $true ContextWindow = 4000 MaxOutput = 2048 TrainingCutoff = '07-01-2023' PayloadLimit = '' InputTokenCost = 0.00195 OutputTokenCost = 0.00256 } [PSCustomObject]@{ ProviderName = 'Meta' ModelName = 'Llama 3 8B Instruct' ModelId = 'meta.llama3-8b-instruct-v1:0' Description = 'The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks.' Strength = 'Instruction tuned models are intended for assistant-like chat' Multilingual = $false Text = $true Document = $true Vision = $false SystemPrompt = $true ToolUse = $false ResponseStreamingSupported = $true ChatHistorySupported = $true ContextWindow = 8000 MaxOutput = 2048 TrainingCutoff = '03-01-2023' PayloadLimit = '' InputTokenCost = 0.0004 OutputTokenCost = 0.0006 } [PSCustomObject]@{ ProviderName = 'Meta' ModelName = 'Llama 3 70B Instruct' ModelId = 'meta.llama3-70b-instruct-v1:0' Description = 'The Llama 3 instruction tuned models are optimized for dialogue use cases and outperform many of the available open source chat models on common industry benchmarks.' Strength = 'Instruction tuned models are intended for assistant-like chat' Multilingual = $false Text = $true Document = $true Vision = $false SystemPrompt = $true ToolUse = $false ResponseStreamingSupported = $true ChatHistorySupported = $true ContextWindow = 8000 MaxOutput = 2048 TrainingCutoff = '12-01-2023' PayloadLimit = '' InputTokenCost = 0.00265 OutputTokenCost = 0.0035 } [PSCustomObject]@{ ProviderName = 'Meta' ModelName = 'Llama 3.1 8B Instruct' ModelId = 'meta.llama3-1-8b-instruct-v1:0' Description = 'Light-weight, ultra-fast model. Instruction tuned text only models are intended for assistant-like chat.' Strength = 'best suited for limited computational power and resources. The model excels at text summarization, text classification, sentiment analysis, and language translation requiring low-latency inferencing.' Multilingual = $false Text = $true Document = $true Vision = $false SystemPrompt = $true ToolUse = $false ResponseStreamingSupported = $true ChatHistorySupported = $true ContextWindow = 128000 MaxOutput = 2048 TrainingCutoff = '12-01-2023' PayloadLimit = '' InputTokenCost = 0.00265 OutputTokenCost = 0.0035 } [PSCustomObject]@{ ProviderName = 'Meta' ModelName = 'Llama 3.1 70B Instruct' ModelId = 'meta.llama3-1-70b-instruct-v1:0' Description = 'Highly performant, cost effective model that enables diverse use cases. Instruction tuned text only models are intended for assistant-like chat.' Strength = 'ideal for content creation, conversational AI, language understanding, R&D, and enterprise applications. The model excels at text summarization and accuracy, text classification, sentiment analysis and nuance reasoning, language modeling, dialogue systems, code generation, and following instructions.' 
Multilingual = $false Text = $true Document = $true Vision = $false SystemPrompt = $true ToolUse = $false ResponseStreamingSupported = $true ChatHistorySupported = $true ContextWindow = 128000 MaxOutput = 2048 TrainingCutoff = '12-01-2023' PayloadLimit = '' InputTokenCost = 0.00265 OutputTokenCost = 0.0035 } ) #metaModelInfo #endregion #region Mistral AI # https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-mistral-text-completion.html # https://docs.mistral.ai/getting-started/models/ $script:mistralAIModelInfo = @( [PSCustomObject]@{ ProviderName = 'Mistral AI' ModelName = 'Mistral 7B Instruct' ModelId = 'mistral.mistral-7b-instruct-v0:2' Description = 'The first dense model released by Mistral AI, perfect for experimentation, customization, and quick iteration' Strength = 'interpret and act on detailed instruction' Multilingual = $false Text = $true Document = $true Vision = $false SystemPrompt = $false ToolUse = $false ResponseStreamingSupported = $true ChatHistorySupported = $true ContextWindow = 32000 MaxOutput = 8192 TrainingCutoff = 'UNKNOWN' # ! Could not find this information in the documentation PayloadLimit = '' InputTokenCost = 0.00015 OutputTokenCost = 0.0002 } [PSCustomObject]@{ ProviderName = 'Mistral AI' ModelName = 'Mixtral 8X7B Instruct' ModelId = 'mistral.mixtral-8x7b-instruct-v0:1' Description = 'A sparse mixture of experts model. As such, it leverages up to 45B parameters but only uses about 12B during inference, leading to better inference throughput at the cost of more vRAM.' Strength = 'Data extraction, Summarizing a Document, Writing emails, Writing a Job Description, or Writing Product Description' Multilingual = $true Text = $true Document = $true Vision = $false SystemPrompt = $false ToolUse = $false ResponseStreamingSupported = $true ChatHistorySupported = $true ContextWindow = 32000 MaxOutput = 4096 TrainingCutoff = 'UNKNOWN' # ! Could not find this information in the documentation PayloadLimit = '' InputTokenCost = 0.00045 OutputTokenCost = 0.0007 } [PSCustomObject]@{ ProviderName = 'Mistral AI' ModelName = 'Mistral Large' ModelId = 'mistral.mistral-large-2402-v1:0' Description = "Our flagship model that's ideal for complex tasks that require large reasoning capabilities or are highly specialized." Strength = 'Synthetic Text Generation, Code Generation, RAG, or Agents' Multilingual = $true Text = $true Document = $true Vision = $false SystemPrompt = $true ToolUse = $true ResponseStreamingSupported = $true ChatHistorySupported = $true ContextWindow = 32000 MaxOutput = 8192 TrainingCutoff = 'UNKNOWN' # ! Could not find this information in the documentation PayloadLimit = '' InputTokenCost = 0.004 OutputTokenCost = 0.012 } [PSCustomObject]@{ ProviderName = 'Mistral AI' ModelName = 'Mistral Small' ModelId = 'mistral.mistral-small-2402-v1:0' Description = 'Suitable for simple tasks that one can do in bulk.' Strength = 'Classification, Customer Support, or Text Generation' Multilingual = $true Text = $true Document = $false Vision = $false SystemPrompt = $true ToolUse = $true ResponseStreamingSupported = $true ChatHistorySupported = $true ContextWindow = 32000 MaxOutput = 8192 TrainingCutoff = 'UNKNOWN' # ! 
Could not find this information in the documentation PayloadLimit = '' InputTokenCost = 0.001 OutputTokenCost = 0.003 } [PSCustomObject]@{ ProviderName = 'Mistral AI' ModelName = 'Mistral Large (2407)' ModelId = 'mistral.mistral-large-2407-v1:0' Description = 'The latest version of Mistral AI flagship large language model, with significant improvements on multilingual accuracy, conversational behavior, coding capabilities, reasoning and instruction-following behavior.' Strength = 'multilingual translation, text summarization, complex multilingual reasoning tasks, math and coding tasks including code generation' Multilingual = $true Text = $true Document = $false Vision = $false SystemPrompt = $true ToolUse = $true ResponseStreamingSupported = $true ChatHistorySupported = $true ContextWindow = 128000 MaxOutput = 8192 TrainingCutoff = 'UNKNOWN' # ! Could not find this information in the documentation PayloadLimit = '' InputTokenCost = 0.001 OutputTokenCost = 0.003 } ) #mistralModelInfo #endregion #region Stability AI $script:stabilityAIModelInfo = @( [PSCustomObject]@{ ProviderName = 'Stability AI' ModelName = 'Stable Diffusion XL' ModelId = 'stability.stable-diffusion-xl-v1' Model = '' Description = 'Stable Diffusion XL generates images of high quality in virtually any art style and is the best open model for photorealism.' Strength = 'Develop unlimited creative assets and ideate with images.' Multilingual = $false Text = $false Document = $false Vision = $true SystemPrompt = $false ToolUse = $false ResponseStreamingSupported = $false ChatHistorySupported = $false ContextWindow = '' MaxOutput = '' TrainingCutoff = '' PayloadLimit = '' #! Couldn't find in documentation ImageCost = @{ Over50Steps = 0.08 Under50Steps = 0.04 } # InputTokenCost = 0.01 # OutputTokenCost = 0.012 # pricing structure is different for image models } ) #ai21ModelInfo #endregion #endregion <# .SYNOPSIS Updates the cost estimate for a model based on the usage. .DESCRIPTION This function updates the global variables that tally the cost of models used during the session. It calculates the cost based on token usage and adds it to the global session total. .EXAMPLE Add-ModelCostEstimate -Usage $usage -ModelID 'anthropic.claude-v2:1' Adds the cost estimate for the model 'anthropic.claude-v2:1' to the global tally variables. .PARAMETER Usage Token usage object returned by the API. .PARAMETER Message The message that was sent to the model. .PARAMETER ImageCount Image count returned by the API. .PARAMETER Steps Number of steps to run the image model for. .PARAMETER ModelID The unique identifier of the model. .OUTPUTS None .NOTES Tally estimates are approximations. The actual cost may vary. * Note: Image models pass their image count and steps to the cost estimate function. 
.COMPONENT pwshBedrock #> function Add-ModelCostEstimate { [CmdletBinding()] param ( [Parameter(Mandatory = $true, HelpMessage = 'Token usage object returned by the API.', ParameterSetName = 'Token')] [ValidateNotNullOrEmpty()] [object]$Usage, [Parameter(Mandatory = $false, HelpMessage = 'The message that was sent to the model.', ParameterSetName = 'Token')] [ValidateNotNullOrEmpty()] [string]$Message, [Parameter(Mandatory = $true, HelpMessage = 'Image count returned by the API.', ParameterSetName = 'Image')] [ValidateNotNullOrEmpty()] [int]$ImageCount, [Parameter(Mandatory = $false, HelpMessage = 'Number of steps to run the image model for.', ParameterSetName = 'Image')] [int]$Steps, [Parameter(Mandatory = $true, HelpMessage = 'The unique identifier of the model.')] [ValidateSet( 'ai21.j2-grande-instruct', 'ai21.j2-jumbo-instruct', 'ai21.jamba-instruct-v1:0', 'ai21.j2-mid-v1', 'ai21.j2-ultra-v1', 'amazon.titan-image-generator-v1', 'amazon.titan-text-express-v1', 'amazon.titan-text-lite-v1', 'amazon.titan-text-premier-v1:0', 'amazon.titan-tg1-large', 'anthropic.claude-v2:1', 'anthropic.claude-3-haiku-20240307-v1:0', 'anthropic.claude-3-opus-20240229-v1:0', 'anthropic.claude-3-sonnet-20240229-v1:0', 'anthropic.claude-3-5-sonnet-20240620-v1:0', 'cohere.command-text-v14', 'cohere.command-light-text-v14', 'cohere.command-r-v1:0', 'cohere.command-r-plus-v1:0', 'meta.llama2-13b-chat-v1', 'meta.llama2-70b-chat-v1', 'meta.llama3-70b-instruct-v1:0', 'meta.llama3-8b-instruct-v1:0', 'meta.llama3-1-8b-instruct-v1:0', 'meta.llama3-1-70b-instruct-v1:0', 'mistral.mistral-7b-instruct-v0:2', 'mistral.mistral-large-2402-v1:0', 'mistral.mistral-large-2407-v1:0', 'mistral.mistral-small-2402-v1:0', 'mistral.mixtral-8x7b-instruct-v0:1', 'stability.stable-diffusion-xl-v1' )] [string]$ModelID, [Parameter(Mandatory = $false, HelpMessage = 'Indicates that model was called through the Converse API.', ParameterSetName = 'Token' )] [switch]$Converse ) $modelTally = $Global:pwshBedRockSessionModelTally | Where-Object { $_.ModelID -eq $ModelID } switch ($PSCmdlet.ParameterSetName) { Token { if ($Converse) { $inputTokenCount = $Usage.InputTokens $outputTokenCount = $Usage.OutputTokens } #if_converse else { switch ($ModelID) { 'ai21.j2-grande-instruct' { $inputTokenCount = $Usage.prompt.tokens[-1].textRange.end $outputTokenCount = $Usage.completions[-1].data.tokens[-1].textRange.end } 'ai21.j2-jumbo-instruct' { $inputTokenCount = $Usage.prompt.tokens[-1].textRange.end $outputTokenCount = $Usage.completions[-1].data.tokens[-1].textRange.end } 'ai21.jamba-instruct-v1:0' { $inputTokenCount = $Usage.prompt_tokens $outputTokenCount = $Usage.completion_tokens } 'ai21.j2-mid-v1' { $inputTokenCount = $Usage.prompt.tokens[-1].textRange.end $outputTokenCount = $Usage.completions[-1].data.tokens[-1].textRange.end } 'ai21.j2-ultra-v1' { $inputTokenCount = $Usage.prompt.tokens[-1].textRange.end $outputTokenCount = $Usage.completions[-1].data.tokens[-1].textRange.end } 'amazon.titan-text-express-v1' { $inputTokenCount = $Usage.'inputTextTokenCount' $outputTokenCount = $Usage.results.tokenCount } 'amazon.titan-text-lite-v1' { $inputTokenCount = $Usage.'inputTextTokenCount' $outputTokenCount = $Usage.results.tokenCount } 'amazon.titan-text-premier-v1:0' { $inputTokenCount = $Usage.'inputTextTokenCount' $outputTokenCount = $Usage.results.tokenCount } 'amazon.titan-tg1-large' { $inputTokenCount = $Usage.'inputTextTokenCount' $outputTokenCount = $Usage.results.tokenCount } 'anthropic.claude-v2:1' { $inputTokenCount = $Usage.'input_tokens' 
$outputTokenCount = $Usage.'output_tokens' } 'anthropic.claude-3-haiku-20240307-v1:0' { $inputTokenCount = $Usage.'input_tokens' $outputTokenCount = $Usage.'output_tokens' } 'anthropic.claude-3-opus-20240229-v1:0' { $inputTokenCount = $Usage.'input_tokens' $outputTokenCount = $Usage.'output_tokens' } 'anthropic.claude-3-sonnet-20240229-v1:0' { $inputTokenCount = $Usage.'input_tokens' $outputTokenCount = $Usage.'output_tokens' } 'anthropic.claude-3-5-sonnet-20240620-v1:0' { $inputTokenCount = $Usage.'input_tokens' $outputTokenCount = $Usage.'output_tokens' } 'cohere.command-text-v14' { # this model does not return token counts, but does return the prompt and completion text # so, we can calculate the token counts based on the text length $inputTokenCount = Get-TokenCountEstimate -Text $Usage.prompt # because this model supports multiple generations, we need to sum the token counts foreach ($textGeneration in $Usage.generations.text) { $outputTokenCount += Get-TokenCountEstimate -Text $textGeneration } } 'cohere.command-light-text-v14' { # this model does not return token counts, but does return the prompt and completion text # so, we can calculate the token counts based on the text length $inputTokenCount = Get-TokenCountEstimate -Text $Usage.prompt foreach ($textGeneration in $Usage.generations.text) { $outputTokenCount += Get-TokenCountEstimate -Text $textGeneration } } 'cohere.command-r-v1:0' { $inputTokenCount = Get-TokenCountEstimate -Text $Message $outputTokenCount = Get-TokenCountEstimate -Text $Usage.text } 'cohere.command-r-plus-v1:0' { $inputTokenCount = Get-TokenCountEstimate -Text $Message $outputTokenCount = Get-TokenCountEstimate -Text $Usage.text } 'meta.llama2-13b-chat-v1' { $inputTokenCount = $Usage.prompt_token_count $outputTokenCount = $Usage.generation_token_count } 'meta.llama2-70b-chat-v1' { $inputTokenCount = $Usage.prompt_token_count $outputTokenCount = $Usage.generation_token_count } 'meta.llama3-70b-instruct-v1:0' { $inputTokenCount = $Usage.prompt_token_count $outputTokenCount = $Usage.generation_token_count } 'meta.llama3-8b-instruct-v1:0' { $inputTokenCount = $Usage.prompt_token_count $outputTokenCount = $Usage.generation_token_count } 'meta.llama3-1-8b-instruct-v1:0' { $inputTokenCount = $Usage.prompt_token_count $outputTokenCount = $Usage.generation_token_count } 'meta.llama3-1-70b-instruct-v1:0' { $inputTokenCount = $Usage.prompt_token_count $outputTokenCount = $Usage.generation_token_count } 'mistral.mistral-7b-instruct-v0:2' { $inputTokenCount = Get-TokenCountEstimate -Text $Message $outputTokenCount = Get-TokenCountEstimate -Text $Usage.outputs.text } 'mistral.mistral-large-2402-v1:0' { # this model can return different results depending on the calling API used if ($Usage.choices.message.role -is [string]) { $inputTokenCount = Get-TokenCountEstimate -Text $Message if ($Usage.choices.stop_reason -eq 'tool_calls') { $outputTokenCount = Get-TokenCountEstimate -Text $Usage.choices.message.tool_calls.function.arguments } else { $outputTokenCount = Get-TokenCountEstimate -Text $Usage.choices.message.content } } else { $inputTokenCount = Get-TokenCountEstimate -Text $Message $outputTokenCount = Get-TokenCountEstimate -Text $Usage.outputs.text } } 'mistral.mistral-large-2407-v1:0' { # this model can return different results depending on the calling API used if ($Usage.choices.message.role -is [string]) { $inputTokenCount = Get-TokenCountEstimate -Text $Message if ($Usage.choices.stop_reason -eq 'tool_calls') { $outputTokenCount = Get-TokenCountEstimate -Text 
$Usage.choices.message.tool_calls.function.arguments
                                }
                                else {
                                    $outputTokenCount = Get-TokenCountEstimate -Text $Usage.choices.message.content
                                }
                            }
                            else {
                                $inputTokenCount = Get-TokenCountEstimate -Text $Message
                                $outputTokenCount = Get-TokenCountEstimate -Text $Usage.outputs.text
                            }
                        }
                        'mistral.mistral-small-2402-v1:0' {
                            $inputTokenCount = Get-TokenCountEstimate -Text $Message
                            $outputTokenCount = Get-TokenCountEstimate -Text $Usage.outputs.text
                        }
                        'mistral.mixtral-8x7b-instruct-v0:1' {
                            $inputTokenCount = Get-TokenCountEstimate -Text $Message
                            $outputTokenCount = Get-TokenCountEstimate -Text $Usage.outputs.text
                        }
                    }
                } #else_converse
                if ($null -eq $Steps -or $Steps -eq 0) {
                    $Steps = 1
                }
                Write-Verbose -Message ('Adding cost estimates for model {0}' -f $ModelID)
                $costInfo = Get-ModelCostEstimate -InputTokenCount $inputTokenCount -OutputTokenCount $outputTokenCount -ModelID $ModelID
                Write-Debug -Message ($costInfo | Out-String)
                $Global:pwshBedRockSessionCostEstimate += $costInfo.Total
                $modelTally.TotalCost += $costInfo.Total
                $modelTally.InputTokenCount += $inputTokenCount
                $modelTally.OutputTokenCount += $outputTokenCount
                $modelTally.InputTokenCost += $costInfo.InputCost
                $modelTally.OutputTokenCost += $costInfo.OutputCost
            } #token
            Image {
                $costInfo = Get-ModelCostEstimate -ImageCount $ImageCount -Steps $Steps -ModelID $ModelID
                Write-Debug -Message ($costInfo | Out-String)
                $Global:pwshBedRockSessionCostEstimate += $costInfo.ImageCost
                $modelTally.ImageCount += $ImageCount
                $modelTally.ImageCost += $costInfo.ImageCost
            } #image
        } #switch_parameterSetName
} #Add-ModelCostEstimate

<#
.SYNOPSIS
    Converts a base64 string to bytes.
.DESCRIPTION
    This function converts a base64 string to a byte array using the System.Convert class.
    It reads the base64 string and converts it to bytes.
.EXAMPLE
    Convert-FromBase64ToByte -Base64String $base64

    Converts the base64 string to bytes.
.PARAMETER Base64String
    Base64 string to convert to a media file.
.OUTPUTS
    System.Byte[]
.NOTES
    This function is a wrapper around the System.Convert class, which is not mockable in tests.
.COMPONENT
    pwshBedrock
#>
function Convert-FromBase64ToByte {
    [CmdletBinding()]
    [OutputType([System.Byte[]])]
    param (
        [Parameter(Mandatory = $false,
            HelpMessage = 'Base64 string to convert to a media file.')]
        [ValidateNotNull()]
        [ValidateNotNullOrEmpty()]
        [string]$Base64String
    )

    Write-Verbose -Message 'Converting from base64'
    try {
        $bytes = [Convert]::FromBase64String($Base64String)
    }
    catch {
        Write-Warning -Message 'Failed to convert from base64'
        throw
    }

    return $bytes
} #Convert-FromBase64ToByte
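<#
    Illustrative round trip using the base64 helper functions in this file, kept as a
    comment so that nothing executes at import time. The file paths are hypothetical,
    and these are private helpers, so this sketch only applies when working inside the
    module (for example, when dot-sourcing during development).

    $base64 = Convert-MediaToBase64 -MediaPath 'C:\path\to\image.jpg'
    $bytes  = Convert-FromBase64ToByte -Base64String $base64
    [System.IO.File]::WriteAllBytes('C:\path\to\image-copy.jpg', $bytes)
#>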
<#
.SYNOPSIS
    Converts a media file to a base64 string.
.DESCRIPTION
    This function converts a specified media file to a base64 string using the System.IO.File namespace.
    It reads the file bytes and encodes them in base64 format.
.EXAMPLE
    Convert-MediaToBase64 -MediaPath 'C:\path\to\image.jpg'

    Converts the image located at 'C:\path\to\image.jpg' to a base64 string.
.PARAMETER MediaPath
    File path to local media file.
.OUTPUTS
    System.String
.NOTES
    This function is a wrapper around the System.IO.File namespace, which is not mockable in tests.
.COMPONENT
    pwshBedrock
#>
function Convert-MediaToBase64 {
    [CmdletBinding()]
    [OutputType([string])]
    param (
        [Parameter(Mandatory = $false,
            HelpMessage = 'File path to local media file.')]
        [ValidateNotNull()]
        [ValidateNotNullOrEmpty()]
        [string]$MediaPath
    )

    Write-Verbose -Message ('{0} Converting to base64' -f $MediaPath)
    try {
        $base64 = [Convert]::ToBase64String([System.IO.File]::ReadAllBytes($MediaPath))
    }
    catch {
        Write-Warning -Message ('Failed to convert {0} to base64' -f $MediaPath)
        throw
    }

    return $base64
} #Convert-MediaToBase64

<#
.SYNOPSIS
    Converts a media file to a MemoryStream.
.DESCRIPTION
    Reads the bytes of a media file and converts them to a MemoryStream.
.EXAMPLE
    Convert-MediaToMemoryStream -MediaPath 'C:\path\to\image.jpg'

    This example reads the bytes of the image.jpg file and converts them to a MemoryStream.
.PARAMETER MediaPath
    File path to local media file.
.OUTPUTS
    System.IO.MemoryStream
.NOTES
    This function is a wrapper around the System.IO.File namespace, which is not mockable in tests.
.COMPONENT
    pwshBedrock
#>
function Convert-MediaToMemoryStream {
    [CmdletBinding()]
    [OutputType([System.IO.MemoryStream])]
    param (
        [Parameter(Mandatory = $false,
            HelpMessage = 'File path to local media file.')]
        [ValidateNotNull()]
        [ValidateNotNullOrEmpty()]
        [string]$MediaPath
    )

    Write-Verbose -Message ('Reading Bytes for {0}' -f $MediaPath)
    try {
        $fileBytes = [System.IO.File]::ReadAllBytes($MediaPath)
    }
    catch {
        Write-Warning -Message ('Failed to get Bytes for {0}' -f $MediaPath)
        throw
    }

    if ($fileBytes) {
        Write-Debug -Message ('Converting Bytes to MemoryStream for {0}' -f $MediaPath)
        $memoryStream = [System.IO.MemoryStream]::new()
        $memoryStream.Write($fileBytes, 0, $fileBytes.Length)
    }
    else {
        Write-Warning -Message ('No file bytes were returned for {0}' -f $MediaPath)
        throw
    }

    return $memoryStream
} #Convert-MediaToMemoryStream
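<#
    Illustrative sketch of how Format-AI21LabsJambaModel (below) builds conversation
    context, kept as a comment so that nothing executes at import time. The messages are
    made up; the behavior shown follows the function logic in this file: each call appends
    a role/content object to the Jamba entry in $Global:pwshBedrockModelContext unless
    -NoContextPersist $true is supplied, in which case only the formatted object is returned.

    Format-AI21LabsJambaModel -Role 'system' -Message 'You are a helpful assistant.' -ModelID 'ai21.jamba-instruct-v1:0'
    Format-AI21LabsJambaModel -Role 'user' -Message 'Hello, how are you?' -ModelID 'ai21.jamba-instruct-v1:0'

    # assuming the Jamba context started empty, it now holds the system and user turns:
    ($Global:pwshBedrockModelContext | Where-Object { $_.ModelID -eq 'ai21.jamba-instruct-v1:0' }).Context
#>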
<#
.SYNOPSIS
    Formats a message to be sent to an AI21 Labs Jamba model.
.DESCRIPTION
    This function formats a message to be sent to an AI21 Labs Jamba model.
.EXAMPLE
    Format-AI21LabsJambaModel -Role 'user' -Message 'Hello, how are you?' -ModelID 'ai21.jamba-instruct-v1:0'

    This example formats a message to be sent to the AI21 Labs Jamba model 'ai21.jamba-instruct-v1:0'.
.PARAMETER Role
    The role of the message sender.
.PARAMETER Message
    The message to be sent to the model.
.PARAMETER ModelID
    The unique identifier of the model.
.PARAMETER NoContextPersist
    Do not persist the conversation context history.
    If this parameter is specified, you will not be able to have a continuous conversation with the model.
.OUTPUTS
    System.Management.Automation.PSCustomObject
.NOTES
    The model requires a specific format for the message. This function formats the message accordingly.
    This model uses object based updates to the context instead of a single string.
.COMPONENT
    pwshBedrock
#>
function Format-AI21LabsJambaModel {
    [CmdletBinding()]
    param (
        [Parameter(Mandatory = $true,
            HelpMessage = 'The role of the message sender.')]
        [ValidateSet('user', 'assistant', 'system')]
        [string]$Role,

        [Parameter(Mandatory = $false,
            HelpMessage = 'The message to be sent to the model.')]
        [ValidateNotNullOrEmpty()]
        [string]$Message,

        [Parameter(Mandatory = $true,
            HelpMessage = 'The unique identifier of the model.')]
        [ValidateSet(
            'ai21.jamba-instruct-v1:0'
        )]
        [string]$ModelID,

        [Parameter(Mandatory = $false,
            HelpMessage = 'Do not persist the conversation context history.')]
        [bool]$NoContextPersist = $false
    )

    Write-Verbose -Message 'Formatting AI 21 Labs Jamba Message'

    $contextEval = $Global:pwshBedrockModelContext | Where-Object { $_.ModelID -eq $ModelID }
    if ($contextEval.Context -eq '' -or $null -eq $contextEval.Context -or $contextEval.Context.Count -eq 0) {
        Write-Debug -Message 'No context found. First message.'
        $firstMessage = $true
    }
    else {
        $firstMessage = $false
    }

    switch ($Role) {
        'system' {
            if ($firstMessage -eq $true) {
                $obj = [PSCustomObject]@{
                    role    = 'system'
                    content = $Message
                }
            }
            else {
                # we need to determine if the context already has a system message
                # if it does, we need to replace it with the new system message
                # if it does not, we need to add the new system message
                $obj = $contextEval.Context | Where-Object { $_.role -eq 'system' }
                if ($null -eq $obj) {
                    $obj = [PSCustomObject]@{
                        role    = 'system'
                        content = $Message
                    }
                }
                else {
                    $obj.content = $Message
                    return
                }
            }
        }
        'user' {
            $obj = [PSCustomObject]@{
                role    = 'user'
                content = $Message
            }
        }
        'assistant' {
            $obj = [PSCustomObject]@{
                role    = 'assistant'
                content = $Message
            }
        }
    } #switch_role

    Write-Debug -Message ('Formatted message: {0}' -f ($obj | Out-String) )

    if ($NoContextPersist -eq $false) {
        $contextObj = $Global:pwshBedrockModelContext | Where-Object { $_.ModelID -eq $ModelID }
        $contextObj.Context.Add($obj)
        $returnContext = $contextObj.Context
    }
    else {
        $returnContext = $obj
    }
    Write-Debug 'out of Format-AI21LabsJambaModel'
    return $returnContext
} #Format-AI21LabsJambaModel

<#
.SYNOPSIS
    Formats a message to be sent to an Amazon Titan model.
.DESCRIPTION
    This function formats a message to be sent to an Amazon Titan model.
.EXAMPLE
    Format-AmazonTextMessage -Role 'User' -Message 'Hello, how are you?' -ModelID 'amazon.titan-tg1-large'

    Formats a text message to be sent to the Amazon Titan model.
.EXAMPLE
    Format-AmazonTextMessage -Role 'User' -Message 'Hello, how are you?' -ModelID 'amazon.titan-tg1-large' -NoContextPersist $true

    Formats a text message to be sent to the Amazon Titan model without persisting the conversation context history.
.PARAMETER Role
    The role of the message sender. Valid values are 'User' or 'Bot'.
.PARAMETER Message
    The message to be sent to the model.
.PARAMETER ModelID
    The unique identifier of the model.
.PARAMETER NoContextPersist
    Do not persist the conversation context history.
    If this parameter is specified, you will not be able to have a continuous conversation with the model.
.OUTPUTS
    System.Management.Automation.PSCustomObject
.NOTES
    The model requires a specific format for the message. This function formats the message accordingly.
.COMPONENT pwshBedrock #> function Format-AmazonTextMessage { [CmdletBinding()] param ( [Parameter(Mandatory = $true, HelpMessage = 'The role of the message sender.')] [ValidateSet('User', 'Bot')] [string]$Role, [Parameter(Mandatory = $true, HelpMessage = 'The message to be sent to the model.')] [ValidateNotNullOrEmpty()] [string]$Message, [Parameter(Mandatory = $true, HelpMessage = 'The unique identifier of the model.')] [ValidateSet( 'amazon.titan-text-lite-v1', 'amazon.titan-text-express-v1', 'amazon.titan-tg1-large', 'amazon.titan-text-premier-v1:0' )] [string]$ModelID, [Parameter(Mandatory = $false, HelpMessage = 'Do not persist the conversation context history.')] [bool]$NoContextPersist = $false ) Write-Verbose -Message 'Formatting Amazon Titan Message' if ($Role -eq 'User') { $str = "User: $Message`n" } elseif ($Role -eq 'Bot') { $str = "$Message`n" } Write-Debug -Message ('Formatted message: {0}' -f $str) if ($NoContextPersist -eq $false) { $contextObj = $Global:pwshBedrockModelContext | Where-Object { $_.ModelID -eq $ModelID } $contextObj.Context += $str $returnContext = $contextObj.Context } else { $returnContext = $str } Write-Debug 'out of Format-AmazonTextMessage' return $returnContext } #Format-AmazonTextMessage <# .SYNOPSIS Formats a message to be sent to the Anthropic model. .DESCRIPTION This function formats a message to be sent to the Anthropic model. The message can be either text or a media file. If a media file is specified, it is converted to base64. The function can also persist the conversation context history, unless the NoContextPersist parameter is specified. .EXAMPLE Format-AnthropicMessage -Role 'user' -Message 'Hello, how are you?' -ModelID 'anthropic.claude-v2:1' Formats a text message to be sent to the Anthropic model. .EXAMPLE Format-AnthropicMessage -Role 'user' -Message 'Hello, how are you?' -MediaPath 'C:\path\to\media.jpg' -ModelID 'anthropic.claude-v2:1' Formats a media message to be sent to the Anthropic model by converting the media file to base64. .EXAMPLE Format-AnthropicMessage -Role 'user' -Message 'Hello, how are you?' -ModelID 'anthropic.claude-v2:1' -NoContextPersist Formats a text message to be sent to the Anthropic model without persisting the conversation context history. .EXAMPLE $standardToolsResult = [PSCustomObject]@{ tool_use_id = 'id123' content = 'Elemental Hotel' } $formatAnthropicMessageSplat = @{ Role = 'user' ToolsResults = $standardToolsResult ModelID = $_ } Format-AnthropicMessage @formatAnthropicMessageSplat Formats a message with tools results to be sent to the Anthropic model. .EXAMPLE $standardToolsCall = [PSCustomObject]@{ type = 'tool_use' id = 'id123' name = 'top_song' input = [PSCustomObject]@{ sign = 'WZPZ' } } $formatAnthropicMessageSplat = @{ Role = 'assistant' ToolCall = $standardToolsCall ModelID = $_ } Format-AnthropicMessage @formatAnthropicMessageSplat Formats a message with a tool call to be sent to the Anthropic model. .PARAMETER Role The role of the message sender. Valid values are 'user' or 'assistant'. .PARAMETER Message The message to be sent to the model. .PARAMETER MediaPath File path to local media file. .PARAMETER ToolsResults A list of results from invoking tools recommended by the model in the previous chat turn. .PARAMETER ToolCall The tool call suggested to be used by the model. .PARAMETER ModelID The unique identifier of the model. .PARAMETER NoContextPersist Do not persist the conversation context history. 
If this parameter is specified, you will not be able to have a continuous conversation with the model. .OUTPUTS System.Management.Automation.PSCustomObject .NOTES The model requires a specific format for the message. This function formats the message accordingly. .COMPONENT pwshBedrock #> function Format-AnthropicMessage { [CmdletBinding()] param ( [Parameter(Mandatory = $true, HelpMessage = 'The role of the message sender.')] [ValidateSet('user', 'assistant')] [string]$Role, [Parameter(Mandatory = $false, HelpMessage = 'The message to be sent to the model.', ParameterSetName = 'Standard')] [ValidateNotNullOrEmpty()] [string]$Message, [Parameter(Mandatory = $false, HelpMessage = 'File path to local media file.', ParameterSetName = 'Standard')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string[]]$MediaPath, [Parameter(Mandatory = $true, HelpMessage = 'A list of results from invoking tools recommended by the model in the previous chat turn.', ParameterSetName = 'Result')] [ValidateNotNull()] [PSCustomObject]$ToolsResults, [Parameter(Mandatory = $true, HelpMessage = 'The tool call suggested to be used by the model.', ParameterSetName = 'Call')] [PSCustomObject]$ToolCall, [Parameter(Mandatory = $true, HelpMessage = 'The unique identifier of the model.')] [ValidateSet( 'anthropic.claude-v2:1', 'anthropic.claude-3-haiku-20240307-v1:0', 'anthropic.claude-3-sonnet-20240229-v1:0', 'anthropic.claude-3-5-sonnet-20240620-v1:0', 'anthropic.claude-3-opus-20240229-v1:0' )] [string]$ModelID, [Parameter(Mandatory = $false, HelpMessage = 'Do not persist the conversation context history.')] [bool]$NoContextPersist = $false ) Write-Verbose -Message 'Formatting Anthropic Message' if ($MediaPath) { Write-Verbose -Message 'Formatting vision message' $obj = [PSCustomObject]@{ role = $Role content = @() } foreach ($media in $MediaPath) { #____________________ # resets $base64 = $null $mediaFileInfo = $null $extension = $null #____________________ Write-Verbose -Message ('Converting media to base64: {0}' -f $media) try { $base64 = Convert-MediaToBase64 -MediaPath $media } catch { throw 'Unable to format Anthropic message. Failed to convert media to base64.' } Write-Verbose -Message ('Getting file info for {0}' -f $media) try { $mediaFileInfo = Get-Item -Path $media -ErrorAction Stop } catch { throw 'Unable to format Anthropic message. Failed to get media file info.' } if ($mediaFileInfo) { $extension = $mediaFileInfo.Extension.TrimStart('.') # special case if ($extension -eq 'jpg') { $extension = 'jpeg' } Write-Debug -Message ('Media extension: {0}' -f $extension) } else { throw 'Unable to format Anthropic message. Media extension not found.' 
} $obj.content += [PSCustomObject]@{ type = 'image' source = [PSCustomObject]@{ type = 'base64' 'media_type' = 'image/{0}' -f $extension data = $base64 } } } #foreach_MediaPath if ($Message) { $obj.content += [PSCustomObject]@{ type = 'text' text = $Message } } } #if_MediaPath elseif ($Message) { Write-Verbose -Message 'Formatting standard message' $obj = [PSCustomObject]@{ role = $Role content = @( [PSCustomObject]@{ type = 'text' text = $Message } ) } } #elseif_Message elseif ($ToolCall) { Write-Verbose -Message 'Formatting tool call message' $obj = [PSCustomObject]@{ role = $Role content = $ToolCall } } #elseif_ToolCall elseif ($ToolsResults) { Write-Verbose -Message 'Formatting tool results message' $obj = [PSCustomObject]@{ role = $Role content = @() } foreach ($tool in $ToolsResults) { if ($tool.content -is [string]) { $obj.content += [PSCustomObject]@{ type = 'tool_result' tool_use_id = $tool.tool_use_id content = $tool.content } } else { # Initialize the content array for the second object $contentArray = @() # Iterate over the properties of the first object and construct the content array $tool.content.PSObject.Properties | ForEach-Object { $contentArray += [PSCustomObject]@{ type = 'text' text = "$($_.Name) = $($_.Value)" } } $obj.content += [PSCustomObject]@{ type = 'tool_result' tool_use_id = $tool.tool_use_id content = $contentArray } } } #foreach_ToolsResults Write-Debug -Message ($obj.content | Out-String) } #elseif_ToolsResults Write-Debug -Message ($obj | Out-String) if ($NoContextPersist -eq $false) { $contextObj = $Global:pwshBedrockModelContext | Where-Object { $_.ModelID -eq $ModelID } $contextObj.Context.Add($obj) $returnContext = $contextObj.Context } else { $returnContext = $obj } return $returnContext } #Format-AnthropicMessage <# .SYNOPSIS Formats a message to be sent to the Converse API. .DESCRIPTION This function formats a message to be sent to the Converse API. .EXAMPLE Format-ConverseAPI -Role 'User' -Message 'Hello, how are you?' -ModelID 'Converse' This example formats a message to be sent to the Converse API. .EXAMPLE $toolResult = [PSCustomObject]@{ ToolUseId = 'tooluse_ihA1_9blR3S1QJixGq5gwg' Content = [PSCustomObject]@{ restaurant = [PSCustomObject]@{ name = 'Gristmill River Restaurant & Bar' address = '1287 Gruene Rd, New Braunfels, TX 78130' rating = '4.5' cuisine = 'American' budget = '2' } } status = 'success' } $formatConverseAPISplat = @{ Role = 'user' ToolsResults = $toolResult ModelID = 'Converse' } $result = Format-ConverseAPI @formatConverseAPISplat .PARAMETER Role The role of the message sender. .PARAMETER Message The message to be sent to the model. .PARAMETER MediaPath File path to local media file. .PARAMETER DocumentPath File path to local document. .PARAMETER ToolsResults A list of results from invoking tools recommended by the model in the previous chat turn. .PARAMETER ToolCalls The tool calls that were returned by the model. .PARAMETER ModelID The unique identifier of the model. .PARAMETER NoContextPersist Do not persist the conversation context history. If this parameter is specified, you will not be able to have a continuous conversation with the model. .OUTPUTS System.Management.Automation.PSCustomObject .NOTES The model requires a specific format for the message. This function formats the message accordingly. This model uses object based updates to the context instead of a single string. 
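.EXAMPLE
    $formatConverseAPISplat = @{
        Role      = 'user'
        Message   = 'Please describe the attached image.'
        MediaPath = 'C:\path\to\image.jpg'
        ModelID   = 'Converse'
    }
    Format-ConverseAPI @formatConverseAPISplat

    Illustrative sketch (the message text and file path are placeholders) showing how a vision message with a local media file could be formatted for the Converse API.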
.LINK https://docs.aws.amazon.com/sdkfornet/v3/apidocs/items/BedrockRuntime/NBedrockRuntimeModel.html .LINK https://docs.aws.amazon.com/sdkfornet/v3/apidocs/?page=TMessage.html&tocid=Amazon_BedrockRuntime_Model_Message .LINK https://docs.aws.amazon.com/sdkfornet/v3/apidocs/items/BedrockRuntime/TContentBlock.html .LINK https://docs.aws.amazon.com/sdkfornet/v3/apidocs/items/BedrockRuntime/TToolResultBlock.html .LINK https://docs.aws.amazon.com/sdkfornet/v3/apidocs/items/Runtime/TDocument.html .LINK https://docs.aws.amazon.com/sdkfornet/v3/apidocs/items/BedrockRuntime/TImageBlock.html .LINK https://docs.aws.amazon.com/sdkfornet/v3/apidocs/?page=TDocumentBlock.html&tocid=Amazon_BedrockRuntime_Model_DocumentBlock .COMPONENT pwshBedrock #> function Format-ConverseAPI { [CmdletBinding()] param ( [Parameter(Mandatory = $true, HelpMessage = 'The role of the message sender.')] [ValidateSet('user', 'assistant')] [string]$Role, [Parameter(Mandatory = $false, HelpMessage = 'The message to be sent to the model.')] [ValidateNotNullOrEmpty()] [string]$Message, [Parameter(Mandatory = $false, HelpMessage = 'File path to local media file.')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string[]]$MediaPath, [Parameter(Mandatory = $false, HelpMessage = 'File path to local document.')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string[]]$DocumentPath, [Parameter(Mandatory = $false, HelpMessage = 'The message construct returned by the model.')] [Amazon.BedrockRuntime.Model.Message]$ReturnMessage, [Parameter(Mandatory = $false, HelpMessage = 'A list of results from invoking tools recommended by the model in the previous chat turn.')] [ValidateNotNull()] [object]$ToolsResults, # [Parameter(Mandatory = $false, # HelpMessage = 'The tool calls that were returned by the model.')] # [ValidateNotNullOrEmpty()] # [object[]]$ToolCalls, [Parameter(Mandatory = $true, HelpMessage = 'The unique identifier of the model.')] [ValidateSet( 'Converse' )] [string]$ModelID, [Parameter(Mandatory = $false, HelpMessage = 'Do not persist the conversation context history.')] [bool]$NoContextPersist = $false ) Write-Verbose -Message 'Formatting Converse Message' switch ($Role) { 'user' { if ($ToolsResults) { $messageObj = [Amazon.BedrockRuntime.Model.Message]::new() $messageObj.Role = 'user' $messageContentBlock = [Amazon.BedrockRuntime.Model.ContentBlock]::new() $toolResultBlock = [Amazon.BedrockRuntime.Model.ToolResultBlock]::new() $toolResultBlock.Status = $ToolsResults.Status $toolResultBlock.ToolUseId = $ToolsResults.ToolUseId $toolResultContentBlock = [Amazon.BedrockRuntime.Model.ToolResultContentBlock]::new() if ($ToolsResults.Status -eq 'error') { $toolResultContentBlock.Text = $ToolsResults.Content } else { $toolResultContentBlock.Json = [Amazon.Runtime.Documents.Document]::FromObject($ToolsResults.Content) } $toolResultBlock.Content = $toolResultContentBlock $messageContentBlock.ToolResult = $toolResultBlock $messageObj.Content = $messageContentBlock } elseif ($MediaPath) { Write-Verbose -Message 'Formatting vision message' $messageObj = [Amazon.BedrockRuntime.Model.Message]::new() $messageObj.Role = 'user' foreach ($media in $MediaPath) { #____________________ # resets $memoryStream = $null $mediaFileInfo = $null $extension = $null $messageContentBlock = [Amazon.BedrockRuntime.Model.ContentBlock]::new() $imageBlock = $null $imageFormat = $null $imageSource = $null #____________________ Write-Verbose -Message 'Converting media to memory stream' try { $memoryStream = Convert-MediaToMemoryStream -MediaPath $media 
-ErrorAction Stop } catch { throw 'Unable to format Converse API vision message. Unable to convert media to memory stream.' } Write-Verbose -Message ('Getting file info for {0}' -f $media) try { $mediaFileInfo = Get-Item -Path $media -ErrorAction Stop } catch { throw 'Unable to format Converse API vision message. Failed to get media file info.' } Write-Verbose -Message ('Getting file extension for {0}' -f $media) if ($mediaFileInfo) { $extension = $mediaFileInfo.Extension.TrimStart('.') # special case if ($extension -eq 'jpg') { $extension = 'jpeg' } Write-Debug -Message ('Media extension: {0}' -f $extension) } else { throw 'Unable to format Converse API vision message. Media extension not found.' } $imageBlock = [Amazon.BedrockRuntime.Model.ImageBlock]::new() $imageFormat = [Amazon.BedrockRuntime.ImageFormat]::new($extension) $imageSource = [Amazon.BedrockRuntime.Model.ImageSource]::new() $imageSource.Bytes = $memoryStream $imageBlock.Format = $imageFormat $imageBlock.Source = $imageSource $messageContentBlock.Image = $imageBlock $messageObj.Content.Add($messageContentBlock) } #foreach_MediaPath if ($Message) { $messageContentBlock = [Amazon.BedrockRuntime.Model.ContentBlock]::new() $messageContentBlock.Text = $Message $messageObj.Content.Add($messageContentBlock) } } elseif ($DocumentPath) { Write-Verbose -Message 'Formatting document message' $messageObj = [Amazon.BedrockRuntime.Model.Message]::new() $messageObj.Role = 'user' foreach ($document in $DocumentPath) { #____________________ # resets $memoryStream = $null $documentFileInfo = $null $extension = $null $messageContentBlock = [Amazon.BedrockRuntime.Model.ContentBlock]::new() $imageBlock = $null $imageFormat = $null $imageSource = $null #____________________ Write-Verbose -Message 'Converting document to memory stream' try { $memoryStream = Convert-MediaToMemoryStream -MediaPath $document -ErrorAction Stop } catch { throw 'Unable to format Converse API document message. Unable to convert document to memory stream.' } Write-Verbose -Message ('Getting file info for {0}' -f $document) try { $documentFileInfo = Get-Item -Path $document -ErrorAction Stop } catch { throw 'Unable to format Converse API document message. Failed to get document file info.' } Write-Verbose -Message ('Getting file extension for {0}' -f $document) if ($documentFileInfo) { $extension = $documentFileInfo.Extension.TrimStart('.') # special case Write-Debug -Message ('Media extension: {0}' -f $extension) } else { throw 'Unable to format Converse API document message. Document extension not found.' 
} $documentBlock = [Amazon.BedrockRuntime.Model.DocumentBlock]::new() $documentFormat = [Amazon.BedrockRuntime.DocumentFormat]::new($extension) $documentSource = [Amazon.BedrockRuntime.Model.DocumentSource]::new() $documentSource.Bytes = $memoryStream $documentBlock.Format = $documentFormat $documentBlock.Name = $documentFileInfo.BaseName $documentBlock.Source = $documentSource $messageContentBlock.Document = $documentBlock $messageObj.Content.Add($messageContentBlock) } #foreach_MediaPath if ($Message) { $messageContentBlock = [Amazon.BedrockRuntime.Model.ContentBlock]::new() $messageContentBlock.Text = $Message $messageObj.Content.Add($messageContentBlock) } } else { $messageObj = [Amazon.BedrockRuntime.Model.Message]::new() $messageObj.Role = 'user' $content = [Amazon.BedrockRuntime.Model.ContentBlock]::new() $content.Text = $Message $messageObj.Content = $content } } 'assistant' { $messageObj = $ReturnMessage } } #switch_role Write-Debug -Message ('Formatted message: {0}' -f ($messageObj | Out-String) ) Write-Debug -Message ('Formatted message Content: {0}' -f ($messageObj.Content | Out-String) ) if ($NoContextPersist -eq $false) { $contextObj = $Global:pwshBedrockModelContext | Where-Object { $_.ModelID -eq $ModelID } $contextObj.Context.Add($messageObj) $returnContext = $contextObj.Context } else { $returnContext = $messageObj } Write-Debug 'out of Format-ConverseAPI' return $returnContext } #Format-ConverseAPI <# .SYNOPSIS Formats a Amazon.BedrockRuntime.Model.Tool to be sent to the Converse API. .DESCRIPTION Formats a Amazon.BedrockRuntime.Model.ToolSpecification to be sent to the Converse API. Converse requires very specific object types for a tool configuration. .EXAMPLE Format-ConverseAPIToolConfig -ToolsConfig $ToolsConfig This example formats a tool configuration to be sent to the Converse API. .PARAMETER ToolsConfig The tool configuration to be formatted. .OUTPUTS Amazon.BedrockRuntime.Model.Tool .NOTES Amazon.BedrockRuntime.Model.Tool Amazon.BedrockRuntime.Model.ToolSpecification Amazon.BedrockRuntime.Model.ToolInputSchema .LINK https://docs.aws.amazon.com/sdkfornet/v3/apidocs/items/BedrockRuntime/TTool.html .LINK https://docs.aws.amazon.com/sdkfornet/v3/apidocs/?page=TToolSpecification.html&tocid=Amazon_BedrockRuntime_Model_ToolSpecification .COMPONENT pwshBedrock #> function Format-ConverseAPIToolConfig { [CmdletBinding()] param ( [Parameter(Mandatory = $false, HelpMessage = 'Tool provided to model.')] [ValidateNotNull()] [object[]]$ToolsConfig ) Write-Verbose -Message 'Formatting Converse Tool Config' $allTools = New-Object System.Collections.Generic.List[object] foreach ($toolConfig in $ToolsConfig) { $tool = [Amazon.BedrockRuntime.Model.Tool]::new() $toolspec = [Amazon.BedrockRuntime.Model.ToolSpecification]::new() $toolspec.Name = $toolConfig.Name $toolspec.Description = $toolConfig.Description $toolspecInputSchema = [Amazon.BedrockRuntime.Model.ToolInputSchema]::new() # add a type property set to object on the $toolConfig.Properties object $newPropertiesObj = [ordered]@{ type = 'object' properties = $toolConfig.Properties required = $toolConfig.Required } $toolspecInputSchema.Json = [Amazon.Runtime.Documents.Document]::FromObject($newPropertiesObj) $toolspec.InputSchema = $toolspecInputSchema $tool.ToolSpec = $toolspec $allTools.Add($tool) } return $allTools } #Format-ConverseAPIToolConfig <# .SYNOPSIS Formats a message to be sent to a Meta model. .DESCRIPTION This function formats a message to be sent to a Meta model. 
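    The prompt scaffolding differs by model family: Llama 2 models wrap the conversation in <s>[INST] ... [/INST] tags with a <<SYS>> system block, while Llama 3 models use the <|begin_of_text|>, <|start_header_id|>, and <|eot_id|> tokens. The scaffolding is created on the first message of a conversation and appended to on subsequent turns.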
.EXAMPLE Format-MetaTextMessage -Role 'User' -Message 'Hello, how are you?' -ModelID 'meta.llama3-1-8b-instruct-v1:0' Formats a text message to be sent to the Meta model. .EXAMPLE Format-MetaTextMessage -Role 'User' -Message 'Hello, how are you?' -ModelID 'meta.llama3-1-8b-instruct-v1:0' -NoContextPersist Formats a text message to be sent to the Meta model without persisting the conversation context history. .EXAMPLE Format-MetaTextMessage -Role 'User' -Message 'Hello, how are you?' -SystemPrompt 'You are a Star Trek trivia expert.' -ModelID 'meta.llama3-1-8b-instruct-v1:0' Formats a text message to be sent to the Meta model with a system prompt. .PARAMETER Role The role of the message sender. Valid values are 'user' or 'assistant'. .PARAMETER Message The message to be sent to the model. .PARAMETER SystemPrompt The system prompt to be sent to the model. .PARAMETER ModelID The unique identifier of the model. .PARAMETER NoContextPersist Do not persist the conversation context history. If this parameter is specified, you will not be able to have a continuous conversation with the model. .OUTPUTS System.Management.Automation.PSCustomObject .NOTES The model requires a specific format for the message. This function formats the message accordingly. The logic in this function actually replaces the context history in memory with the newly crafted message. This is because the logic adds to the string. .COMPONENT pwshBedrock #> function Format-MetaTextMessage { [CmdletBinding()] param ( [Parameter(Mandatory = $true, HelpMessage = 'The role of the message sender.')] [ValidateSet('User', 'Model')] [string]$Role, [Parameter(Mandatory = $true, HelpMessage = 'The message to be sent to the model.')] [ValidateNotNullOrEmpty()] [string]$Message, [Parameter(Mandatory = $false, HelpMessage = 'The system prompt to be sent to the model.')] [string]$SystemPrompt, [Parameter(Mandatory = $true, HelpMessage = 'The unique identifier of the model.')] [ValidateSet( 'meta.llama2-13b-chat-v1', 'meta.llama2-70b-chat-v1', 'meta.llama3-8b-instruct-v1:0', 'meta.llama3-70b-instruct-v1:0', 'meta.llama3-1-8b-instruct-v1:0', 'meta.llama3-1-70b-instruct-v1:0' )] [string]$ModelID, [Parameter(Mandatory = $false, HelpMessage = 'Do not persist the conversation context history.')] [bool]$NoContextPersist = $false ) Write-Verbose -Message 'Formatting Meta Message' # https://huggingface.co/blog/llama2#how-to-prompt-llama-2 $standardLlama2Prompt = @' <s>[INST] <<SYS>> You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information. <</SYS>> '@ # https://llama.meta.com/docs/model-cards-and-prompt-formats/meta-llama-3/ $standardLlama3Prompt = @' <|begin_of_text|><|start_header_id|>system<|end_header_id|> You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. 
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.<|eot_id|> '@ # we need to determine if this is the first message in the conversation # if it is, we need to create the system prompt scaffolding $contextEval = Get-ModelContext -ModelID $ModelID if ([string]::IsNullOrEmpty($contextEval)) { Write-Debug -Message 'No context found. Creating new context.' $firstMessage = $true $str = '' } else { Write-Debug -Message 'Context found. Using existing context.' $firstMessage = $false $str = $contextEval } if ($ModelID -like '*llama2*') { Write-Debug 'Processing llama2 model' $sysPromptRegex = '(?<=<<SYS>>\r?\n)([\s\S]*?)(?=\r?\n<</SYS>>)' if ($firstMessage -eq $true) { $str = $str + "$standardLlama2Prompt`n`n" + $Message + '[/INST]' } else { if ($Role -eq 'User') { $str = $str + "`n<s>[INST]" + $Message + '[/INST]' } elseif ($Role -eq 'Model') { $str = $str + $Message + '</s>' } } } #if_llama2 elseif ($ModelID -like '*llama3*') { Write-Debug 'Processing llama3 model' $sysPromptRegex = '(?<=system<\|end_header_id\|>\r?\n)([\s\S]*?)(?=<\|eot_id\|>)' if ($firstMessage -eq $true) { $str = $str + "$standardLlama3Prompt`n`n" + $Message + '<|eot_id|><|start_header_id|>assistant<|end_header_id|>' } else { if ($Role -eq 'User') { $str = $str + "`n`n" + $Message + '<|eot_id|><|start_header_id|>assistant<|end_header_id|>' } elseif ($Role -eq 'Model') { $str = $str + "`n`n" + $Message + '<|eot_id|><|start_header_id|>user<|end_header_id|>' } } } #elseif_llama3 if ($SystemPrompt) { Write-Debug -Message 'System prompt provided' Write-Debug -Message ('System prompt: {0}' -f $SystemPrompt) Write-Debug -Message ('System prompt regex: {0}' -f $sysPromptRegex) $str = $str -replace $sysPromptRegex, $SystemPrompt } Write-Debug -Message ('Formatted message: {0}' -f $str) if ($NoContextPersist -eq $false) { $contextObj = $Global:pwshBedrockModelContext | Where-Object { $_.ModelID -eq $ModelID } $contextObj.Context = $str $returnContext = $contextObj.Context } else { $returnContext = $str } Write-Debug 'out of Format-MetaTextMessage' return $returnContext } #Format-MetaTextMessage <# .SYNOPSIS Formats a message to be sent to a Mistral AI model. .DESCRIPTION This function formats a message to be sent to a Mistral AI model. .EXAMPLE Format-MistralAIChatModel -Role 'User' -Message 'Hello, how are you?' -ModelID 'mistral.mistral-large-2407-v1:0' This example formats a message to be sent to the Mistral AI model 'mistral.mistral-large-2407-v1:0'. .PARAMETER Role The role of the message sender. .PARAMETER Message The message to be sent to the model. .PARAMETER ToolsResults A list of results from invoking tools recommended by the model in the previous chat turn. .PARAMETER ToolCalls The tool calls that were returned by the model. .PARAMETER ModelID The unique identifier of the model. .PARAMETER NoContextPersist Do not persist the conversation context history. If this parameter is specified, you will not be able to have a continuous conversation with the model. .OUTPUTS System.Management.Automation.PSCustomObject .NOTES The model requires a specific format for the message. This function formats the message accordingly. This model uses object based updates to the context instead of a single string. 
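.EXAMPLE
    $toolResult = [PSCustomObject]@{
        tool_call_id = 'toolCall123'
        content      = [PSCustomObject]@{
            song   = 'Elemental Hotel'
            artist = '8 Storey Hike'
        }
    }
    $formatMistralAIChatModelSplat = @{
        Role         = 'tool'
        ToolsResults = $toolResult
        ModelID      = 'mistral.mistral-large-2402-v1:0'
    }
    Format-MistralAIChatModel @formatMistralAIChatModelSplat

    Illustrative sketch (the tool call id and content values are placeholders) showing how a tool result could be formatted; the content object is converted to a compressed JSON string by this function.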
.COMPONENT pwshBedrock #> function Format-MistralAIChatModel { [CmdletBinding()] param ( [Parameter(Mandatory = $true, HelpMessage = 'The role of the message sender.')] [ValidateSet('system', 'user', 'assistant', 'tool')] [string]$Role, [Parameter(Mandatory = $false, HelpMessage = 'The message to be sent to the model.')] [ValidateNotNullOrEmpty()] [string]$Message, [Parameter(Mandatory = $false, HelpMessage = 'A list of results from invoking tools recommended by the model in the previous chat turn.')] [ValidateNotNull()] [object]$ToolsResults, [Parameter(Mandatory = $false, HelpMessage = 'The tool calls that were returned by the model.')] [ValidateNotNullOrEmpty()] [object[]]$ToolCalls, [Parameter(Mandatory = $true, HelpMessage = 'The unique identifier of the model.')] [ValidateSet( 'mistral.mistral-large-2402-v1:0', 'mistral.mistral-large-2407-v1:0' )] [string]$ModelID, [Parameter(Mandatory = $false, HelpMessage = 'Do not persist the conversation context history.')] [bool]$NoContextPersist = $false ) Write-Verbose -Message 'Formatting Mistral AI Chat Message' # we need to account for a special condition where the import global variable is default set to string # the mistral chat model context is unique in that it is a collection of objects instead of a single string $contextEval = $Global:pwshBedrockModelContext | Where-Object { $_.ModelID -eq $ModelID } if ($contextEval.Context -eq '' -or $null -eq $contextEval.Context -or $contextEval.Context.Count -eq 0) { Write-Debug -Message 'No context found. Creating new object based context.' $contextEval.Context = New-Object System.Collections.Generic.List[object] $firstMessage = $true } else { $firstMessage = $false } switch ($Role) { 'system' { if ($firstMessage -eq $true) { $obj = [PSCustomObject]@{ role = 'system' content = $Message } } else { # we need to determine if the context already has a system message # if it does, we need to replace it with the new system message # if it does not, we need to add the new system message $obj = $contextEval.Context | Where-Object { $_.role -eq 'system' } if ($null -eq $obj) { $obj = [PSCustomObject]@{ role = 'system' content = $Message } } else { $obj.content = $Message return } } } 'user' { $obj = [PSCustomObject]@{ role = 'user' content = $Message } } 'assistant' { if ($ToolCalls) { $obj = [PSCustomObject]@{ role = 'assistant' content = $Message tool_calls = $ToolCalls } } else { $obj = [PSCustomObject]@{ role = 'assistant' content = $Message } } } 'tool' { # we essentially recreate the same object passed in with one important difference # the powershell object in content must be converted to a json string # the upstream ConvertTo-Json for the body payload should not process the content conversion. $obj = [PSCustomObject]@{ role = 'tool' tool_call_id = $ToolsResults.tool_call_id content = $ToolsResults.content | ConvertTo-Json -Compress } } } #switch_role Write-Debug -Message ('Formatted message: {0}' -f ($obj | Out-String) ) if ($NoContextPersist -eq $false) { $contextObj = $Global:pwshBedrockModelContext | Where-Object { $_.ModelID -eq $ModelID } $contextObj.Context.Add($obj) $returnContext = $contextObj.Context } else { $returnContext = $obj } Write-Debug 'out of Format-MistralAIChatModel' return $returnContext } #Format-MistralAIChatModel <# .SYNOPSIS Formats a message to be sent to a Mistral AI model. .DESCRIPTION This function formats a message to be sent to a Mistral AI model. .EXAMPLE Format-MistralAITextMessage -Role 'User' -Message 'Hello, how are you?' 
-ModelID 'mistral.mistral-7b-instruct-v0:2' This example formats a message to be sent to the Mistral AI model 'mistral.mistral-7b-instruct-v0:2'. .PARAMETER Role The role of the message sender. Valid values are 'user' or 'assistant'. .PARAMETER Message The message to be sent to the model. .PARAMETER ModelID The unique identifier of the model. .PARAMETER NoContextPersist Do not persist the conversation context history. If this parameter is specified, you will not be able to have a continuous conversation with the model. .OUTPUTS System.Management.Automation.PSCustomObject .NOTES The model requires a specific format for the message. This function formats the message accordingly. The logic in this function actually replaces the context history in memory with the newly crafted message. This is because the logic adds to the string. .COMPONENT pwshBedrock #> function Format-MistralAITextMessage { [CmdletBinding()] param ( [Parameter(Mandatory = $true, HelpMessage = 'The role of the message sender.')] [ValidateSet('User', 'Model')] [string]$Role, [Parameter(Mandatory = $true, HelpMessage = 'The message to be sent to the model.')] [ValidateNotNullOrEmpty()] [string]$Message, [Parameter(Mandatory = $true, HelpMessage = 'The unique identifier of the model.')] [ValidateSet( 'mistral.mistral-7b-instruct-v0:2', 'mistral.mixtral-8x7b-instruct-v0:1', 'mistral.mistral-small-2402-v1:0', 'mistral.mistral-large-2402-v1:0', 'mistral.mistral-large-2407-v1:0' )] [string]$ModelID, [Parameter(Mandatory = $false, HelpMessage = 'Do not persist the conversation context history.')] [bool]$NoContextPersist = $false ) Write-Verbose -Message 'Formatting Meta Message' # we need to determine if this is the first message in the conversation # if it is, we need to create the system prompt scaffolding $contextEval = Get-ModelContext -ModelID $ModelID if ([string]::IsNullOrEmpty($contextEval)) { Write-Debug -Message 'No context found. Creating new context.' $firstMessage = $true $str = '' } else { Write-Debug -Message 'Context found. Using existing context.' $firstMessage = $false $str = $contextEval } if ($firstMessage -eq $true) { $str = $str + '<s>[INST] ' + $Message + ' [/INST]' } else { if ($Role -eq 'User') { $str = $str + "`n[INST] " + $Message + ' [/INST]' } elseif ($Role -eq 'Model') { $str = $str + "`n" + $Message + '</s>' } } Write-Debug -Message ('Formatted message: {0}' -f $str) if ($NoContextPersist -eq $false) { $contextObj = $Global:pwshBedrockModelContext | Where-Object { $_.ModelID -eq $ModelID } $contextObj.Context = $str $returnContext = $contextObj.Context } else { $returnContext = $str } Write-Debug 'out of Format-MistralAITextMessage' return $returnContext } #Format-MistralAITextMessage <# .SYNOPSIS Retrieves the resolution of an image. .DESCRIPTION This function returns the resolution (width and height) of an image using the System.Drawing namespace. It reads the specified image file and outputs its dimensions. .EXAMPLE Get-ImageResolution -MediaPath 'C:\path\to\image.jpg' Gets the resolution of the image located at 'C:\path\to\image.jpg'. .PARAMETER MediaPath File path to local media file. .OUTPUTS System.Management.Automation.PSCustomObject .NOTES This function is a wrapper around the System.Drawing namespace, which is not mockable in tests. 
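.EXAMPLE
    $resolution = Get-ImageResolution -MediaPath 'C:\path\to\image.png'
    '{0}x{1}' -f $resolution.Width, $resolution.Height

    Illustrative usage (the file path is a placeholder) showing how the Width and Height properties of the returned object can be consumed.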
.COMPONENT pwshBedrock #> function Get-ImageResolution { [CmdletBinding()] param ( [Parameter(Mandatory = $true, HelpMessage = 'File path to local media file.')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string]$MediaPath ) Write-Verbose -Message ('Getting resolution for {0} ...' -f $MediaPath) Add-Type -AssemblyName System.Drawing $image = [System.Drawing.Image]::FromFile($MediaPath) # Get the width and height $width = $image.Width $height = $image.Height $obj = [PSCustomObject]@{ Width = $width Height = $height } Write-Debug -Message ('Width: {0}, Height: {1}' -f $width, $height) return $obj } #Get-ImageResolution <# .SYNOPSIS Saves bytes to a file. .DESCRIPTION This function saves bytes to a file using the System.IO.File namespace. It writes the bytes to the specified file path. .EXAMPLE Save-BytesToFile -Base64String $base64 Converts the base64 string to bytes. .PARAMETER ImageBytes Image bytes to save to a media file. .PARAMETER FilePath File path to save the image file. .OUTPUTS None .NOTES This function is a wrapper around the System.IO.File namespace, which is not mockable in tests. .COMPONENT pwshBedrock #> function Save-BytesToFile { [CmdletBinding()] param ( [Parameter(Mandatory = $true, HelpMessage = 'Image bytes to save to a media file.')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [byte[]]$ImageBytes, [Parameter(Mandatory = $true, HelpMessage = 'File path to save the image file.')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string]$FilePath ) Write-Verbose -Message 'Converting from base64' try { [System.IO.File]::WriteAllBytes($FilePath, $ImageBytes) } catch { Write-Warning -Message 'Failed to output bytes to file' throw } Write-Debug -Message 'Out of Save-BytesToFile' } #Save-BytesToFile <# .SYNOPSIS Validates a custom conversation object for use with the Amazon Titan models. .DESCRIPTION Evaluates a custom conversation object to ensure it meets the requirements for use with the Amazon Titan models. It checks the structure of the conversation objects to ensure they are properly formatted. .EXAMPLE Test-AmazonCustomConversation -CustomConversation $customConversation Tests the custom conversation string $customConversation to ensure it meets the requirements for use with the Amazon Titan model. .PARAMETER CustomConversation A properly formatted string that represents a custom conversation. .OUTPUTS System.Boolean .NOTES None .COMPONENT pwshBedrock #> function Test-AmazonCustomConversation { [CmdletBinding()] param ( [Parameter(Mandatory = $true, HelpMessage = 'A properly formatted string that represents a custom conversation.')] [ValidateNotNull()] [string]$CustomConversation ) # Split the input into lines $lines = $CustomConversation -split "`n" # Initialize expected role (User should be the first) $expectedRole = 'User' # Loop through each line to check the pattern foreach ($line in $lines) { if ($line -match "^$($expectedRole): .*$") { # Alternate between User and Bot if ($expectedRole -eq 'User') { $expectedRole = 'Bot' } else { $expectedRole = 'User' } } else { return $false } } return $true } #Test-AmazonCustomConversation <# .SYNOPSIS Tests if a media file is compatible with Amazon Titan's requirements. .DESCRIPTION Evaluates the specified media file to ensure it meets Amazon Titan's compatibility requirements based on their public documentation. It checks the file's presence, type, and size. If the file is not found, the function returns false. If the file type is not supported, the function returns false. 
If the file resolution does not meet Amazon Titan's strict requirements, the function returns false.
.EXAMPLE
    Test-AmazonMedia -MediaPath 'C:\path\to\image.jpg'

    Tests the image located at 'C:\path\to\image.jpg' for Amazon Titan's compatibility.
.PARAMETER MediaPath
    File path to local media file.
.OUTPUTS
    System.Boolean
.NOTES
    Max input image size - 5 MB (only some specific resolutions are supported)
    Max image size using in/outpainting - 1,408 x 1,408 px

    Width   Height  Aspect ratio    Price equivalent to
    1024    1024    1:1             1024 x 1024
    768     768     1:1             512 x 512
    512     512     1:1             512 x 512
    768     1152    2:3             1024 x 1024
    384     576     2:3             512 x 512
    1152    768     3:2             1024 x 1024
    576     384     3:2             512 x 512
    768     1280    3:5             1024 x 1024
    384     640     3:5             512 x 512
    1280    768     5:3             1024 x 1024
    640     384     5:3             512 x 512
    896     1152    7:9             1024 x 1024
    448     576     7:9             512 x 512
    1152    896     9:7             1024 x 1024
    576     448     9:7             512 x 512
    768     1408    6:11            1024 x 1024
    384     704     6:11            512 x 512
    1408    768     11:6            1024 x 1024
    704     384     11:6            512 x 512
    640     1408    5:11            1024 x 1024
    320     704     5:11            512 x 512
    1408    640     11:5            1024 x 1024
    704     320     11:5            512 x 512
    1152    640     9:5             1024 x 1024
    1173    640     16:9            1024 x 1024

    Supported image types - JPEG, JPG, PNG
    The maximum file size allowed is 5 MB.
.COMPONENT
    pwshBedrock
.LINK
    https://docs.aws.amazon.com/bedrock/latest/userguide/titan-image-models.html
.LINK
    https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-titan-image.html
#>
function Test-AmazonMedia {
    [CmdletBinding()]
    [Diagnostics.CodeAnalysis.SuppressMessageAttribute('PSUseSingularNouns', '', Justification = 'Just a collective noun.')]
    param (
        [Parameter(Mandatory = $false,
            HelpMessage = 'File path to local media file.')]
        [ValidateNotNull()]
        [ValidateNotNullOrEmpty()]
        [string]$MediaPath
    )

    $result = $true # Assume success

    Write-Verbose -Message 'Verifying presence of media...'
    try {
        $pathEval = Test-Path -Path $MediaPath -ErrorAction Stop
    }
    catch {
        Write-Error ('Error verifying media path: {0}' -f $MediaPath)
        Write-Error $_
        $result = $false
        return $result
    }
    if ($pathEval -ne $true) {
        Write-Warning -Message ('The specified media path: {0} was not found.' -f $MediaPath)
        $result = $false
        return $result
    } #if_testPath
    else {
        Write-Verbose -Message 'Path verified.'
    } #else_testPath

    Write-Verbose -Message 'Verifying media type...'
    $supportedMediaExtensions = @(
        'JPG'
        'JPEG'
        'PNG'
    )
    Write-Verbose -Message ('Splitting media path: {0}' -f $MediaPath)
    $divide = $MediaPath.Split('.')
    $rawExtension = $divide[$divide.Length - 1]
    $extension = $rawExtension.ToUpper()
    Write-Verbose -Message "Verifying discovered extension: $extension"
    if ($supportedMediaExtensions -notcontains $extension) {
        Write-Warning -Message ('The specified media type: {0} is not supported.' -f $extension)
        $result = $false
        return $result
    } #if_supportedMediaExtensions
    else {
        Write-Verbose -Message 'Media type verified.'
    } #else_supportedMediaExtensions

    Write-Verbose -Message 'Verifying media file size...'
    try {
        $mediaFileInfo = Get-Item -Path $MediaPath -ErrorAction Stop
    }
    catch {
        Write-Error ('Error verifying media file info: {0}' -f $MediaPath)
        Write-Error $_
        $result = $false
        return $result
    }
    $mediaSize = $mediaFileInfo.Length
    if ($mediaSize -gt 5MB) {
        Write-Warning -Message ('The specified media size: {0} exceeds the Amazon Titan maximum allowed image file size of 5MB.' -f $mediaSize)
        $result = $false
        return $result
    } #if_mediaSize
    else {
        Write-Verbose -Message 'Media size verified.'
} #else_mediaSize #--------------------------------------------------------------- # Define the list of supported width and height combinations # https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-titan-image.html $supportedResolutions = @( @{ Width = 1024; Height = 1024 } @{ Width = 768; Height = 768 } @{ Width = 512; Height = 512 } @{ Width = 768; Height = 1152 } @{ Width = 384; Height = 576 } @{ Width = 1152; Height = 768 } @{ Width = 576; Height = 384 } @{ Width = 768; Height = 1280 } @{ Width = 384; Height = 640 } @{ Width = 1280; Height = 768 } @{ Width = 640; Height = 384 } @{ Width = 896; Height = 1152 } @{ Width = 448; Height = 576 } @{ Width = 1152; Height = 896 } @{ Width = 576; Height = 448 } @{ Width = 768; Height = 1408 } @{ Width = 384; Height = 704 } @{ Width = 1408; Height = 768 } @{ Width = 704; Height = 384 } @{ Width = 640; Height = 1408 } @{ Width = 320; Height = 704 } @{ Width = 1408; Height = 640 } @{ Width = 704; Height = 320 } @{ Width = 1152; Height = 640 } @{ Width = 1173; Height = 640 } ) Write-Verbose -Message 'Verifying media resolution...' Write-Verbose ('Media path: {0}' -f $MediaPath) $resolution = Get-ImageResolution -MediaPath $MediaPath # Check if the resolution matches any of the supported resolutions $matchFound = $false foreach ($supportedResolution in $supportedResolutions) { if ($resolution.width -eq $supportedResolution.Width -and $resolution.height -eq $supportedResolution.Height) { $matchFound = $true break } } if ($matchFound -eq $false) { Write-Warning -Message ('The specified media resolution: {0}x{1} is not supported.' -f $resolution.Width, $resolution.Height) Write-Warning -Message 'https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-titan-image.html' $result = $false return $result } return $result } #Test-AmazonMedia <# .SYNOPSIS Validates a custom conversation object for use with the Anthropic models. .DESCRIPTION Evaluates a custom conversation object to ensure it meets the requirements for use with the Anthropic models. It checks the structure and properties of the conversation objects to ensure they are properly formatted. .EXAMPLE Test-AnthropicCustomConversation -CustomConversation $customConversation Tests the custom conversation object $customConversation to ensure it meets the requirements for use with the Anthropic model. .PARAMETER CustomConversation An array of custom conversation objects. .OUTPUTS System.Boolean .NOTES None .COMPONENT pwshBedrock #> function Test-AnthropicCustomConversation { [CmdletBinding()] param ( [Parameter(Mandatory = $true, HelpMessage = 'An array of custom conversation objects.')] [ValidateNotNull()] [PSCustomObject[]]$CustomConversation ) $result = $true # Assume success Write-Verbose -Message 'Validating provided custom conversation...' foreach ($conversation in $CustomConversation) { if ([string]::IsNullOrWhiteSpace($conversation.role)) { Write-Error -Message 'Custom conversation object must have a role property.' $result = $false } if ($conversation.role -ne 'user' -and $conversation.role -ne 'assistant') { Write-Error -Message 'role of conversation must be user or assistant' $result = $false } if (-not $conversation.content) { Write-Error -Message 'conversation must contain content property' $result = $false } foreach ($message in $conversation.content) { switch ($message.type) { 'text' { if ($message.text -is [string] -and -not [string]::IsNullOrWhiteSpace($message)) { Write-Verbose -Message 'Custom conversation message is valid.' 
} else { Write-Error -Message 'Custom conversation message must have a Text property.' $result = $false } } 'image' { $type = $message.source.type $media = $message.source.'media_type' $data = $message.source.data if ($type -ne 'base64' -or $media -ne 'image/jpeg' -or [string]::IsNullOrWhiteSpace($data)) { Write-Error -Message 'Custom conversation image message must have a source property with a type, media_type, and data property.' $result = $false } } Default { Write-Error -Message 'Custom conversation message must have a Type property.' $result = $false } } } #foreach_message } #foreach_conversation return $result } #Test-AnthropicCustomConversation <# .SYNOPSIS Tests if a media file is compatible with Anthropic's requirements. .DESCRIPTION Evaluates the specified media file to ensure it meets Anthropic's compatibility requirements based on their public documentation. It checks the file's presence, type, and size. If the file is not found, the function returns false. If the file type is not supported, the function returns false. If the file resolution exceeds Anthropic's recommendations, the function returns true but issues a warning. .EXAMPLE Test-AnthropicMedia -MediaPath 'C:\path\to\image.jpg' Tests the image located at 'C:\path\to\image.jpg' for Anthropic compatibility. .PARAMETER MediaPath File path to local media file. .OUTPUTS System.Boolean .NOTES Claude can read both text and images in requests. Supported base64 source type for images: image/jpeg, image/png, image/gif, and image/webp media types. For optimal performance, we recommend resizing your images before uploading if they are likely to exceed size or token limits. Images larger than 1568 pixels on any edge or exceeding ~1600 tokens will be scaled down, which may increase latency. Very small images under 200 pixels on any edge may lead to degraded performance. Maximum image sizes accepted by the API that will not be resized for common aspect ratios: - 1:1 1092x1092 px - 3:4 951x1268 px - 2:3 896x1344 px - 9:16 819x1456 px - 1:2 784x1568 px The maximum allowed image file size is 5MB per image. Up to 20 images can be included in a single request via the Messages API. .COMPONENT pwshBedrock #> function Test-AnthropicMedia { [CmdletBinding()] [Diagnostics.CodeAnalysis.SuppressMessageAttribute('PSUseSingularNouns', '', Justification = 'Just a collective noun.')] param ( [Parameter(Mandatory = $false, HelpMessage = 'File path to local media file.')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string]$MediaPath ) $result = $true # Assume success Write-Verbose -Message 'Verifying presence of media...' try { $pathEval = Test-Path -Path $MediaPath -ErrorAction Stop } catch { Write-Error ('Error verifying media path: {0}' -f $MediaPath) Write-Error $_ $result = $false return $result } if ($pathEval -ne $true) { Write-Warning -Message ('The specified media path: {0} was not found.' -f $PhotoPath) $result = $false return $result } #if_testPath else { Write-Verbose -Message 'Path verified.' } #else_testPath Write-Verbose -Message 'Verifying media type...' $supportedMediaExtensions = @( 'JPG' 'JPEG' 'PNG' 'GIF' 'WEBP' ) Write-Verbose -Message ('Splitting media path: {0}' -f $MediaPath) $divide = $MediaPath.Split('.') $rawExtension = $divide[$divide.Length - 1] $extension = $rawExtension.ToUpper() Write-Verbose -Message "Verifying discovered extension: $extension" if ($supportedMediaExtensions -notcontains $extension) { Write-Warning -Message ('The specified media type: {0} is not supported.' 
-f $extension) $result = $false return $result } #if_supportedMediaExtensions else { Write-Verbose -Message 'Media type verified.' } #else_supportedMediaExtensions Write-Verbose -Message 'Verifying media file size...' try { $mediaFileInfo = Get-Item -Path $MediaPath -ErrorAction Stop } catch { Write-Error ('Error verifying media file info: {0}' -f $MediaPath) Write-Error $_ $result = $false return $result } $mediaSize = $mediaFileInfo.Length if ($mediaSize -gt 5MB) { Write-Warning -Message ('The specified media size: {0} exceeds the Anthropic maximum allowed image file size of 5MB.' -f $mediaSize) $result = $false return $result } #if_mediaSize else { Write-Verbose -Message 'Media size verified.' } #else_mediaSize Write-Verbose -Message 'Verifying media resolution...' $resolution = Get-ImageResolution -MediaPath $MediaPath if ($resolution.Width -gt 1568 -or $resolution.Height -gt 1568) { Write-Warning -Message ('The specified media size: {0}x{1} exceeds the Anthropic recommendation to keep the long edge of the image below 1568.' -f $width, $height) Write-Warning -Message 'The image will be scaled down to meet the size requirements.' Write-Warning -Message 'Scaling down the image may increase latency of time-to-first-token, without giving you any additional model performance.' } #if_size return $result } #Test-AnthropicMedia <# .SYNOPSIS Validates a Tools object for use with the Anthropic models. .DESCRIPTION Evaluates a Tools object to ensure it meets the requirements for use with the Anthropic models. It checks the structure of the tools objects to ensure they are properly formatted. .EXAMPLE $tools = [PSCustomObject]@{ name = 'top_song' description = 'Get the most popular song played on a radio station.' input_schema = [PSCustomObject]@{ type = 'object' properties = [PSCustomObject]@{ sign = [PSCustomObject]@{ type = 'string' description = 'string' } } required = @( 'sign' ) } } Test-AnthropicTool -Tools $tools Tests the Tools object to ensure it meets the requirements for use with the Anthropic models. .PARAMETER Tools Definitions of tools that the model may use. .OUTPUTS System.Boolean .NOTES Not every property is validated. There are hash tables that can contain custom properties. .COMPONENT pwshBedrock #> function Test-AnthropicTool { [CmdletBinding()] param ( [Parameter(Mandatory = $false, HelpMessage = 'Definitions of tools that the model may use.')] [PSCustomObject[]]$Tools ) Write-Verbose -Message 'Validating the Tools object(s)...' foreach ($tool in $Tools) { # Validate main parameters if (-not $tool.PSObject.Properties['name'] -or -not [string]::IsNullOrWhiteSpace($tool.name) -eq $false) { Write-Debug -Message 'The name property is missing or empty.' return $false } if (-not $tool.PSObject.Properties['description'] -or -not [string]::IsNullOrWhiteSpace($tool.description) -eq $false) { Write-Debug -Message 'The description property is missing or empty.' return $false } if (-not $tool.PSObject.Properties['input_schema']) { Write-Debug -Message 'The input_schema property is missing.' return $false } # validate parameter_definitions sub-properties if ([string]::IsNullOrWhiteSpace($tool.input_schema)) { Write-Debug -Message 'The input_schema name sub-property is missing or empty.' return $false } if ([string]::IsNullOrWhiteSpace($tool.input_schema.type)) { Write-Debug -Message 'The input_schema type sub-property is missing or empty.' 
return $false } if ([string]::IsNullOrWhiteSpace($tool.input_schema.properties)) { Write-Debug -Message 'The input_schema properties type sub-property is missing or empty.' return $false } if ([string]::IsNullOrWhiteSpace($tool.input_schema.required)) { Write-Debug -Message 'The input_schema required properties sub-property is missing or empty.' return $false } } #foreach_tool return $true } #Test-AnthropicTool <# .SYNOPSIS Validates a Tools Results object for use with the Anthropic model. .DESCRIPTION Evaluates a Tools Results object to ensure it meets the requirements for use with the Anthropic model. It checks the structure of the tools results objects to ensure they are properly formatted. .EXAMPLE $toolsResults = [PSCustomObject]@{ tool_use_id = 'string' content = 'string' } Test-AnthropicToolResult -ToolResults $toolsResults Tests the Tools Results object to ensure it meets the requirements for use with the Anthropic model. .PARAMETER ToolResults A list of results from invoking tools recommended by the model in the previous chat turn. .OUTPUTS System.Boolean .NOTES None .COMPONENT pwshBedrock #> function Test-AnthropicToolResult { [CmdletBinding()] param ( [Parameter(Mandatory = $false, HelpMessage = 'A list of results from invoking tools recommended by the model in the previous chat turn.')] [PSCustomObject[]]$ToolResults ) Write-Verbose -Message 'Validating the ToolResults object(s)...' $allToolCallIds = New-Object System.Collections.Generic.List[string] foreach ($toolResult in $ToolResults) { if (-not $toolResult.PSObject.Properties['tool_use_id']) { Write-Debug -Message 'The tool_use_id property is missing.' return $false } if (-not $toolResult.PSObject.Properties['content']) { Write-Debug -Message 'The content property is missing.' return $false } $allToolCallIds.Add($toolResult.tool_use_id) } # each tool call id should be a unique id. we need to check for duplicates # Convert the list to an array and group by the IDs $groupedIds = $allToolCallIds | Group-Object # Check if any group has more than one element $hasDuplicates = $groupedIds | Where-Object { $_.Count -gt 1 } # Determine the result based on the presence of duplicates $hasNoDuplicates = $hasDuplicates.Count -eq 0 if ($hasNoDuplicates -eq $false) { Write-Debug -Message 'The tool_use_id property is not unique.' return $false } return $true } #Test-AnthropicToolResult <# .SYNOPSIS Validates a Chat History object for use with the Cohere Command R models. .DESCRIPTION Evaluates a Chat History object to ensure it meets the requirements for use with the Cohere Command R models. It checks the structure of the conversation objects to ensure they are properly formatted. .EXAMPLE Test-CohereCommandRChatHistory -ChatHistory @( [PSCustomObject]@{ role = 'USER'; message = 'Hello, how are you?' }, [PSCustomObject]@{ role = 'CHATBOT'; message = 'I am fine, thank you. How can I assist you today?' }, [PSCustomObject]@{ role = 'USER'; message = 'I need help with my account.' }, [PSCustomObject]@{ role = 'CHATBOT'; message = 'Sure, I can help with that. What seems to be the issue?' } ) Tests the Chat History to ensure it meets the requirements for use with the Cohere Command R models. 
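.EXAMPLE
    Test-CohereCommandRChatHistory -ChatHistory @(
        [PSCustomObject]@{ role = 'CHATBOT'; message = 'How can I assist you today?' },
        [PSCustomObject]@{ role = 'USER'; message = 'I need help with my account.' }
    )

    Illustrative sketch of an invalid history: because the first entry is not a USER message, the function returns $false.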
.PARAMETER ChatHistory Previous messages between the user and the model .OUTPUTS System.Boolean .NOTES None .COMPONENT pwshBedrock #> function Test-CohereCommandRChatHistory { [CmdletBinding()] param ( [Parameter(Mandatory = $false, HelpMessage = "Previous messages between the user and the model, meant to give the model conversational context for responding to the user's message.")] [ValidateNotNullOrEmpty()] [PSCustomObject[]]$ChatHistory ) Write-Verbose -Message 'Validating the ChatHistory object(s)...' # Initialize a variable to keep track of the expected role sequence $expectedRole = 'USER' # Iterate through each item in the ChatHistory array foreach ($item in $ChatHistory) { Write-Debug -Message ($item | Out-String) # Check if the 'role' is either 'USER' or 'CHATBOT' if ($item.role -ne 'USER' -and $item.role -ne 'CHATBOT') { Write-Debug -Message 'Item role is not USER or CHATBOT.' return $false } # Check if the 'message' is a non-null, non-empty string if ([string]::IsNullOrWhiteSpace($item.message)) { Write-Debug -Message 'Item message is null or empty.' return $false } # Check if the role matches the expected sequence if ($item.role -ne $expectedRole) { Write-Debug -Message 'Item role does not match the expected sequence.' return $false } # Toggle the expected role for the next item if ($expectedRole -eq 'USER') { $expectedRole = 'CHATBOT' } else { $expectedRole = 'USER' } } # If all checks passed, return true return $true } #Test-CohereCommandRChatHistory <# .SYNOPSIS Validates a Tools object for use with the Cohere Command R models. .DESCRIPTION Evaluates a Tools object to ensure it meets the requirements for use with the Cohere Command R models. It checks the structure of the tools objects to ensure they are properly formatted. .EXAMPLE $tools = [PSCustomObject]@{ name = "string" description = "string" parameter_definitions = @{ "parameter name" = [PSCustomObject]@{ description = "string" type = "string" required = $true } } } Test-CohereCommandRTool -Tools $tools Tests the Tools object to ensure it meets the requirements for use with the Cohere Command R models. .PARAMETER Tools A list of available tools (functions) that the model may suggest invoking before producing a text response. .OUTPUTS System.Boolean .NOTES None .COMPONENT pwshBedrock #> function Test-CohereCommandRTool { [CmdletBinding()] param ( [Parameter(Mandatory = $false, HelpMessage = 'A list of available tools (functions) that the model may suggest invoking before producing a text response.')] [PSCustomObject[]]$Tools ) Write-Verbose -Message 'Validating the Tools object(s)...' foreach ($tool in $Tools) { # Validate main parameters if (-not $tool.PSObject.Properties["name"] -or -not [string]::IsNullOrWhiteSpace($tool.name) -eq $false) { Write-Debug -Message 'The name property is missing or empty.' return $false } if (-not $tool.PSObject.Properties["description"] -or -not [string]::IsNullOrWhiteSpace($tool.description) -eq $false) { Write-Debug -Message 'The description property is missing or empty.' return $false } # Validate parameter_definitions if (-not $tool.PSObject.Properties["parameter_definitions"]) { Write-Debug -Message 'The parameter_definitions property is missing.' return $false } # Validate each parameter definition foreach ($parameterName in $tool.parameter_definitions.Keys) { $parameter = $tool.parameter_definitions[$parameterName] if (-not ($parameter -is [PSCustomObject])) { Write-Error "Error: Parameter definition for '$parameterName' is not a PSCustomObject." 
return $false } # Validate 'description' property within parameter definition if (-not $parameter.description) { Write-Error "Error: 'description' property missing or null in parameter definition for '$parameterName'." return $false } # Validate 'type' property within parameter definition if (-not $parameter.type) { Write-Error "Error: 'type' property missing or null in parameter definition for '$parameterName'." return $false } # Validate 'required' property within parameter definition if (-not ($parameter.required -is [bool])) { Write-Error "Error: 'required' property missing or not a PSProperty in parameter definition for '$parameterName'." return $false } } #foreach_parameterName # # validate parameter_definitions sub-properties # if ([string]::IsNullOrWhiteSpace($tool.'parameter_definitions'.'parameter name'.description)) { # Write-Debug -Message 'The parameter_definitions description sub-property is missing or empty.' # return $false # } # if ([string]::IsNullOrWhiteSpace($tool.'parameter_definitions'.'parameter name'.type)) { # Write-Debug -Message 'The parameter_definitions type sub-property is missing or empty.' # return $false # } # if ($tool.'parameter_definitions'.'parameter name'.required -eq $true -or $tool.'parameter_definitions'.'parameter name'.required -eq $false) { # Write-Debug -Message 'The parameter_definitions required sub-property is valid.' # } # else { # Write-Debug -Message 'The parameter_definitions required sub-property is missing or empty.' # return $false # } } #foreach_tool return $true } #Test-CohereCommandRTool <# .SYNOPSIS Validates a Tools Results object for use with the Cohere Command R models. .DESCRIPTION Evaluates a Tools Results object to ensure it meets the requirements for use with the Cohere Command R models. It checks the structure of the tools results objects to ensure they are properly formatted. .EXAMPLE $toolsResults = [PSCustomObject]@{ call = [PSCustomObject]@{ name = "string" parameters = [PSCustomObject]@{ "parameter name" = "string" } generation_id = "string" } outputs = @( [PSCustomObject]@{ text = "string" } ) } Test-CohereCommandRToolResult -ToolResults $toolsResults Tests the Tools Results object to ensure it meets the requirements for use with the Cohere Command R models. .PARAMETER ToolResults A list of results from invoking tools recommended by the model in the previous chat turn. .OUTPUTS System.Boolean .NOTES None .COMPONENT pwshBedrock #> function Test-CohereCommandRToolResult { [CmdletBinding()] param ( [Parameter(Mandatory = $false, HelpMessage = 'A list of results from invoking tools recommended by the model in the previous chat turn.')] [PSCustomObject[]]$ToolResults ) Write-Verbose -Message 'Validating the ToolResults object(s)...' foreach ($toolResult in $ToolResults) { # Validate call object if (-not $toolResult.PSObject.Properties["call"]) { Write-Debug -Message 'The call property is missing.' return $false } # Validate outputs array if (-not $toolResult.PSObject.Properties["outputs"]) { Write-Debug -Message 'The outputs property is missing.' return $false } $outputs = $toolResult.outputs if (-not ($outputs -is [System.Array])) { Write-Debug -Message 'The outputs property is not an array.' return $false } if ($outputs.Count -eq 0) { Write-Debug -Message 'The outputs array is empty.' return $false } $call = $toolResult.call # Validate call.name if (-not $call.PSObject.Properties["name"] -or -not [string]::IsNullOrWhiteSpace($call.name) -eq $false) { Write-Debug -Message 'The call.name property is missing or empty.' 
return $false } # Validate call.parameters if (-not $call.PSObject.Properties["parameters"]) { Write-Debug -Message 'The call.parameters property is missing.' return $false } $parameters = $call.parameters foreach ($paramKey in $parameters.PSObject.Properties.Name) { if ([string]::IsNullOrWhiteSpace($parameters.$paramKey)) { Write-Debug -Message "The call.parameters.$paramKey property is missing or empty." return $false } } } #foreach_toolResult return $true } #Test-CohereCommandRToolResult <# .SYNOPSIS Tests if a document file is compatible with Converse API's requirements. .DESCRIPTION Evaluates the specified document file to ensure it meets Converse API's compatibility requirements based on their public documentation. It checks the file's presence, type, and size. If the file is not found, the function returns false. If the file type is not supported, the function returns false. If the file name does not meet the Converse API requirements, the function returns false. .EXAMPLE Test-ConverseAPIDocument -DocumentPath 'C:\path\to\document.pdf' Tests the document located at 'C:\path\to\document.pdf' for Converse API compatibility. .PARAMETER MediaPath File path to local media file. .OUTPUTS System.Boolean .NOTES The name of the document can only contain the following characters: Alphanumeric characters Whitespace characters (no more than one in a row) Hyphens Parentheses Square brackets Each document's size must be no more than 4.5 MB. .LINK https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html - document tab .COMPONENT pwshBedrock #> function Test-ConverseAPIDocument { [CmdletBinding()] param ( [Parameter(Mandatory = $false, HelpMessage = 'File path to local document.')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string[]]$DocumentPath ) $result = $true # Assume success Write-Verbose -Message 'Verifying presence of document...' try { $pathEval = Test-Path -Path $DocumentPath -ErrorAction Stop } catch { Write-Error ('Error verifying document path: {0}' -f $DocumentPath) Write-Error $_ $result = $false return $result } if ($pathEval -ne $true) { Write-Warning -Message ('The specified document path: {0} was not found.' -f $PhotoPath) $result = $false return $result } #if_testPath else { Write-Verbose -Message 'Path verified.' } #else_testPath Write-Verbose -Message 'Verifying media type...' $supportedMediaExtensions = @( 'pdf' 'csv' 'doc' 'docx' 'xls' 'xlsx' 'html' 'txt' 'md' ) Write-Verbose -Message ('Splitting document path: {0}' -f $DocumentPath) $divide = $DocumentPath.Split('.') $rawExtension = $divide[$divide.Length - 1] $extension = $rawExtension.ToUpper() Write-Verbose -Message "Verifying discovered extension: $extension" if ($supportedMediaExtensions -notcontains $extension) { Write-Warning -Message ('The specified document type: {0} is not supported.' -f $extension) $result = $false return $result } #if_supportedMediaExtensions else { Write-Verbose -Message 'Document type verified.' } #else_supportedMediaExtensions Write-Verbose -Message 'Verifying document file size...' try { $mediaFileInfo = Get-Item -Path $DocumentPath -ErrorAction Stop } catch { Write-Error ('Error verifying document file info: {0}' -f $DocumentPath) Write-Error $_ $result = $false return $result } $mediaSize = $mediaFileInfo.Length if ($mediaSize -gt 4.5MB) { Write-Warning -Message ('The specified document size: {0} exceeds the Converse API maximum allowed document file size of 4.5MB.' 
        $result = $false
        return $result
    } #if_mediaSize
    else {
        Write-Verbose -Message 'Document size verified.'
    } #else_mediaSize

    Write-Verbose -Message 'Verifying document file name...'
    $documentName = $mediaFileInfo.BaseName
    Write-Debug -Message ('Document base name: {0}' -f $documentName)
    if ($documentName -notmatch '^[a-zA-Z0-9\-\(\)\[\]]+(\s[a-zA-Z0-9\-\(\)\[\]]+)*$') {
        Write-Warning -Message ('The specified document name: {0} contains invalid characters.' -f $documentName)
        $result = $false
        return $result
    }

    return $result
} #Test-ConverseAPIDocument

<#
.SYNOPSIS
    Tests if a media file is compatible with Converse API's requirements.
.DESCRIPTION
    Evaluates the specified media file to ensure it meets Converse API's compatibility requirements based on their public documentation.
    It checks the file's presence, type, and size. If the file is not found, the function returns false.
    If the file type is not supported, the function returns false. If the file resolution exceeds Converse API's recommendations, the function returns false.
.EXAMPLE
    Test-ConverseAPIMedia -MediaPath 'C:\path\to\image.jpg'

    Tests the image located at 'C:\path\to\image.jpg' for Converse API compatibility.
.PARAMETER MediaPath
    File path to local media file.
.OUTPUTS
    System.Boolean
.NOTES
    Each image's size, height, and width must be no more than 3.75 MB, 8,000 px, and 8,000 px, respectively.
    Supported base64 source type for images: image/jpeg, image/png, image/gif, and image/webp media types.
.LINK
    https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference.html - image tab
.COMPONENT
    pwshBedrock
#>
function Test-ConverseAPIMedia {
    [CmdletBinding()]
    [Diagnostics.CodeAnalysis.SuppressMessageAttribute('PSUseSingularNouns', '', Justification = 'Just a collective noun.')]
    param (
        [Parameter(Mandatory = $false,
            HelpMessage = 'File path to local media file.')]
        [ValidateNotNull()]
        [ValidateNotNullOrEmpty()]
        [string]$MediaPath
    )

    $result = $true # Assume success

    Write-Verbose -Message 'Verifying presence of media...'
    try {
        $pathEval = Test-Path -Path $MediaPath -ErrorAction Stop
    }
    catch {
        Write-Error ('Error verifying media path: {0}' -f $MediaPath)
        Write-Error $_
        $result = $false
        return $result
    }
    if ($pathEval -ne $true) {
        Write-Warning -Message ('The specified media path: {0} was not found.' -f $MediaPath)
        $result = $false
        return $result
    } #if_testPath
    else {
        Write-Verbose -Message 'Path verified.'
    } #else_testPath

    Write-Verbose -Message 'Verifying media type...'
    $supportedMediaExtensions = @(
        'JPG'
        'JPEG'
        'PNG'
        'GIF'
        'WEBP'
    )
    Write-Verbose -Message ('Splitting media path: {0}' -f $MediaPath)
    $divide = $MediaPath.Split('.')
    $rawExtension = $divide[$divide.Length - 1]
    $extension = $rawExtension.ToUpper()
    Write-Verbose -Message "Verifying discovered extension: $extension"
    if ($supportedMediaExtensions -notcontains $extension) {
        Write-Warning -Message ('The specified media type: {0} is not supported.' -f $extension)
        $result = $false
        return $result
    } #if_supportedMediaExtensions
    else {
        Write-Verbose -Message 'Media type verified.'
    } #else_supportedMediaExtensions

    Write-Verbose -Message 'Verifying media file size...'
    try {
        $mediaFileInfo = Get-Item -Path $MediaPath -ErrorAction Stop
    }
    catch {
        Write-Error ('Error verifying media file info: {0}' -f $MediaPath)
        Write-Error $_
        $result = $false
        return $result
    }
    $mediaSize = $mediaFileInfo.Length
    if ($mediaSize -gt 3.75MB) {
        Write-Warning -Message ('The specified media size: {0} exceeds the Converse API maximum allowed image file size of 3.75MB.' -f $mediaSize)
        $result = $false
        return $result
    } #if_mediaSize
    else {
        Write-Verbose -Message 'Media size verified.'
    } #else_mediaSize

    Write-Verbose -Message 'Verifying media resolution...'
    $resolution = Get-ImageResolution -MediaPath $MediaPath
    if ($resolution.Width -gt 8000 -or $resolution.Height -gt 8000) {
        Write-Warning -Message ('The specified media resolution: {0}x{1} exceeds the Converse API requirement that height and width each be no more than 8,000 px.' -f $resolution.Width, $resolution.Height)
        $result = $false
        return $result
    } #if_size

    return $result
} #Test-ConverseAPIMedia

<#
.SYNOPSIS
    Validates a Tools object for use with the Converse API.
.DESCRIPTION
    Evaluates a Tools object to ensure it meets the requirements for use with the Converse API.
    It checks the structure of the tools objects to ensure they are properly formatted.
.EXAMPLE
    $tools = [PSCustomObject]@{
        Name        = 'restaurant'
        Description = 'This tool will look up restaurant information in a provided geographic area.'
        Properties  = @{
            location = [PSCustomObject]@{
                type        = 'string'
                description = 'The geographic location or locale. This could be a city, state, country, or full address.'
            }
            cuisine  = [PSCustomObject]@{
                type        = 'string'
                description = 'The type of cuisine to look up. This could be a specific type of food or a general category like "Italian" or "Mexican". If the user does not specify a cuisine, do not include this parameter in the response.'
            }
            budget   = [PSCustomObject]@{
                type        = 'string'
                description = 'The budget range for the restaurant. This has to be returned as a number from 1 to 5. The user could use words like "cheap", "moderate", or "expensive". They could provide "high end", or refer to a dollar amount like $$ or $$$$.'
            }
            rating   = [PSCustomObject]@{
                type        = 'string'
                description = 'The minimum rating for the restaurant. This has to be returned as a number from 1 to 5. The user may specify phrases like "good" or "excellent", or "highly rated"'
            }
        }
        required    = @(
            'location'
        )
    }
    Test-ConverseAPITool -Tools $tools

    Tests the Tools object to ensure it meets the requirements for use with the Converse API.
.PARAMETER Tools
    Definitions of tools that the model may use.
.OUTPUTS
    System.Boolean
.NOTES
    Not every property is validated. There are hash tables that can contain custom properties.
    The properties field must be a hashtable. Amazon.Runtime.Documents.Document does not handle the properties field if it is a PSCustomObject.
.COMPONENT
    pwshBedrock
#>
function Test-ConverseAPITool {
    [CmdletBinding()]
    param (
        [Parameter(Mandatory = $false,
            HelpMessage = 'Definitions of tools that the model may use.')]
        [PSCustomObject[]]$Tools
    )

    Write-Verbose -Message 'Validating the Tools object(s)...'

    foreach ($tool in $Tools) {
        # Validate main parameters
        if (-not $tool.PSObject.Properties['Name']) {
            Write-Debug -Message 'The Name property is missing or empty.'
            return $false
        }
        if (-not $tool.PSObject.Properties['Description']) {
            Write-Debug -Message 'The Description property is missing or empty.'
            return $false
        }
        if (-not $tool.PSObject.Properties['Properties']) {
            Write-Debug -Message 'The Properties property is missing.'
            return $false
        }
        if (-not $tool.PSObject.Properties['required']) {
            Write-Debug -Message 'The required property is missing.'
            return $false
        }

        if ($tool.Properties.Keys.Count -gt 0) {
            Write-Debug -Message 'Validating the Properties object...'
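            # For orientation, the per-key checks that follow expect each entry in the Properties
            # hashtable to be shaped roughly like this (a minimal sketch mirroring the 'location'
            # entry from the .EXAMPLE above; only 'type' and 'description' are actually validated):
            #   $tool.Properties['location'] = [PSCustomObject]@{
            #       type        = 'string'
            #       description = 'The geographic location or locale.'
            #   }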
Write-Debug -Message ('Properties count: {0}' -f $tool.Properties.Keys.Count) foreach ($key in $tool.Properties.Keys) { $value = $tool.Properties[$key] if (-not ($value.PSObject.Properties.Name -contains 'type')) { Write-Debug -Message 'The type property is missing.' return $false } if (-not ($value.PSObject.Properties.Name -contains 'description')) { Write-Debug -Message 'The description property is missing.' return $false } if ($value.type -ne 'string') { Write-Debug -Message 'The type property must be a string.' return $false } if ([string]::IsNullOrWhiteSpace($value.description)) { Write-Debug -Message 'The description property must not be null or whitespace.' return $false } } } else { Write-Debug -Message 'The Properties property is empty.' return $false } } #foreach_tool return $true } #Test-ConverseAPITool <# .SYNOPSIS Validates a Tools Results object for use with the Converse API. .DESCRIPTION Evaluates a Tools Results object to ensure it meets the requirements for use with the Converse API. It checks the structure of the tools results objects to ensure they are properly formatted. .EXAMPLE $toolsResults = [PSCustomObject]@{ role = 'tool' tool_call_id = 'string' content = 'string' } Test-ConverseAPIToolResult -ToolResults $toolsResults Tests the Tools Results object to ensure it meets the requirements for use with the Converse API. .PARAMETER ToolResults A list of results from invoking tools recommended by the model in the previous chat turn. .OUTPUTS System.Boolean .NOTES None .COMPONENT pwshBedrock #> function Test-ConverseAPIToolResult { [CmdletBinding()] param ( [Parameter(Mandatory = $false, HelpMessage = 'A list of results from invoking tools recommended by the model in the previous chat turn.')] [PSCustomObject[]]$ToolResults ) Write-Verbose -Message 'Validating the ToolResults object(s)...' $allToolCallIds = New-Object System.Collections.Generic.List[string] foreach ($toolResult in $ToolResults) { if (-not $toolResult.PSObject.Properties['ToolUseId']) { Write-Debug -Message 'The ToolUseId property is missing.' return $false } if (-not $toolResult.PSObject.Properties['Content']) { Write-Debug -Message 'The Content property is missing.' return $false } if (-not $toolResult.PSObject.Properties['status']) { Write-Debug -Message 'The status property is missing.' return $false } if ($toolResult.status -ne 'success' -and $toolResult.status -ne 'error') { Write-Debug -Message 'The status property is not valid. It must be either "success" or "error".' return $false } if ($toolResult.status -eq 'error') { # content should be a string if ($toolResult.Content -isnot [string]) { Write-Debug -Message 'When tool status is "error", the Content property must be a string.' return $false } } elseif ($toolResult.status -eq 'success') { Write-Debug -Message 'Checking content for success status...' foreach ($content in $toolResult.Content) { Write-Debug -Message 'Checking content object format' # content should be a object or PSCustomObject if ($content -is [string]) { Write-Debug -Message 'When tool status is "success", the Content must contain either an object or PSCustomObject.' return $false } } } $allToolCallIds.Add($toolResult.ToolUseId) } #foreach_toolResult # each tool call id should be a unique id. 
we need to check for duplicates # Convert the list to an array and group by the IDs $groupedIds = $allToolCallIds | Group-Object # Check if any group has more than one element $hasDuplicates = $groupedIds | Where-Object { $_.Count -gt 1 } # Determine the result based on the presence of duplicates $hasNoDuplicates = $hasDuplicates.Count -eq 0 if ($hasNoDuplicates -eq $false) { Write-Debug -Message 'The tool_call_id property is not unique.' return $false } return $true } #Test-ConverseAPIToolResult <# .SYNOPSIS Validates a Tools object for use with the Mistral AI Chat models. .DESCRIPTION Evaluates a Tools object to ensure it meets the requirements for use with the Mistral AI Chat models. It checks the structure of the tools objects to ensure they are properly formatted. .EXAMPLE $tools = [PSCustomObject]@{ type = "function" function = @{ name = "string" description = "string" parameters = @{ type = "string" properties = @{ sign = @{ type = "string" description = "string" } } required = @( "string" ) } } } Test-MistralAIChatTool -Tools $tools Tests the Tools object to ensure it meets the requirements for use with the Mistral AI Chat models. .PARAMETER Tools Definitions of tools that the model may use. .OUTPUTS System.Boolean .NOTES Not every property is validated. There are hash tables that can contain custom properties. .COMPONENT pwshBedrock #> function Test-MistralAIChatTool { [CmdletBinding()] param ( [Parameter(Mandatory = $false, HelpMessage = 'Definitions of tools that the model may use.')] [PSCustomObject[]]$Tools ) Write-Verbose -Message 'Validating the Tools object(s)...' foreach ($tool in $Tools) { # Validate main parameters if (-not $tool.PSObject.Properties["type"] -or -not [string]::IsNullOrWhiteSpace($tool.type) -eq $false) { Write-Debug -Message 'The type property is missing or empty.' return $false } # Validate parameter_definitions if (-not $tool.PSObject.Properties["function"]) { Write-Debug -Message 'The function property is missing.' return $false } # validate parameter_definitions sub-properties if ([string]::IsNullOrWhiteSpace($tool.function.name)) { Write-Debug -Message 'The function name sub-property is missing or empty.' return $false } if ([string]::IsNullOrWhiteSpace($tool.function.description)) { Write-Debug -Message 'The function description sub-property is missing or empty.' return $false } if ([string]::IsNullOrWhiteSpace($tool.function.parameters)) { Write-Debug -Message 'The function parameters property is missing.' return $false } if ([string]::IsNullOrWhiteSpace($tool.function.parameters.type)) { Write-Debug -Message 'The function parameters type sub-property is missing or empty.' return $false } if ([string]::IsNullOrWhiteSpace($tool.function.parameters.properties)) { Write-Debug -Message 'The function parameters properties sub-property is missing or empty.' return $false } if ([string]::IsNullOrWhiteSpace($tool.function.parameters.required)) { Write-Debug -Message 'The function parameters required sub-property is missing or empty.' return $false } } return $true } #Test-MistralAIChatTool <# .SYNOPSIS Validates a Tools Results object for use with the Mistral AI chat model. .DESCRIPTION Evaluates a Tools Results object to ensure it meets the requirements for use with the Mistral AI chat model. It checks the structure of the tools results objects to ensure they are properly formatted. 
.EXAMPLE
    $toolsResults = [PSCustomObject]@{
        role         = 'tool'
        tool_call_id = 'string'
        content      = 'string'
    }
    Test-MistralAIChatToolResult -ToolResults $toolsResults

    Tests the Tools Results object to ensure it meets the requirements for use with the Mistral AI chat model.
.PARAMETER ToolResults
    A list of results from invoking tools recommended by the model in the previous chat turn.
.OUTPUTS
    System.Boolean
.NOTES
    None
.COMPONENT
    pwshBedrock
#>
function Test-MistralAIChatToolResult {
    [CmdletBinding()]
    param (
        [Parameter(Mandatory = $false,
            HelpMessage = 'A list of results from invoking tools recommended by the model in the previous chat turn.')]
        [PSCustomObject[]]$ToolResults
    )

    Write-Verbose -Message 'Validating the ToolResults object(s)...'

    $allToolCallIds = New-Object System.Collections.Generic.List[string]

    foreach ($toolResult in $ToolResults) {
        if (-not $toolResult.PSObject.Properties['role']) {
            Write-Debug -Message 'The role property is missing.'
            return $false
        }
        if (-not $toolResult.PSObject.Properties['tool_call_id']) {
            Write-Debug -Message 'The tool_call_id property is missing.'
            return $false
        }
        if (-not $toolResult.PSObject.Properties['content']) {
            Write-Debug -Message 'The content property is missing.'
            return $false
        }
        if ($toolResult.role -ne 'tool') {
            Write-Debug -Message 'The role property is not set to tool.'
            return $false
        }
        $allToolCallIds.Add($toolResult.tool_call_id)
    } #foreach_toolResult

    # each tool call id should be a unique id. we need to check for duplicates
    # Convert the list to an array and group by the IDs
    $groupedIds = $allToolCallIds | Group-Object
    # Check if any group has more than one element
    $hasDuplicates = $groupedIds | Where-Object { $_.Count -gt 1 }
    # Determine the result based on the presence of duplicates
    $hasNoDuplicates = $hasDuplicates.Count -eq 0
    if ($hasNoDuplicates -eq $false) {
        Write-Debug -Message 'The tool_call_id property is not unique.'
        return $false
    }

    return $true
} #Test-MistralAIChatToolResult

<#
.SYNOPSIS
    Tests if a media file is compatible with Stability AI Diffusion model requirements.
.DESCRIPTION
    Evaluates the specified media file to ensure it meets Stability AI Diffusion model compatibility requirements based on their public documentation.
    It checks the file's presence, type, and size. If the file is not found, the function returns false.
    If the file type is not supported, the function returns false. If the file resolution does not meet Stability AI Diffusion model requirements, the function returns false.
.EXAMPLE
    Test-StabilityAIDiffusionMedia -MediaPath 'C:\path\to\image.png'

    Tests the image located at 'C:\path\to\image.png' for Stability AI Diffusion model compatibility.
.PARAMETER MediaPath
    File path to local media file.
.OUTPUTS
    System.Boolean
.NOTES
    Supported image types - PNG
    The value must be one of 1024x1024, 1152x896, 1216x832, 1344x768, 1536x640, 640x1536, 768x1344, 832x1216, 896x1152
    Height
        Measured in pixels. Pixel limit is 1048576, so technically any dimension is allowable within that amount.
    Width
        Measured in pixels. Pixel limit is 1048576, so technically any dimension is allowable within that amount.
    A minimum of 262k pixels and a maximum of 1.04m pixels are recommended when generating images with 512px models,
    and a minimum of 589k pixels and a maximum of 1.04m pixels for 768px models.
    The true pixel limit is 1048576.
    To avoid the dreaded 6N IndexError it is advised to use 64px increments when choosing an aspect ratio.
    Popular ratio combinations for 512px models include 1536 x 512 and 1536 x 384, while 1536 x 640 and 1024 x 576 are recommended for 768px models.
    For 512px models, the minimum useful sizes are 192-256 in one dimension. For 768px models the minimum useful size is 384 in one dimension.
    Generating images under the recommended dimensions may result in undesirable artifacts.
.COMPONENT
    pwshBedrock
.LINK
    https://platform.stability.ai/docs/legacy/grpc-api/features/api-parameters#about-dimensions
#>
function Test-StabilityAIDiffusionMedia {
    [CmdletBinding()]
    [Diagnostics.CodeAnalysis.SuppressMessageAttribute('PSUseSingularNouns', '', Justification = 'Just a collective noun.')]
    param (
        [Parameter(Mandatory = $false,
            HelpMessage = 'File path to local media file.')]
        [ValidateNotNull()]
        [ValidateNotNullOrEmpty()]
        [string]$MediaPath
    )

    $result = $true # Assume success

    Write-Verbose -Message 'Verifying presence of media...'
    try {
        $pathEval = Test-Path -Path $MediaPath -ErrorAction Stop
    }
    catch {
        Write-Error ('Error verifying media path: {0}' -f $MediaPath)
        Write-Error $_
        $result = $false
        return $result
    }
    if ($pathEval -ne $true) {
        Write-Warning -Message ('The specified media path: {0} was not found.' -f $MediaPath)
        $result = $false
        return $result
    } #if_testPath
    else {
        Write-Verbose -Message 'Path verified.'
    } #else_testPath

    Write-Verbose -Message 'Verifying media type...'
    $supportedMediaExtensions = @(
        'PNG'
    )
    Write-Verbose -Message ('Splitting media path: {0}' -f $MediaPath)
    $divide = $MediaPath.Split('.')
    $rawExtension = $divide[$divide.Length - 1]
    $extension = $rawExtension.ToUpper()
    Write-Verbose -Message "Verifying discovered extension: $extension"
    if ($supportedMediaExtensions -notcontains $extension) {
        Write-Warning -Message ('The specified media type: {0} is not supported.' -f $extension)
        $result = $false
        return $result
    } #if_supportedMediaExtensions
    else {
        Write-Verbose -Message 'Media type verified.'
    } #else_supportedMediaExtensions

    # Write-Verbose -Message 'Verifying media file size...'
    # try {
    #     $mediaFileInfo = Get-Item -Path $MediaPath -ErrorAction Stop
    # }
    # catch {
    #     Write-Error ('Error verifying media file info: {0}' -f $MediaPath)
    #     Write-Error $_
    #     $result = $false
    #     return $result
    # }
    # $mediaSize = $mediaFileInfo.Length
    # if ($mediaSize -gt 5MB) {
    #     Write-Warning -Message ('The specified media size: {0} exceeds the Amazon Titan maximum allowed image file size of 5MB.' -f $mediaSize)
    #     $result = $false
    #     return $result
    # } #if_mediaSize
    # else {
    #     Write-Verbose -Message 'Media size verified.'
    # } #else_mediaSize

    Write-Verbose -Message 'Verifying media resolution...'
    Write-Verbose ('Media path: {0}' -f $MediaPath)
    $resolution = Get-ImageResolution -MediaPath $MediaPath

    # check if the resolution is within the pixel limit
    if (($resolution.Width * $resolution.Height -gt 1048576) -or ($resolution.Width * $resolution.Height -lt 262144)) {
        Write-Warning -Message ('The specified media resolution: {0}x{1} is outside the Stability AI Diffusion model supported range of 262144 to 1048576 total pixels.' -f $resolution.Width, $resolution.Height)
        $result = $false
        return $result
    }

    # # Check if the resolution matches any of the supported resolutions
    # $matchFound = $false
    # foreach ($supportedResolution in $supportedResolutions) {
    #     if ($resolution.width -eq $supportedResolution.Width -and $resolution.height -eq $supportedResolution.Height) {
    #         $matchFound = $true
    #         break
    #     }
    # }
    # if ($matchFound -eq $false) {
    #     Write-Warning -Message ('The specified media resolution: {0}x{1} is not supported.'
-f $resolution.Width, $resolution.Height) # Write-Warning -Message 'https://docs.aws.amazon.com/bedrock/latest/userguide/model-parameters-titan-image.html' # $result = $false # return $result # } return $result } #Test-StabilityAIDiffusionMedia <# .EXTERNALHELP pwshBedrock-help.xml #> function Get-ModelContext { [CmdletBinding()] param ( [Parameter(Mandatory = $true, HelpMessage = 'The unique identifier of the model.')] [ValidateSet( # 'ai21.j2-grande-instruct', # 'ai21.j2-jumbo-instruct', 'ai21.jamba-instruct-v1:0', # 'ai21.j2-mid-v1', # 'ai21.j2-ultra-v1', 'amazon.titan-image-generator-v1', 'amazon.titan-text-express-v1', 'amazon.titan-text-lite-v1', 'amazon.titan-text-premier-v1:0', 'amazon.titan-tg1-large', 'anthropic.claude-v2:1', 'anthropic.claude-3-haiku-20240307-v1:0', 'anthropic.claude-3-opus-20240229-v1:0', 'anthropic.claude-3-sonnet-20240229-v1:0', 'anthropic.claude-3-5-sonnet-20240620-v1:0', # 'cohere.command-text-v14', # 'cohere.command-light-text-v14', 'cohere.command-r-v1:0', 'cohere.command-r-plus-v1:0', 'meta.llama2-13b-chat-v1', 'meta.llama2-70b-chat-v1', 'meta.llama3-70b-instruct-v1:0', 'meta.llama3-8b-instruct-v1:0', 'meta.llama3-1-8b-instruct-v1:0', 'meta.llama3-1-70b-instruct-v1:0', 'mistral.mistral-7b-instruct-v0:2', 'mistral.mistral-large-2402-v1:0', 'mistral.mistral-large-2407-v1:0', 'mistral.mistral-small-2402-v1:0', 'mistral.mixtral-8x7b-instruct-v0:1', 'stability.stable-diffusion-xl-v1', 'Converse' )] [string]$ModelID ) Write-Verbose -Message ('Getting current model context for {0}' -f $ModelID) $context = $Global:pwshBedrockModelContext | Where-Object { $_.ModelID -eq $ModelID } return $context.Context } #Get-ModelContext <# .EXTERNALHELP pwshBedrock-help.xml #> function Get-ModelCostEstimate { [CmdletBinding()] param ( [Parameter(Mandatory = $false, HelpMessage = 'The number of input tokens.', ParameterSetName = 'Token')] [ValidateNotNullOrEmpty()] [int]$InputTokenCount, [Parameter(Mandatory = $false, HelpMessage = 'The number of output tokens.', ParameterSetName = 'Token')] [ValidateNotNullOrEmpty()] [int]$OutputTokenCount, [Parameter(Mandatory = $true, HelpMessage = 'Image count returned by the API.', ParameterSetName = 'Image')] [ValidateNotNullOrEmpty()] [int]$ImageCount, [Parameter(Mandatory = $false, HelpMessage = 'Number of steps to run the image model for.', ParameterSetName = 'Image')] [ValidateNotNullOrEmpty()] [int]$Steps, [Parameter(Mandatory = $true, HelpMessage = 'The unique identifier of the model.')] [ValidateSet( 'ai21.j2-grande-instruct', 'ai21.j2-jumbo-instruct', 'ai21.jamba-instruct-v1:0', 'ai21.j2-mid-v1', 'ai21.j2-ultra-v1', 'amazon.titan-image-generator-v1', 'amazon.titan-text-express-v1', 'amazon.titan-text-lite-v1', 'amazon.titan-text-premier-v1:0', 'amazon.titan-tg1-large', 'anthropic.claude-v2:1', 'anthropic.claude-3-haiku-20240307-v1:0', 'anthropic.claude-3-opus-20240229-v1:0', 'anthropic.claude-3-sonnet-20240229-v1:0', 'anthropic.claude-3-5-sonnet-20240620-v1:0', 'cohere.command-text-v14', 'cohere.command-light-text-v14', 'cohere.command-r-v1:0', 'cohere.command-r-plus-v1:0', 'meta.llama2-13b-chat-v1', 'meta.llama2-70b-chat-v1', 'meta.llama3-70b-instruct-v1:0', 'meta.llama3-8b-instruct-v1:0', 'meta.llama3-1-8b-instruct-v1:0', 'meta.llama3-1-70b-instruct-v1:0', 'mistral.mistral-7b-instruct-v0:2', 'mistral.mistral-small-2402-v1:0', 'mistral.mistral-large-2402-v1:0', 'mistral.mistral-large-2407-v1:0', 'mistral.mixtral-8x7b-instruct-v0:1', 'stability.stable-diffusion-xl-v1' )] [string]$ModelID ) Write-Verbose -Message ('Getting cost 
model estimates for {0}' -f $ModelID) if ($ModelID -like 'anthropic*') { $modelInfo = $script:anthropicModelInfo | Where-Object { $_.ModelID -eq $ModelID } } elseif ($ModelID -like 'amazon*') { $modelInfo = $script:amazonModelInfo | Where-Object { $_.ModelID -eq $ModelID } } elseif ($ModelID -like 'ai21*') { $modelInfo = $script:ai21ModelInfo | Where-Object { $_.ModelID -eq $ModelID } } elseif ($ModelID -like 'cohere*') { $modelInfo = $script:cohereModelInfo | Where-Object { $_.ModelID -eq $ModelID } } elseif ($ModelID -like 'meta*') { $modelInfo = $script:metaModelInfo | Where-Object { $_.ModelID -eq $ModelID } } elseif ($ModelID -like 'mistral*') { $modelInfo = $script:mistralAIModelInfo | Where-Object { $_.ModelID -eq $ModelID } } elseif ($ModelID -eq 'stability.stable-diffusion-xl-v1') { $modelInfoRaw = $script:stabilityAIModelInfo | Where-Object { $_.ModelID -eq $ModelID } $modelInfo = [PSCustomObject]@{ ImageCost = 0 } if ($Steps -gt 50) { $modelInfo.ImageCost = $modelInfoRaw.ImageCost.Over50Steps } else { $modelInfo.ImageCost = $modelInfoRaw.ImageCost.Under50Steps } } Write-Debug ($modelInfo | Out-String) switch ($PSCmdlet.ParameterSetName) { Token { Write-Debug ('Calculating token cost. {0} input tokens and {1} output tokens at {2} per 1000 tokens' -f $InputTokenCount, $OutputTokenCount, $modelInfo.InputTokenCost) [float]$inputCost = (($inputTokenCount / 1000 ) * $modelInfo.InputTokenCost) [float]$outputCost = (($OutputTokenCount / 1000 ) * $modelInfo.OutputTokenCost) [float]$total = $inputCost + $outputCost $costObj = [PSCustomObject]@{ Total = $total InputCost = $inputCost OutputCost = $outputCost } } Image { Write-Debug ('Calculating image cost. {0} images at {1} per image' -f $ImageCount, $modelInfo.ImageCost) [float]$imageCost = ($ImageCount * $modelInfo.ImageCost) $costObj = [PSCustomObject]@{ ImageCost = $imageCost } } } #switch_parameterSetName return $costObj } #Get-ModelCostEstimate <# .EXTERNALHELP pwshBedrock-help.xml #> function Get-ModelInfo { [CmdletBinding()] param ( [Parameter(Mandatory = $true, HelpMessage = 'The unique identifier of the model.', ParameterSetName = 'Single')] [ValidateSet( 'ai21.j2-grande-instruct', 'ai21.j2-jumbo-instruct', 'ai21.jamba-instruct-v1:0', 'ai21.j2-mid-v1', 'ai21.j2-ultra-v1', 'amazon.titan-image-generator-v1', 'amazon.titan-text-express-v1', 'amazon.titan-text-lite-v1', 'amazon.titan-text-premier-v1:0', 'amazon.titan-tg1-large', 'anthropic.claude-v2:1', 'anthropic.claude-3-haiku-20240307-v1:0', 'anthropic.claude-3-opus-20240229-v1:0', 'anthropic.claude-3-sonnet-20240229-v1:0', 'anthropic.claude-3-5-sonnet-20240620-v1:0', 'cohere.command-text-v14', 'cohere.command-light-text-v14', 'cohere.command-r-v1:0', 'cohere.command-r-plus-v1:0', 'meta.llama2-13b-chat-v1', 'meta.llama2-70b-chat-v1', 'meta.llama3-70b-instruct-v1:0', 'meta.llama3-8b-instruct-v1:0', 'meta.llama3-1-8b-instruct-v1:0', 'meta.llama3-1-70b-instruct-v1:0', 'mistral.mistral-7b-instruct-v0:2', 'mistral.mistral-small-2402-v1:0', 'mistral.mistral-large-2402-v1:0', 'mistral.mistral-large-2407-v1:0', 'mistral.mixtral-8x7b-instruct-v0:1', 'stability.stable-diffusion-xl-v1' )] [string]$ModelID, [Parameter(Mandatory = $true, HelpMessage = 'Gets information for all models.', ParameterSetName = 'All')] [switch]$AllModels, [Parameter(Mandatory = $true, HelpMessage = 'Gets information for model(s) from a specific provider.', ParameterSetName = 'Provider')] [ValidateSet( 'Anthropic', 'Amazon', 'AI21 Labs', 'Cohere', 'Meta', 'Mistral AI', 'Stability AI' )] [string]$Provider ) $modelInfo 
= @() $modelInfo += $script:anthropicModelInfo $modelInfo += $script:amazonModelInfo $modelInfo += $script:ai21ModelInfo $modelInfo += $script:cohereModelInfo $modelInfo += $script:metaModelInfo $modelInfo += $script:mistralAIModelInfo $modelInfo += $script:stabilityAIModelInfo switch ($PSCmdlet.ParameterSetName) { 'Single' { Write-Verbose -Message ('Getting model information for {0}' -f $ModelID) $returnInfo = $modelInfo | Where-Object { $_.ModelID -eq $ModelID } } 'All' { Write-Verbose -Message ('$AllModels is {0}. Retrieving all model info.' -f $AllModels) $returnInfo = $modelInfo } 'Provider' { Write-Verbose -Message ('Getting model(s) information for {0}' -f $Provider) $returnInfo = $modelInfo | Where-Object { $_.ProviderName -eq $Provider } } } Write-Debug -Message ($returnInfo | Out-String) return $returnInfo } #Get-ModelInfo <# .EXTERNALHELP pwshBedrock-help.xml #> function Get-ModelTally { [CmdletBinding()] param ( [Parameter(Mandatory = $true, HelpMessage = 'The unique identifier of the model.', ParameterSetName = 'Single')] [ValidateSet( 'ai21.j2-grande-instruct', 'ai21.j2-jumbo-instruct', 'ai21.jamba-instruct-v1:0', 'ai21.j2-mid-v1', 'ai21.j2-ultra-v1', 'amazon.titan-image-generator-v1', 'amazon.titan-text-express-v1', 'amazon.titan-text-lite-v1', 'amazon.titan-text-premier-v1:0', 'amazon.titan-tg1-large', 'anthropic.claude-v2:1', 'anthropic.claude-3-haiku-20240307-v1:0', 'anthropic.claude-3-opus-20240229-v1:0', 'anthropic.claude-3-sonnet-20240229-v1:0', 'anthropic.claude-3-5-sonnet-20240620-v1:0', 'cohere.command-text-v14', 'cohere.command-light-text-v14', 'cohere.command-r-v1:0', 'cohere.command-r-plus-v1:0', 'meta.llama2-13b-chat-v1', 'meta.llama2-70b-chat-v1', 'meta.llama3-70b-instruct-v1:0', 'meta.llama3-8b-instruct-v1:0', 'meta.llama3-1-8b-instruct-v1:0', 'meta.llama3-1-70b-instruct-v1:0', 'mistral.mistral-7b-instruct-v0:2', 'mistral.mistral-small-2402-v1:0', 'mistral.mistral-large-2402-v1:0', 'mistral.mistral-large-2407-v1:0', 'mistral.mixtral-8x7b-instruct-v0:1', 'stability.stable-diffusion-xl-v1' )] [string]$ModelID, [Parameter(Mandatory = $true, HelpMessage = 'Gets the tally for all models.', ParameterSetName = 'All')] [switch]$AllModels, [Parameter(Mandatory = $true, HelpMessage = 'Gets the total tallied cost for all models.', ParameterSetName = 'Total')] [switch]$JustTotalCost ) Write-Verbose -Message 'Processing Get-ModelTally' switch ($PSCmdlet.ParameterSetName) { 'Single' { Write-Verbose -Message ('Getting model tally for {0}' -f $ModelID) $modelTally = $Global:pwshBedRockSessionModelTally | Where-Object { $_.ModelID -eq $ModelID } Write-Debug -Message ('ModelTally: {0}' -f $modelTally) return $modelTally } 'All' { Write-Verbose -Message ('AllModels: {0} - getting all models' -f $AllModels) return $Global:pwshBedRockSessionModelTally } 'Total' { Write-Verbose -Message ('JustTotalCost: {0} - getting total cost' -f $JustTotalCost) return $Global:pwshBedRockSessionCostEstimate } } } #Get-ModelTally <# .EXTERNALHELP pwshBedrock-help.xml #> function Get-TokenCountEstimate { [CmdletBinding()] param ( [Parameter(Mandatory = $true, HelpMessage = 'The text to estimate tokens for.')] [string]$Text ) $normalizedText = $Text.Replace("`r`n", "`n").Replace("`r", "`n") # Calculate character count $charCount = $normalizedText.Length Write-Debug ('Character count: {0}' -f $charCount) Write-Verbose -Message 'Evaluate token estimate based on character count.' 
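    # Worked example of the heuristic applied below (illustrative only): a 2,000 character prompt
    # estimates to [math]::Ceiling(2000 / 4) = 500 tokens. Actual tokenizer counts vary by model,
    # so treat the result as a rough approximation rather than an exact count.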
# Estimate token count (1 token ≈ 4 characters) $estimatedTokens = [math]::Ceiling($charCount / 4) Write-Verbose -Message ('Estimated tokens: {0}' -f $estimatedTokens) return $estimatedTokens } #Get-TokenCountEstimate <# .EXTERNALHELP pwshBedrock-help.xml #> function Invoke-AI21LabsJambaModel { [CmdletBinding()] [Diagnostics.CodeAnalysis.SuppressMessageAttribute('PSUsePSCredentialType', '', Justification = 'Suppressed to support AWS credential parameter.')] param ( [Parameter(Mandatory = $true, HelpMessage = 'The message to be sent to the model.')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string]$Message, [Parameter(Mandatory = $false, HelpMessage = 'Sets the behavior and context for the model in the conversation.')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string]$SystemPrompt, [Parameter(Mandatory = $true, HelpMessage = 'The unique identifier of the model.')] [ValidateSet( 'ai21.jamba-instruct-v1:0' )] [string]$ModelID, [Parameter(Mandatory = $false, HelpMessage = 'Specify if you want the full object returned instead of just the message reply.')] [switch]$ReturnFullObject, [Parameter(Mandatory = $false, HelpMessage = 'Do not persist the conversation context history.')] [switch]$NoContextPersist, # model parameters [Parameter(Mandatory = $false, HelpMessage = 'The maximum number of tokens to generate before stopping.')] [ValidateRange(1, 4096)] [int]$MaxTokens = 4096, [Parameter(Mandatory = $false, HelpMessage = 'How much variation to provide in each answer. Setting this value to 0 guarantees the same response to the same question every time. Setting a higher value encourages more variation.')] [ValidateRange(0, 2.0)] [float]$Temperature, [Parameter(Mandatory = $false, HelpMessage = 'Use a lower value to ignore less probable options and decrease the diversity of responses.')] [ValidateRange(0, 1.0)] [float]$TopP, [Parameter(Mandatory = $false, HelpMessage = 'Custom text sequences that cause the model to stop generating.')] [ValidateNotNullOrEmpty()] [string[]]$StopSequences, [Parameter(Mandatory = $false, HelpMessage = 'Number of responses that the model should generate.')] [ValidateRange(1, 16)] [int]$ResponseNumber, # Common Parameters [Parameter(Mandatory = $false, HelpMessage = 'The AWS access key for the user account.')] [string]$AccessKey, [Parameter(Mandatory = $false, HelpMessage = 'An AWSCredentials object instance containing access and secret key information, and optionally a token for session-based credentials.')] [Amazon.Runtime.AWSCredentials]$Credential, [Parameter(Mandatory = $false, HelpMessage = 'The endpoint to make the call against. 
Not for normal use.')] [string]$EndpointUrl, [Parameter(Mandatory = $false, HelpMessage = 'Used with SAML-based authentication when ProfileName references a SAML role profile.')] [System.Management.Automation.PSCredential]$NetworkCredential, [Parameter(Mandatory = $false, HelpMessage = 'Used to specify the name and location of the ini-format credential file (shared with the AWS CLI and other AWS SDKs)')] [string]$ProfileLocation, [Parameter(Mandatory = $false, HelpMessage = 'The user-defined name of an AWS credentials or SAML-based role profile containing credential information.')] [string]$ProfileName, [Parameter(Mandatory = $false, HelpMessage = 'The system name of an AWS region or an AWSRegion instance.')] [object]$Region, [Parameter(Mandatory = $false, HelpMessage = 'The AWS secret key for the user account.')] [string]$SecretKey, [Parameter(Mandatory = $false, HelpMessage = 'The session token if the access and secret keys are temporary session-based credentials.')] [string]$SessionToken ) $modelInfo = $script:mistralAIModelInfo | Where-Object { $_.ModelId -eq $ModelID } Write-Debug -Message 'Model Info:' Write-Debug -Message ($modelInfo | Out-String) if ($ResponseNumber -gt 1 -and $Temperature -eq 0) { throw 'When generating multiple responses, the Temperature parameter must be set to a value greater than 0.' } # the system prompt must always be the first message in the context, otherwise the model will fail validation # *Note: on subsequent calls, the system prompt will be updated instead of replaced, ensuring the system prompt is always the first message in the context if ($SystemPrompt) { $formatAI21LabsJambaModelSplat = @{ Role = 'system' Message = $SystemPrompt ModelID = $ModelID NoContextPersist = $NoContextPersist } $formattedMessages = Format-AI21LabsJambaModel @formatAI21LabsJambaModelSplat } if ($Message) { $formatAI21LabsJambaModelSplat = @{ Role = 'user' Message = $Message ModelID = $ModelID NoContextPersist = $NoContextPersist } $formattedMessages = Format-AI21LabsJambaModel @formatAI21LabsJambaModelSplat } #region cmdletParams $bodyObj = @{ messages = @( $formattedMessages ) } if ($MaxTokens) { $bodyObj.Add('max_tokens', $MaxTokens) } if ($Temperature) { $bodyObj.Add('temperature', $Temperature) } if ($TopP) { $bodyObj.Add('top_p', $TopP) } if ($StopSequences) { $bodyObj.Add('stop_sequences', $StopSequences) } if ($ResponseNumber) { $bodyObj.Add('n', $ResponseNumber) } $jsonBody = $bodyObj | ConvertTo-Json -Depth 10 [byte[]]$byteArray = [System.Text.Encoding]::UTF8.GetBytes($jsonBody) $cmdletParams = @{ ContentType = 'application/json' ModelId = $ModelID Body = $byteArray } Write-Debug -Message 'Cmdlet Params:' Write-Debug -Message ($cmdletParams | Out-String) Write-Debug -Message 'Body JSON:' Write-Debug -Message ($jsonBody | Out-String) #endregion #region commonParams $commonParams = @{} if ($AccessKey) { $commonParams.Add('AccessKey', $AccessKey) } if ($Credential) { $commonParams.Add('Credential', $Credential) } if ($EndpointUrl) { $commonParams.Add('EndpointUrl', $EndpointUrl) } if ($NetworkCredential) { $commonParams.Add('NetworkCredential', $NetworkCredential) } if ($ProfileLocation) { $commonParams.Add('ProfileLocation', $ProfileLocation) } if ($ProfileName) { $commonParams.Add('ProfileName', $ProfileName) } if ($Region) { $commonParams.Add('Region', $Region) } if ($SecretKey) { $commonParams.Add('SecretKey', $SecretKey) } if ($SessionToken) { $commonParams.Add('SessionToken', $SessionToken) } #endregion try { $rawResponse = Invoke-BDRRModel @cmdletParams 
@commonParams -ErrorAction Stop } catch { # we need to remove the user context from the global variable if the model is not successfully engaged $context = $Global:pwshBedrockModelContext | Where-Object { $_.ModelID -eq $ModelID } Write-Debug -Message 'Catch Block. Context:' Write-Debug -Message ($context | Out-String) Write-Debug -Message ('Context count: {0}' -f $context.Context.Count) if ($context.Context.Count -le 1) { $context.Context = New-Object System.Collections.Generic.List[object] } else { $context.Context.RemoveAt($context.Context.Count - 1) } $exceptionMessage = $_.Exception.Message if ($exceptionMessage -like "*don't have access*") { Write-Debug -Message 'Specific Error' Write-Warning -Message 'You do not have access to the requested model.' Write-Warning -Message 'In your AWS account, you will need to request access to the model.' Write-Warning -Message 'AWS -> Amazon Bedrock -> Model Access -> Request Access' throw ('No access to model {0}.' -f $ModelID) } else { Write-Debug -Message 'General Error' Write-Debug -Message ($_ | Out-String) Write-Error -Message $_ Write-Error -Message $_.Exception.Message throw } } if ([String]::IsNullOrWhiteSpace($rawResponse)) { throw 'No response from model API.' } Write-Verbose -Message 'Processing response.' try { $jsonBody = ConvertFrom-MemoryStreamToString -MemoryStream $rawResponse.body -ErrorAction Stop } catch { # we need to remove the user context from the global variable if the model is not successfully engaged $context = $Global:pwshBedrockModelContext | Where-Object { $_.ModelID -eq $ModelID } Write-Debug -Message ($context | Out-String) Write-Debug -Message ('Context count: {0}' -f $context.Context.Count) if ($context.Context.Count -le 1) { $context.Context = New-Object System.Collections.Generic.List[object] } else { $context.Context.RemoveAt($context.Context.Count - 1) } Write-Error $_ throw } Write-Debug -Message 'Response JSON:' Write-Debug -Message ($jsonBody | Out-String) Write-Verbose -Message 'Converting response from JSON.' $response = $jsonBody | ConvertFrom-Json if ([string]::IsNullOrWhiteSpace($response.choices.message.content)) { if ($MaxTokens -lt 150) { Write-Warning -Message 'In some cases, the model may return an empty response when the max tokens is set to a low value.' Write-Warning -Message ('MaxTokens on this call was set to {0}.' -f $MaxTokens) Write-Warning -Message 'Try increasing the MaxTokens value and try again.' } throw ('No response text was returned from model API: {0}' -f $ModelID) } if ($NoContextPersist -eq $false) { Write-Verbose -Message 'Adding response to model context history.' # *Note: this model supports multiple responses. By default, only the first response is added to the context. $formatMistralAIChatModelSplat = @{ Role = 'assistant' Message = $response.choices[0].message.content ModelID = $ModelID } Format-AI21LabsJambaModel @formatMistralAIChatModelSplat | Out-Null } Write-Verbose -Message 'Calculating cost estimate.' $message = $formattedMessages | ConvertTo-Json -Depth 10 | Out-String Add-ModelCostEstimate -Usage $response.usage -Message $Message -ModelID $ModelID if ($ReturnFullObject) { return $response } else { # *Note: this model supports multiple responses. By default, only the first response is returned. 
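        # Illustrative note (hypothetical call shown): when -ResponseNumber requests multiple
        # completions, the additional choices are only reachable via -ReturnFullObject, e.g.:
        #   $full = Invoke-AI21LabsJambaModel -Message 'Hello' -ModelID 'ai21.jamba-instruct-v1:0' -ReturnFullObject -Temperature 0.5 -ResponseNumber 2
        #   $full.choices[1].message.content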
return $response.choices[0].message.content } } #Invoke-AI21LabsJambaModel <# .EXTERNALHELP pwshBedrock-help.xml #> function Invoke-AI21LabsJurassic2Model { [CmdletBinding()] [Diagnostics.CodeAnalysis.SuppressMessageAttribute('PSUsePSCredentialType', '', Justification = 'Suppressed to support AWS credential parameter.')] param ( [Parameter(Mandatory = $true, HelpMessage = 'The message to be sent to the model.', ParameterSetName = 'Standard')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string]$Message, [Parameter(Mandatory = $true, HelpMessage = 'The unique identifier of the model.')] [ValidateSet( 'ai21.j2-grande-instruct', 'ai21.j2-jumbo-instruct', 'ai21.j2-mid-v1', 'ai21.j2-ultra-v1' )] [string]$ModelID, [Parameter(Mandatory = $false, HelpMessage = 'Specify if you want the full object returned instead of just the message reply.')] [switch]$ReturnFullObject, # model parameters [Parameter(Mandatory = $false, HelpMessage = 'The amount of randomness injected into the response.')] [ValidateRange(0.0, 1.0)] [float]$Temperature, [Parameter(Mandatory = $false, HelpMessage = 'Use a lower value to ignore less probable options and decrease the diversity of responses.')] [ValidateRange(0.0, 1.0)] [float]$TopP, [Parameter(Mandatory = $false, HelpMessage = 'The maximum number of tokens to generate before stopping.')] [ValidateRange(1, 8192)] [int]$MaxTokens = 512, [Parameter(Mandatory = $false, HelpMessage = 'Custom text sequences that cause the model to stop generating.')] [ValidateNotNullOrEmpty()] [string[]]$StopSequences, # penalize parameters # Count Penalty [Parameter(Mandatory = $false, HelpMessage = 'A true value applies the penalty to whitespaces and new lines. Proportional to the number of appearances.')] [ValidateRange(0.0, 1.0)] [float]$CountPenaltyScale, [Parameter(Mandatory = $false, HelpMessage = 'A true value applies the penalty to punctuation. Proportional to the number of appearances.')] [bool]$CountPenaltyApplyToWhiteSpaces, [Parameter(Mandatory = $false, HelpMessage = 'A true value applies the penalty to punctuation. Proportional to the number of appearances.')] [bool]$CountPenaltyApplyToPunctuations, [Parameter(Mandatory = $false, HelpMessage = 'A true value applies the penalty to numbers. Proportional to the number of appearances.')] [bool]$CountPenaltyApplyToNumbers, [Parameter(Mandatory = $false, HelpMessage = 'A true value applies the penalty to stop words. Proportional to the number of appearances.')] [bool]$CountPenaltyApplyToStopWords, [Parameter(Mandatory = $false, HelpMessage = 'A true value excludes emojis from the penalty. Proportional to the number of appearances.')] [bool]$CountPenaltyApplyToEmojis, # Presence Penalty [Parameter(Mandatory = $false, HelpMessage = 'A positive penalty value implies reducing the probability of repetition. 
Larger values correspond to a stronger bias against repetition.')] [ValidateRange(0.0, 5.0)] [float]$PresencePenaltyScale, [Parameter(Mandatory = $false, HelpMessage = 'A true value applies the penalty to whitespaces and new lines.')] [bool]$PresencePenaltyApplyToWhiteSpaces, [Parameter(Mandatory = $false, HelpMessage = 'A true value applies the penalty to punctuation.')] [bool]$PresencePenaltyApplyToPunctuations, [Parameter(Mandatory = $false, HelpMessage = 'A true value applies the penalty to numbers.')] [bool]$PresencePenaltyApplyToNumbers, [Parameter(Mandatory = $false, HelpMessage = 'A true value applies the penalty to stop words.')] [bool]$PresencePenaltyApplyToStopWords, [Parameter(Mandatory = $false, HelpMessage = 'A true value excludes emojis from the penalty.')] [bool]$PresencePenaltyApplyToEmojis, # Frequency Penalty [Parameter(Mandatory = $false, HelpMessage = 'A positive penalty value implies reducing the probability of repetition. Larger values correspond to a stronger bias against repetition.')] [ValidateRange(0, 500)] [int]$FrequencyPenaltyScale, [Parameter(Mandatory = $false, HelpMessage = 'A true value applies the penalty to whitespaces and new lines.')] [bool]$FrequencyPenaltyApplyToWhiteSpaces, [Parameter(Mandatory = $false, HelpMessage = 'A true value applies the penalty to punctuation.')] [bool]$FrequencyPenaltyApplyToPunctuations, [Parameter(Mandatory = $false, HelpMessage = 'A true value applies the penalty to numbers.')] [bool]$FrequencyPenaltyApplyToNumbers, [Parameter(Mandatory = $false, HelpMessage = 'A true value applies the penalty to stop words.')] [bool]$FrequencyPenaltyApplyToStopWords, [Parameter(Mandatory = $false, HelpMessage = 'A true value excludes emojis from the penalty.')] [bool]$FrequencyPenaltyApplyToEmojis, # Common Parameters [Parameter(Mandatory = $false, HelpMessage = 'The AWS access key for the user account.')] [string]$AccessKey, [Parameter(Mandatory = $false, HelpMessage = 'An AWSCredentials object instance containing access and secret key information, and optionally a token for session-based credentials.')] [Amazon.Runtime.AWSCredentials]$Credential, [Parameter(Mandatory = $false, HelpMessage = 'The endpoint to make the call against. Not for normal use.')] [string]$EndpointUrl, [Parameter(Mandatory = $false, HelpMessage = 'Used with SAML-based authentication when ProfileName references a SAML role profile.')] [System.Management.Automation.PSCredential]$NetworkCredential, [Parameter(Mandatory = $false, HelpMessage = 'Used to specify the name and location of the ini-format credential file (shared with the AWS CLI and other AWS SDKs)')] [string]$ProfileLocation, [Parameter(Mandatory = $false, HelpMessage = 'The user-defined name of an AWS credentials or SAML-based role profile containing credential information.')] [string]$ProfileName, [Parameter(Mandatory = $false, HelpMessage = 'The system name of an AWS region or an AWSRegion instance.')] [object]$Region, [Parameter(Mandatory = $false, HelpMessage = 'The AWS secret key for the user account.')] [string]$SecretKey, [Parameter(Mandatory = $false, HelpMessage = 'The session token if the access and secret keys are temporary session-based credentials.')] [string]$SessionToken ) $modelInfo = $script:anthropicModelInfo | Where-Object { $_.ModelId -eq $ModelID } Write-Debug -Message 'Model Info:' Write-Debug -Message ($modelInfo | Out-String) Write-Verbose -Message 'Formatting message for model.' 
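    # For reference, the request body assembled below takes this general JSON shape (a sketch with
    # hypothetical values; only keys for parameters that are set are included, and MaxTokens defaults to 512):
    #   {
    #       "prompt": "<message text>",
    #       "temperature": 0.5,
    #       "topP": 0.9,
    #       "maxTokens": 512,
    #       "stopSequences": ["\n\n"],
    #       "countPenalty": { "scale": 0.2, "applyToWhiteSpaces": true }
    #   }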
$formattedMessages = $Message #region cmdletParams $bodyObj = @{ prompt = $formattedMessages } if ($Temperature) { $bodyObj.Add('temperature', $Temperature) } if ($TopP) { $bodyObj.Add('topP', $TopP) } if ($MaxTokens) { $bodyObj.Add('maxTokens', $MaxTokens) } if ($StopSequences) { $bodyObj.Add('stopSequences', $StopSequences) } # special case for penalty objects # countPenalty if ( $CountPenaltyScale -or $CountPenaltyApplyToWhiteSpaces -or $CountPenaltyApplyToPunctuations -or $CountPenaltyApplyToNumbers -or $CountPenaltyApplyToStopWords -or $CountPenaltyApplyToEmojis ) { $bodyObj.Add('countPenalty', @{}) } if ($CountPenaltyScale) { $bodyObj.countPenalty.Add('scale', $CountPenaltyScale) } if ($CountPenaltyApplyToWhiteSpaces) { $bodyObj.countPenalty.Add('applyToWhiteSpaces', $CountPenaltyApplyToWhiteSpaces) } if ($CountPenaltyApplyToPunctuations) { $bodyObj.countPenalty.Add('applyToPunctuations', $CountPenaltyApplyToPunctuations) } if ($CountPenaltyApplyToNumbers) { $bodyObj.countPenalty.Add('applyToNumbers', $CountPenaltyApplyToNumbers) } if ($CountPenaltyApplyToStopWords) { $bodyObj.countPenalty.Add('applyToStopWords', $CountPenaltyApplyToStopWords) } if ($CountPenaltyApplyToEmojis) { $bodyObj.countPenalty.Add('applyToEmojis', $CountPenaltyApplyToEmojis) } # presencePenalty if ( $PresencePenaltyScale -or $PresencePenaltyApplyToWhiteSpaces -or $PresencePenaltyApplyToPunctuations -or $PresencePenaltyApplyToNumbers -or $PresencePenaltyApplyToStopWords -or $PresencePenaltyApplyToEmojis ) { $bodyObj.Add('presencePenalty', @{}) } if ($PresencePenaltyScale) { $bodyObj.presencePenalty.Add('scale', $PresencePenaltyScale) } if ($PresencePenaltyApplyToWhiteSpaces) { $bodyObj.presencePenalty.Add('applyToWhiteSpaces', $PresencePenaltyApplyToWhiteSpaces) } if ($PresencePenaltyApplyToPunctuations) { $bodyObj.presencePenalty.Add('applyToPunctuations', $PresencePenaltyApplyToPunctuations) } if ($PresencePenaltyApplyToNumbers) { $bodyObj.presencePenalty.Add('applyToNumbers', $PresencePenaltyApplyToNumbers) } if ($PresencePenaltyApplyToStopWords) { $bodyObj.presencePenalty.Add('applyToStopWords', $PresencePenaltyApplyToStopWords) } if ($PresencePenaltyApplyToEmojis) { $bodyObj.presencePenalty.Add('applyToEmojis', $PresencePenaltyApplyToEmojis) } # frequencyPenalty if ( $FrequencyPenaltyScale -or $FrequencyPenaltyApplyToWhiteSpaces -or $FrequencyPenaltyApplyToPunctuations -or $FrequencyPenaltyApplyToNumbers -or $FrequencyPenaltyApplyToStopWords -or $FrequencyPenaltyApplyToEmojis ) { $bodyObj.Add('frequencyPenalty', @{}) } if ($FrequencyPenaltyScale) { $bodyObj.frequencyPenalty.Add('scale', $FrequencyPenaltyScale) } if ($FrequencyPenaltyApplyToWhiteSpaces) { $bodyObj.frequencyPenalty.Add('applyToWhiteSpaces', $FrequencyPenaltyApplyToWhiteSpaces) } if ($FrequencyPenaltyApplyToPunctuations) { $bodyObj.frequencyPenalty.Add('applyToPunctuations', $FrequencyPenaltyApplyToPunctuations) } if ($FrequencyPenaltyApplyToNumbers) { $bodyObj.frequencyPenalty.Add('applyToNumbers', $FrequencyPenaltyApplyToNumbers) } if ($FrequencyPenaltyApplyToStopWords) { $bodyObj.frequencyPenalty.Add('applyToStopWords', $FrequencyPenaltyApplyToStopWords) } if ($FrequencyPenaltyApplyToEmojis) { $bodyObj.frequencyPenalty.Add('applyToEmojis', $FrequencyPenaltyApplyToEmojis) } $jsonBody = $bodyObj | ConvertTo-Json -Depth 10 [byte[]]$byteArray = [System.Text.Encoding]::UTF8.GetBytes($jsonBody) $cmdletParams = @{ ContentType = 'application/json' ModelId = $ModelID Body = $byteArray } Write-Debug -Message 'Cmdlet Params:' Write-Debug -Message 
($cmdletParams | Out-String) Write-Debug -Message 'Body JSON:' Write-Debug -Message ($jsonBody | Out-String) #endregion #region commonParams $commonParams = @{} if ($AccessKey) { $commonParams.Add('AccessKey', $AccessKey) } if ($Credential) { $commonParams.Add('Credential', $Credential) } if ($EndpointUrl) { $commonParams.Add('EndpointUrl', $EndpointUrl) } if ($NetworkCredential) { $commonParams.Add('NetworkCredential', $NetworkCredential) } if ($ProfileLocation) { $commonParams.Add('ProfileLocation', $ProfileLocation) } if ($ProfileName) { $commonParams.Add('ProfileName', $ProfileName) } if ($Region) { $commonParams.Add('Region', $Region) } if ($SecretKey) { $commonParams.Add('SecretKey', $SecretKey) } if ($SessionToken) { $commonParams.Add('SessionToken', $SessionToken) } #endregion try { $rawResponse = Invoke-BDRRModel @cmdletParams @commonParams -ErrorAction Stop } catch { $exceptionMessage = $_.Exception.Message if ($exceptionMessage -like "*don't have access*") { Write-Debug -Message 'Specific Error' Write-Warning -Message 'You do not have access to the requested model.' Write-Warning -Message 'In your AWS account, you will need to request access to the model.' Write-Warning -Message 'AWS -> Amazon Bedrock -> Model Access -> Request Access' throw ('No access to model {0}.' -f $ModelID) } else { Write-Debug -Message 'General Error' Write-Debug -Message ($_ | Out-String) Write-Error -Message $_ Write-Error -Message $_.Exception.Message throw } } if ([String]::IsNullOrWhiteSpace($rawResponse)) { throw 'No response from model API.' } Write-Verbose -Message 'Processing response.' try { $jsonBody = ConvertFrom-MemoryStreamToString -MemoryStream $rawResponse.body -ErrorAction Stop } catch { Write-Error $_ throw } Write-Debug -Message 'Response JSON:' Write-Debug -Message ($jsonBody | Out-String) Write-Verbose -Message 'Converting response from JSON.' $response = $jsonBody | ConvertFrom-Json $completion = $response.completions[0].data.text Write-Verbose -Message 'Calculating cost estimate.' Add-ModelCostEstimate -Usage $response -ModelID $ModelID if ($ReturnFullObject) { return $response } else { return $completion } } #Invoke-AI21LabsJurassic2Model <# .EXTERNALHELP pwshBedrock-help.xml #> function Invoke-AmazonImageModel { [CmdletBinding()] [Diagnostics.CodeAnalysis.SuppressMessageAttribute('PSUsePSCredentialType', '', Justification = 'Suppressed to support AWS credential parameter.')] param ( #_______________________________________________________ # required parameters [Parameter(Mandatory = $true, HelpMessage = 'The local file path to save the generated images.')] [ValidateScript({ if (-Not ($_ | Test-Path -PathType Container)) { throw 'The Path argument must be a folder. File paths are not allowed.' } if (-Not ($_ | Test-Path)) { throw 'File or folder does not exist' } return $true })] $ImagesSavePath, #_______________________________________________________ # image generation parameters [Parameter(Mandatory = $true, HelpMessage = 'A text prompt used to generate the image.', ParameterSetName = 'Generation')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [ValidateLength(0, 512)] [string]$ImagePrompt, [Parameter(Mandatory = $false, HelpMessage = 'Use to control and reproduce results. 
Determines the initial noise setting.', ParameterSetName = 'Generation')] [ValidateRange(0, 2147483646)] [int]$Seed, #_______________________________________________________ # inpainting parameters [Parameter(Mandatory = $true, HelpMessage = 'File path to local media to be modified.', ParameterSetName = 'InPaint')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string]$InPaintImagePath, [Parameter(Mandatory = $false, HelpMessage = 'A text prompt to define what to change inside the mask.', ParameterSetName = 'InPaint')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [ValidateLength(0, 512)] [string]$InPaintTextPrompt, [Parameter(Mandatory = $false, HelpMessage = 'A text prompt that defines the mask.', ParameterSetName = 'InPaint')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string]$InPaintMaskPrompt, [Parameter(Mandatory = $false, HelpMessage = 'File path to local media containing the masked image.', ParameterSetName = 'InPaint')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string]$InPaintMaskImagePath, #_______________________________________________________ # outpainting parameters [Parameter(Mandatory = $true, HelpMessage = 'File path to local media to be modified.', ParameterSetName = 'OutPaint')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string]$OutPaintImagePath, [Parameter(Mandatory = $true, HelpMessage = 'A text prompt to define what to change outside the mask.', ParameterSetName = 'OutPaint')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [ValidateLength(0, 512)] [string]$OutPaintTextPrompt, [Parameter(Mandatory = $false, HelpMessage = 'A text prompt that defines the mask.', ParameterSetName = 'OutPaint')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string]$OutPaintMaskPrompt, [Parameter(Mandatory = $false, HelpMessage = 'File path to local media containing the masked image.', ParameterSetName = 'OutPaint')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string]$OutPaintMaskImagePath, [Parameter(Mandatory = $false, HelpMessage = 'Specifies whether to allow modification of the pixels inside the mask or not.', ParameterSetName = 'OutPaint')] [ValidateSet( 'DEFAULT', 'PRECISE' )] [string]$OutPaintMode = 'DEFAULT', #_______________________________________________________ # variation parameters [Parameter(Mandatory = $true, HelpMessage = 'File path to local media files for which to generate variations. 
More than one file path can be provided.', ParameterSetName = 'Variation')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string[]]$VariationImagePath, [Parameter(Mandatory = $true, HelpMessage = 'A text prompt that can define what to preserve and what to change in the image.', ParameterSetName = 'Variation')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [ValidateLength(0, 512)] [string]$VariationTextPrompt, [Parameter(Mandatory = $false, HelpMessage = 'Specifies how similar the generated image should be to the input image.', ParameterSetName = 'Variation')] [ValidateRange(0.2, 1.0)] [float]$SimilarityStrength, #_______________________________________________________ # common image parameters [Parameter(Mandatory = $false, HelpMessage = 'A text prompt to define what not to include in the image.')] [string]$NegativeText, [Parameter(Mandatory = $false, HelpMessage = 'The number of images to generate.')] [ValidateRange(1, 5)] [int]$NumberOfImages, [Parameter(Mandatory = $false, HelpMessage = 'The width of the image in pixels.')] [ValidateSet( 320, 384, 448, 512, 576, 640, 704, 768, 896, 1024, 1152, 1173, 1280, 1408 )] [int]$Width, [Parameter(Mandatory = $false, HelpMessage = 'The height of the image in pixels.')] # [Parameter(ParameterSetName = 'Generation')] # [Parameter(ParameterSetName = 'InPaint')] [ValidateSet( 320, 384, 448, 512, 576, 640, 704, 768, 896, 1024, 1152, 1173, 1280, 1408 )] [int]$Height, [Parameter(Mandatory = $false, HelpMessage = 'Specifies how strongly the generated image should adhere to the prompt.')] [ValidateRange(1.1, 10.0)] [float]$CfgScale, #_______________________________________________________ [Parameter(Mandatory = $true, HelpMessage = 'The unique identifier of the model.')] [ValidateSet( 'amazon.titan-image-generator-v1' )] [string]$ModelID = 'amazon.titan-image-generator-v1', [Parameter(Mandatory = $false, HelpMessage = 'Specify if you want the full object returned from the model. This will include the raw base64 image data and other information.')] [switch]$ReturnFullObject, # Common Parameters [Parameter(Mandatory = $false, HelpMessage = 'The AWS access key for the user account.')] [string]$AccessKey, [Parameter(Mandatory = $false, HelpMessage = 'An AWSCredentials object instance containing access and secret key information, and optionally a token for session-based credentials.')] [Amazon.Runtime.AWSCredentials]$Credential, [Parameter(Mandatory = $false, HelpMessage = 'The endpoint to make the call against. 
Not for normal use.')] [string]$EndpointUrl, [Parameter(Mandatory = $false, HelpMessage = 'Used with SAML-based authentication when ProfileName references a SAML role profile.')] [System.Management.Automation.PSCredential]$NetworkCredential, [Parameter(Mandatory = $false, HelpMessage = 'Used to specify the name and location of the ini-format credential file (shared with the AWS CLI and other AWS SDKs)')] [string]$ProfileLocation, [Parameter(Mandatory = $false, HelpMessage = 'The user-defined name of an AWS credentials or SAML-based role profile containing credential information.')] [string]$ProfileName, [Parameter(Mandatory = $false, HelpMessage = 'The system name of an AWS region or an AWSRegion instance.')] [object]$Region, [Parameter(Mandatory = $false, HelpMessage = 'The AWS secret key for the user account.')] [string]$SecretKey, [Parameter(Mandatory = $false, HelpMessage = 'The session token if the access and secret keys are temporary session-based credentials.')] [string]$SessionToken ) Write-Debug -Message ('Parameter Set Name: {0}' -f $PSCmdlet.ParameterSetName) $modelInfo = $script:amazonModelInfo | Where-Object { $_.ModelId -eq $ModelID } Write-Debug -Message 'Model Info:' Write-Debug -Message ($modelInfo | Out-String) switch ($PSCmdlet.ParameterSetName) { 'Generation' { $bodyObj = @{ taskType = 'TEXT_IMAGE' textToImageParams = @{ text = $ImagePrompt } } if ($NegativeText) { $bodyObj.textToImageParams.Add('negativeText', $NegativeText) } } 'InPaint' { # validate that either $InPaintMaskPrompt or $InPaintMaskImagePath is provided if (-not ($InPaintMaskPrompt -or $InPaintMaskImagePath)) { throw 'Either -InPaintMaskPrompt or -InPaintMaskImagePath is required.' } # validate that both $InPaintMaskPrompt and $InPaintMaskImagePath are not provided if ($InPaintMaskPrompt -and $InPaintMaskImagePath) { throw 'Either -InPaintMaskPrompt or -InPaintMaskImagePath should be provided. Not both.' } Write-Debug -Message 'Validating primary INPAINTING image.' $mediaEval = Test-AmazonMedia -MediaPath $InPaintImagePath if ($mediaEval -ne $true) { throw 'Media file not supported.' } else { Write-Debug -Message 'Primary INPAINTING image is supported.' } Write-Debug -Message 'Converting primary INPAINTING image to base64.' try { $base64 = Convert-MediaToBase64 -MediaPath $InPaintImagePath -ErrorAction Stop } catch { Write-Error $_ throw } $bodyObj = @{ taskType = 'INPAINTING' inPaintingParams = @{ image = $base64 } } if ($InPaintTextPrompt) { $bodyObj.inPaintingParams.Add('text', $InPaintTextPrompt) } if ($InPaintMaskPrompt) { $bodyObj.inPaintingParams.Add('maskPrompt', $InPaintMaskPrompt) } if ($InPaintMaskImagePath) { Write-Debug -Message 'Validating INPAINTING mask image.' $mediaMaskEval = Test-AmazonMedia -MediaPath $InPaintMaskImagePath if ($mediaMaskEval -ne $true) { throw 'Media file not supported.' } else { Write-Debug -Message 'Mask image is supported.' } Write-Debug -Message 'Converting INPAINTING mask image to base64.' try { $base64Mask = Convert-MediaToBase64 -MediaPath $InPaintMaskImagePath -ErrorAction Stop } catch { Write-Error $_ throw } $bodyObj.inPaintingParams.Add('maskImage', $base64Mask) } if ($NegativeText) { $bodyObj.inPaintingParams.Add('negativeText', $NegativeText) } } 'OutPaint' { # validate that either $OutPaintMaskPrompt or $OutPaintMaskImagePath is provided if (-not ($OutPaintMaskPrompt -or $OutPaintMaskImagePath)) { throw 'Either -OutPaintMaskPrompt or -OutPaintMaskImagePath is required.' 
} # validate that both $OutPaintMaskPrompt and $OutPaintMaskImagePath are not provided if ($OutPaintMaskPrompt -and $OutPaintMaskImagePath) { throw 'Either -OutPaintMaskPrompt or -OutPaintMaskImagePath should be provided. Not both.' } Write-Debug -Message 'Validating primary OUTPAINTING image.' $mediaEval = Test-AmazonMedia -MediaPath $OutPaintImagePath if ($mediaEval -ne $true) { throw 'Media file not supported.' } else { Write-Debug -Message 'Primary OUTPAINTING image is supported.' } Write-Debug -Message 'Converting primary OUTPAINTING image to base64.' try { $base64 = Convert-MediaToBase64 -MediaPath $OutPaintImagePath -ErrorAction Stop } catch { Write-Error $_ throw } $bodyObj = @{ taskType = 'OUTPAINTING' outPaintingParams = @{ image = $base64 outPaintingMode = $OutPaintMode } } if ($OutPaintTextPrompt) { $bodyObj.outPaintingParams.Add('text', $OutPaintTextPrompt) } if ($OutPaintMaskPrompt) { $bodyObj.outPaintingParams.Add('maskPrompt', $OutPaintMaskPrompt) } if ($OutPaintMaskImagePath) { Write-Debug -Message 'Validating OUTPAINTING mask image.' $mediaMaskEval = Test-AmazonMedia -MediaPath $OutPaintMaskImagePath if ($mediaMaskEval -ne $true) { throw 'Media file not supported.' } else { Write-Debug -Message 'OUTPAINTING mask image is supported.' } Write-Debug -Message 'Converting OUTPAINTING mask image to base64.' try { $base64Mask = Convert-MediaToBase64 -MediaPath $OutPaintMaskImagePath -ErrorAction Stop } catch { Write-Error $_ throw } $bodyObj.outPaintingParams.Add('maskImage', $base64Mask) } if ($NegativeText) { $bodyObj.outPaintingParams.Add('negativeText', $NegativeText) } } 'Variation' { $bodyObj = @{ taskType = 'IMAGE_VARIATION' imageVariationParams = @{ # images = @($base64) images = New-Object System.Collections.Generic.List[string] } } foreach ($imagePath in $VariationImagePath) { #------------------------- # resets $mediaEval = $false $base64 = $null #------------------------- $mediaEval = Test-AmazonMedia -MediaPath $imagePath if (-not $mediaEval) { throw 'Media file not supported.' 
} try { $base64 = Convert-MediaToBase64 -MediaPath $imagePath -ErrorAction Stop } catch { Write-Error $_ throw } $bodyObj.imageVariationParams.images.Add($base64) } if ($VariationTextPrompt) { $bodyObj.imageVariationParams.Add('text', $VariationTextPrompt) } if ($SimilarityStrength) { $bodyObj.imageVariationParams.Add('similarityStrength', $SimilarityStrength) } if ($NegativeText) { $bodyObj.imageVariationParams.Add('negativeText', $NegativeText) } } } #switch_parameterSetName #region common image parameters if ($NumberOfImages -or $Width -or $Height -or $CfgScale) { $bodyObj.Add('imageGenerationConfig', @{}) } if ($NumberOfImages) { $bodyObj.imageGenerationConfig.Add('numberOfImages', $NumberOfImages) } if ($Width) { $bodyObj.imageGenerationConfig.Add('width', $Width) } if ($Height) { $bodyObj.imageGenerationConfig.Add('height', $Height) } if ($CfgScale) { $bodyObj.imageGenerationConfig.Add('cfgScale', $CfgScale) } if ($PSCmdlet.ParameterSetName -eq 'Generation') { if ($Seed) { $bodyObj.imageGenerationConfig.Add('seed', $Seed) } } $jsonBody = $bodyObj | ConvertTo-Json -Depth 10 [byte[]]$byteArray = [System.Text.Encoding]::UTF8.GetBytes($jsonBody) $cmdletParams = @{ ContentType = 'application/json' ModelId = $ModelID Body = $byteArray } Write-Debug -Message 'Cmdlet Params:' Write-Debug -Message ($cmdletParams | Out-String) Write-Debug -Message 'Body JSON:' Write-Debug -Message ($jsonBody | Out-String) #endregion #region commonParams $commonParams = @{} if ($AccessKey) { $commonParams.Add('AccessKey', $AccessKey) } if ($Credential) { $commonParams.Add('Credential', $Credential) } if ($EndpointUrl) { $commonParams.Add('EndpointUrl', $EndpointUrl) } if ($NetworkCredential) { $commonParams.Add('NetworkCredential', $NetworkCredential) } if ($ProfileLocation) { $commonParams.Add('ProfileLocation', $ProfileLocation) } if ($ProfileName) { $commonParams.Add('ProfileName', $ProfileName) } if ($Region) { $commonParams.Add('Region', $Region) } if ($SecretKey) { $commonParams.Add('SecretKey', $SecretKey) } if ($SessionToken) { $commonParams.Add('SessionToken', $SessionToken) } #endregion try { $rawResponse = Invoke-BDRRModel @cmdletParams @commonParams -ErrorAction Stop } catch { $exceptionMessage = $_.Exception.Message if ($exceptionMessage -like "*don't have access*") { Write-Debug -Message 'Specific Error' Write-Warning -Message 'You do not have access to the requested model.' Write-Warning -Message 'In your AWS account, you will need to request access to the model.' Write-Warning -Message 'AWS -> Amazon Bedrock -> Model Access -> Request Access' throw ('No access to model {0}.' -f $ModelID) } elseif ($exceptionMessage -like '*content filters*') { Write-Debug -Message 'Specific Error' Write-Warning -Message 'Your request was blocked by the Amazon Titan content filters.' throw $exceptionMessage } else { Write-Debug -Message 'General Error' Write-Debug -Message ($_ | Out-String) Write-Error -Message $_ Write-Error -Message $_.Exception.Message throw } } if ([String]::IsNullOrWhiteSpace($rawResponse)) { throw 'No response from model API.' } Write-Verbose -Message 'Processing response.' try { $jsonBody = ConvertFrom-MemoryStreamToString -MemoryStream $rawResponse.body -ErrorAction Stop } catch { Write-Error $_ throw } Write-Debug -Message 'Response JSON:' Write-Debug -Message ($jsonBody | Out-String) Write-Verbose -Message 'Converting response from JSON.'
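# Illustrative only: the Amazon Titan image generator response body is expected to resemble
# the JSON shape sketched below (an images array of base64-encoded image data), which is why
# $response.images is enumerated after conversion. Fields other than images are assumptions
# and may vary by model version.
#   {
#       "images": [ "<base64-encoded-image>", "..." ],
#       "error": null
#   }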
$response = $jsonBody | ConvertFrom-Json if ([string]::IsNullOrWhiteSpace($response.images)) { Write-Warning -Message 'No images were returned from the model.' } else { $imageCount = $response.images.Count Write-Verbose -Message ('Processing {0} images returned from model.' -f $imageCount) Write-Verbose -Message 'Calculating cost estimate.' Add-ModelCostEstimate -ImageCount $imageCount -ModelID $ModelID foreach ($image in $response.images) { Write-Verbose -Message ('....Processing image {0}.' -f $imageCount) try { $imageBytes = Convert-FromBase64ToByte -Base64String $image -ErrorAction Stop } catch { Write-Error $_ throw } $imageFileName = '{0}-{1}.png' -f 'amazon-titan-image-generator-v1', (Get-Date -Format 'yyyyMMdd-HHmmss') $imageFilePath = [System.IO.Path]::Combine($ImagesSavePath, $imageFileName) Write-Verbose -Message ('Saving image to {0}.' -f $imageFilePath) try { Save-BytesToFile -ImageBytes $imageBytes -FilePath $imageFilePath -ErrorAction Stop } catch { Write-Error $_ throw } Start-Sleep -Milliseconds 5500 #for naming uniqueness $imageCount-- } #foreach_image } if ($ReturnFullObject) { return $response } } #Invoke-AmazonImageModel <# .EXTERNALHELP pwshBedrock-help.xml #> function Invoke-AmazonTextModel { [CmdletBinding()] [Diagnostics.CodeAnalysis.SuppressMessageAttribute('PSUsePSCredentialType', '', Justification = 'Suppressed to support AWS credential parameter.')] param ( [Parameter(Mandatory = $true, HelpMessage = 'The message to be sent to the model.', ParameterSetName = 'Standard')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string]$Message, [Parameter(Mandatory = $true, HelpMessage = 'A properly formatted string that represents a custom conversation.', ParameterSetName = 'PreCraftedMessages')] [ValidateNotNull()] [string]$CustomConversation, [Parameter(Mandatory = $true, HelpMessage = 'The unique identifier of the model.')] [ValidateSet( 'amazon.titan-text-lite-v1', 'amazon.titan-text-express-v1', 'amazon.titan-text-premier-v1:0', 'amazon.titan-tg1-large' )] [string]$ModelID, [Parameter(Mandatory = $false, HelpMessage = 'Specify if you want the full object returned instead of just the message reply.')] [switch]$ReturnFullObject, [Parameter(Mandatory = $false, HelpMessage = 'When specified, the model will have a less conversational response. It will also not persist the conversation context history.')] [switch]$PromptOnly, [Parameter(Mandatory = $false, HelpMessage = 'Do not persist the conversation context history.')] [switch]$NoContextPersist, # model parameters [Parameter(Mandatory = $false, HelpMessage = 'The maximum number of tokens to generate before stopping.')] [ValidateRange(1, 8192)] [int]$MaxTokens = 8192, # ! 
Open issue preventing use of StopSequences parameter # https://github.com/aws/aws-sdk/issues/692 # [Parameter(Mandatory = $false, # HelpMessage = 'Custom text sequences that cause the model to stop generating.')] # [ValidateNotNullOrEmpty()] # [string[]]$StopSequences, [Parameter(Mandatory = $false, HelpMessage = 'The amount of randomness injected into the response.')] [ValidateRange(0.0, 1.0)] [float]$Temperature, [Parameter(Mandatory = $false, HelpMessage = 'Use a lower value to ignore less probable options and decrease the diversity of responses.')] [ValidateRange(0.0, 1.0)] [float]$TopP, # Common Parameters [Parameter(Mandatory = $false, HelpMessage = 'The AWS access key for the user account.')] [string]$AccessKey, [Parameter(Mandatory = $false, HelpMessage = 'An AWSCredentials object instance containing access and secret key information, and optionally a token for session-based credentials.')] [Amazon.Runtime.AWSCredentials]$Credential, [Parameter(Mandatory = $false, HelpMessage = 'The endpoint to make the call against. Not for normal use.')] [string]$EndpointUrl, [Parameter(Mandatory = $false, HelpMessage = 'Used with SAML-based authentication when ProfileName references a SAML role profile.')] [System.Management.Automation.PSCredential]$NetworkCredential, [Parameter(Mandatory = $false, HelpMessage = 'Used to specify the name and location of the ini-format credential file (shared with the AWS CLI and other AWS SDKs)')] [string]$ProfileLocation, [Parameter(Mandatory = $false, HelpMessage = 'The user-defined name of an AWS credentials or SAML-based role profile containing credential information.')] [string]$ProfileName, [Parameter(Mandatory = $false, HelpMessage = 'The system name of an AWS region or an AWSRegion instance.')] [object]$Region, [Parameter(Mandatory = $false, HelpMessage = 'The AWS secret key for the user account.')] [string]$SecretKey, [Parameter(Mandatory = $false, HelpMessage = 'The session token if the access and secret keys are temporary session-based credentials.')] [string]$SessionToken ) $modelInfo = $script:amazonModelInfo | Where-Object { $_.ModelId -eq $ModelID } Write-Debug -Message 'Model Info:' Write-Debug -Message ($modelInfo | Out-String) switch ($PSCmdlet.ParameterSetName) { 'Standard' { Write-Verbose -Message 'Standard message provided.' $formatAmazonTextMessageSplat = @{ Role = 'User' Message = $Message ModelID = $ModelID NoContextPersist = $NoContextPersist } if (-Not $PromptOnly) { $formattedMessages = Format-AmazonTextMessage @formatAmazonTextMessageSplat } else { $formattedMessages = $Message } } 'PreCraftedMessages' { Write-Verbose -Message 'Custom conversation provided' $conversationEval = Test-AmazonCustomConversation -CustomConversation $CustomConversation if ($conversationEval -ne $true) { throw 'Custom conversation validation failed.'
} else { $formattedMessages = $CustomConversation } } } #region cmdletParams $bodyObj = @{ inputText = $formattedMessages } if ($Temperature -or $TopP -or $MaxTokens -ne 512) { $bodyObj.Add('textGenerationConfig', @{}) } # TODO: Add support for StopSequences parameter when AWS SDK issue is resolved if ($Temperature) { $bodyObj.textGenerationConfig.Add('temperature', $Temperature) } if ($TopP) { $bodyObj.textGenerationConfig.Add('topP', $TopP) } if ($MaxTokens -ne 512) { $bodyObj.textGenerationConfig.Add('maxTokenCount', $MaxTokens) } $jsonBody = $bodyObj | ConvertTo-Json -Depth 10 [byte[]]$byteArray = [System.Text.Encoding]::UTF8.GetBytes($jsonBody) $cmdletParams = @{ ContentType = 'application/json' ModelId = $ModelID Body = $byteArray } Write-Debug -Message 'Cmdlet Params:' Write-Debug -Message ($cmdletParams | Out-String) Write-Debug -Message 'Body JSON:' Write-Debug -Message ($jsonBody | Out-String) #endregion #region commonParams $commonParams = @{} if ($AccessKey) { $commonParams.Add('AccessKey', $AccessKey) } if ($Credential) { $commonParams.Add('Credential', $Credential) } if ($EndpointUrl) { $commonParams.Add('EndpointUrl', $EndpointUrl) } if ($NetworkCredential) { $commonParams.Add('NetworkCredential', $NetworkCredential) } if ($ProfileLocation) { $commonParams.Add('ProfileLocation', $ProfileLocation) } if ($ProfileName) { $commonParams.Add('ProfileName', $ProfileName) } if ($Region) { $commonParams.Add('Region', $Region) } if ($SecretKey) { $commonParams.Add('SecretKey', $SecretKey) } if ($SessionToken) { $commonParams.Add('SessionToken', $SessionToken) } #endregion try { $rawResponse = Invoke-BDRRModel @cmdletParams @commonParams -ErrorAction Stop } catch { # we need to remove the user context from the global variable if the model is not successfully engaged $contextObj = $Global:pwshBedrockModelContext | Where-Object { $_.ModelID -eq $ModelID } Write-Debug -Message ($contextObj | Out-String) # Remove the last line of the Context $lastNewlineIndex = $contextObj.Context.LastIndexOf("`n", $contextObj.Context.Length - 2) if ($lastNewlineIndex -eq -1) { # If there's no newline, it means there was only one line $contextObj.Context = '' } else { $contextObj.Context = $contextObj.Context.Substring(0, $lastNewlineIndex + 1) } $exceptionMessage = $_.Exception.Message if ($exceptionMessage -like "*don't have access*") { Write-Debug -Message 'Specific Error' Write-Warning -Message 'You do not have access to the requested model.' Write-Warning -Message 'In your AWS account, you will need to request access to the model.' Write-Warning -Message 'AWS -> Amazon Bedrock -> Model Access -> Request Access' throw ('No access to model {0}.' -f $ModelID) } else { Write-Debug -Message 'General Error' Write-Debug -Message ($_ | Out-String) Write-Error -Message $_ Write-Error -Message $_.Exception.Message throw } } if ([String]::IsNullOrWhiteSpace($rawResponse)) { throw 'No response from model API.' } Write-Verbose -Message 'Processing response.' 
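# Illustrative only: the Amazon Titan text response body is expected to resemble the JSON shape
# sketched below, which is why $response.results.outputText is inspected after conversion. The
# example values are assumptions and may vary by model version.
#   {
#       "inputTextTokenCount": 10,
#       "results": [
#           { "tokenCount": 250, "outputText": "...", "completionReason": "FINISH" }
#       ]
#   }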
try { $jsonBody = ConvertFrom-MemoryStreamToString -MemoryStream $rawResponse.body -ErrorAction Stop } catch { # we need to remove the user context from the global variable if the model is not successfully engaged $contextObj = $Global:pwshBedrockModelContext | Where-Object { $_.ModelID -eq $ModelID } Write-Debug -Message ($contextObj | Out-String) # Remove the last line of the Context $lastNewlineIndex = $contextObj.Context.LastIndexOf("`n", $contextObj.Context.Length - 2) if ($lastNewlineIndex -eq -1) { # If there's no newline, it means there was only one line $contextObj.Context = '' } else { $contextObj.Context = $contextObj.Context.Substring(0, $lastNewlineIndex + 1) } Write-Error $_ throw } Write-Debug -Message 'Response JSON:' Write-Debug -Message ($jsonBody | Out-String) Write-Verbose -Message 'Converting response from JSON.' $response = $jsonBody | ConvertFrom-Json if ([string]::IsNullOrWhiteSpace($response.results.outputText)) { if ($MaxTokens -lt 150) { Write-Warning -Message 'In some cases, the model may return an empty response when the max tokens is set to a low value.' Write-Warning -Message ('MaxTokens on this call was set to {0}.' -f $MaxTokens) Write-Warning -Message 'Try increasing the MaxTokens value and try again.' } throw ('No response text was returned from model API: {0}' -f $ModelID) } Write-Verbose -Message 'Calculating cost estimate.' Add-ModelCostEstimate -Usage $response -ModelID $ModelID Write-Verbose -Message 'Adding response to model context history.' $content = $response.results.outputText $formatAnthropicMessageSplat = @{ Role = 'Bot' Message = $content ModelID = $ModelID NoContextPersist = $NoContextPersist } Format-AmazonTextMessage @formatAnthropicMessageSplat | Out-Null if ($ReturnFullObject) { return $response } else { return $content } } #Invoke-AmazonTextModel <# .EXTERNALHELP pwshBedrock-help.xml #> function Invoke-AnthropicModel { [CmdletBinding( DefaultParameterSetName = 'Standard' )] [Diagnostics.CodeAnalysis.SuppressMessageAttribute('PSUsePSCredentialType', '', Justification = 'Suppressed to support AWS credential parameter.')] param ( [Parameter(Mandatory = $false, HelpMessage = 'The message to be sent to the model.', ParameterSetName = 'Standard')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string]$Message, [Parameter(Mandatory = $false, HelpMessage = 'File path to local media file.', ParameterSetName = 'Standard')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string[]]$MediaPath, [Parameter(Mandatory = $true, HelpMessage = 'An array of custom conversation objects.', ParameterSetName = 'PreCraftedMessages')] [ValidateNotNull()] [PSCustomObject[]]$CustomConversation, [Parameter(Mandatory = $true, HelpMessage = 'The unique identifier of the model.')] [ValidateSet( 'anthropic.claude-v2:1', 'anthropic.claude-3-haiku-20240307-v1:0', 'anthropic.claude-3-sonnet-20240229-v1:0', 'anthropic.claude-3-5-sonnet-20240620-v1:0', 'anthropic.claude-3-opus-20240229-v1:0' )] [string]$ModelID, [Parameter(Mandatory = $false, HelpMessage = 'Specify if you want the full object returned instead of just the message reply.')] [switch]$ReturnFullObject, [Parameter(Mandatory = $false, HelpMessage = 'Do not persist the conversation context history.')] [switch]$NoContextPersist, # model parameters [Parameter(Mandatory = $false, HelpMessage = 'The maximum number of tokens to generate before stopping.')] [ValidateRange(1, 4096)] [int]$MaxTokens = 4096, # https://docs.anthropic.com/en/docs/system-prompts [Parameter(Mandatory = $false, HelpMessage = 'The system prompt 
for the request.')] [ValidateNotNullOrEmpty()] [string]$SystemPrompt, [Parameter(Mandatory = $false, HelpMessage = 'Custom text sequences that cause the model to stop generating.')] [ValidateNotNullOrEmpty()] [string[]]$StopSequences, [Parameter(Mandatory = $false, HelpMessage = 'The amount of randomness injected into the response.')] [ValidateRange(0.0, 1.0)] [float]$Temperature, [Parameter(Mandatory = $false, HelpMessage = 'Use nucleus sampling. Not for normal use.')] [ValidateRange(0.0, 1.0)] [float]$TopP, [Parameter(Mandatory = $false, HelpMessage = 'Only sample from the top K options for each subsequent token. Not for normal use.')] [ValidateRange(0, 500)] [int]$TopK, [Parameter(Mandatory = $false, HelpMessage = 'Definitions of tools that the model may use.')] [PSCustomObject[]]$Tools, [Parameter(Mandatory = $false, HelpMessage = 'Specifies how functions are called.')] [ValidateSet('auto', 'any', 'tool')] [string]$ToolChoice, [Parameter(Mandatory = $false, HelpMessage = "The name of the tool that Claude should use to answer the user's question.")] [string]$ToolName, [Parameter(Mandatory = $true, HelpMessage = 'A list of results from invoking tools recommended by the model in the previous chat turn.', ParameterSetName = 'ToolsResultsSet')] [ValidateNotNull()] [PSCustomObject[]]$ToolsResults, # Common Parameters [Parameter(Mandatory = $false, HelpMessage = 'The AWS access key for the user account.')] [string]$AccessKey, [Parameter(Mandatory = $false, HelpMessage = 'An AWSCredentials object instance containing access and secret key information, and optionally a token for session-based credentials.')] [Amazon.Runtime.AWSCredentials]$Credential, [Parameter(Mandatory = $false, HelpMessage = 'The endpoint to make the call against. Not for normal use.')] [string]$EndpointUrl, [Parameter(Mandatory = $false, HelpMessage = 'Used with SAML-based authentication when ProfileName references a SAML role profile.')] [System.Management.Automation.PSCredential]$NetworkCredential, [Parameter(Mandatory = $false, HelpMessage = 'Used to specify the name and location of the ini-format credential file (shared with the AWS CLI and other AWS SDKs)')] [string]$ProfileLocation, [Parameter(Mandatory = $false, HelpMessage = 'The user-defined name of an AWS credentials or SAML-based role profile containing credential information.')] [string]$ProfileName, [Parameter(Mandatory = $false, HelpMessage = 'The system name of an AWS region or an AWSRegion instance.')] [object]$Region, [Parameter(Mandatory = $false, HelpMessage = 'The AWS secret key for the user account.')] [string]$SecretKey, [Parameter(Mandatory = $false, HelpMessage = 'The session token if the access and secret keys are temporary session-based credentials.')] [string]$SessionToken ) $modelInfo = $script:anthropicModelInfo | Where-Object { $_.ModelId -eq $ModelID } Write-Debug -Message 'Model Info:' Write-Debug -Message ($modelInfo | Out-String) if ($ToolChoice -eq 'tool' -and [string]::IsNullOrWhiteSpace($ToolName)) { throw 'ToolName must be specified when ToolChoice is set to tool.' } # tool options are not supported by the anthropic 2 model if ($ModelID -eq 'anthropic.claude-v2:1' ) { if ($Tools -or $ToolChoice -or $ToolName) { throw 'Tool options are not supported by the anthropic 2 model.' } } Write-Debug -Message ('Parameter Set: {0}' -f $PSCmdlet.ParameterSetName) switch ($PSCmdlet.ParameterSetName) { 'Standard' { if ($MediaPath) { Write-Verbose -Message 'Vision message with media path provided.' 
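# Illustrative usage only (a sketch; the file path, model, and region values below are
# placeholders, not part of this module):
#   Invoke-AnthropicModel -Message 'Describe the attached image.' -MediaPath 'C:\images\photo.jpg' -ModelID 'anthropic.claude-3-sonnet-20240229-v1:0' -Region 'us-west-2'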
if ($modelInfo.Vision -ne $true) { Write-Warning -Message ('You provided a media path for model {0}. Vision is not supported for this model.' -f $ModelID) throw 'Vision is not supported for this model.' } if ($MediaPath.Count -gt 20) { throw ('You provided {0} media files. You can only provide up to 20 media files.' -f $MediaPath.Count) } foreach ($media in $MediaPath) { if (-not (Test-AnthropicMedia -MediaPath $media)) { throw ('Media test for {0} failed.' -f $media) } } $formatAnthropicMessageSplat = @{ Role = 'user' Message = $Message ModelID = $ModelID MediaPath = $MediaPath NoContextPersist = $NoContextPersist } $formattedMessages = Format-AnthropicMessage @formatAnthropicMessageSplat } elseif ($Message) { Write-Verbose -Message 'Standard message provided.' $formatAnthropicMessageSplat = @{ Role = 'user' Message = $Message ModelID = $ModelID NoContextPersist = $NoContextPersist } $formattedMessages = Format-AnthropicMessage @formatAnthropicMessageSplat } else { throw 'You must provide either a message or media path.' } } 'PreCraftedMessages' { Write-Verbose -Message 'Custom conversation provided' $conversationEval = Test-AnthropicCustomConversation -CustomConversation $CustomConversation if ($conversationEval -ne $true) { throw 'Custom conversation validation failed.' } else { $formattedMessages = $CustomConversation } } 'ToolsResultsSet' { Write-Verbose -Message 'Tools results provided' if (-not $Tools) { throw 'Tools must be provided when ToolsResults are provided.' } # ToolsResults - must be formed properly $toolsResultsEval = Test-AnthropicToolResult -ToolResults $ToolsResults if ($toolsResultsEval -ne $true) { throw 'Tools results validation failed.' } $formatAnthropicMessageSplat = @{ Role = 'user' ToolsResults = $ToolsResults ModelID = $ModelID NoContextPersist = $NoContextPersist } $formattedMessages += Format-AnthropicMessage @formatAnthropicMessageSplat } } #region cmdletParams $bodyObj = @{ 'anthropic_version' = 'bedrock-2023-05-31' 'max_tokens' = $MaxTokens messages = @( $formattedMessages ) } if ($SystemPrompt) { $bodyObj.Add('system', $SystemPrompt) } if ($StopSequences) { $bodyObj.Add('stop_sequences', $StopSequences) } if ($Temperature) { $bodyObj.Add('temperature', $Temperature) } if ($TopP) { $bodyObj.Add('top_p', $TopP) } if ($TopK) { $bodyObj.Add('top_k', $TopK) } if ($Tools) { $toolsEval = Test-AnthropicTool -Tools $Tools if ($toolsEval -ne $true) { throw 'Tools validation failed.' 
} $bodyObj.Add('tools', $Tools) } if ($ToolChoice) { $toolChoiceObj = @{ type = $ToolChoice } if ($ToolName) { $toolChoiceObj.Add('name', $ToolName) } $bodyObj.Add('tool_choice', $toolChoiceObj) } $jsonBody = $bodyObj | ConvertTo-Json -Depth 10 [byte[]]$byteArray = [System.Text.Encoding]::UTF8.GetBytes($jsonBody) $cmdletParams = @{ ContentType = 'application/json' ModelId = $ModelID Body = $byteArray } Write-Debug -Message 'Cmdlet Params:' Write-Debug -Message ($cmdletParams | Out-String) Write-Debug -Message 'Body JSON:' Write-Debug -Message ($jsonBody | Out-String) #endregion #region commonParams $commonParams = @{} if ($AccessKey) { $commonParams.Add('AccessKey', $AccessKey) } if ($Credential) { $commonParams.Add('Credential', $Credential) } if ($EndpointUrl) { $commonParams.Add('EndpointUrl', $EndpointUrl) } if ($NetworkCredential) { $commonParams.Add('NetworkCredential', $NetworkCredential) } if ($ProfileLocation) { $commonParams.Add('ProfileLocation', $ProfileLocation) } if ($ProfileName) { $commonParams.Add('ProfileName', $ProfileName) } if ($Region) { $commonParams.Add('Region', $Region) } if ($SecretKey) { $commonParams.Add('SecretKey', $SecretKey) } if ($SessionToken) { $commonParams.Add('SessionToken', $SessionToken) } #endregion try { $rawResponse = Invoke-BDRRModel @cmdletParams @commonParams -ErrorAction Stop } catch { # we need to remove the user context from the global variable if the model is not successfully engaged $context = $Global:pwshBedrockModelContext | Where-Object { $_.ModelID -eq $ModelID } Write-Debug -Message ($context | Out-String) Write-Debug -Message ('Context count: {0}' -f $context.Context.Count) if ($context.Context.Count -le 1) { $context.Context = New-Object System.Collections.Generic.List[object] } else { $context.Context.RemoveAt($context.Context.Count - 1) } $exceptionMessage = $_.Exception.Message if ($exceptionMessage -like "*don't have access*") { Write-Debug -Message 'Specific Error' Write-Warning -Message 'You do not have access to the requested model.' Write-Warning -Message 'In your AWS account, you will need to request access to the model.' Write-Warning -Message 'AWS -> Amazon Bedrock -> Model Access -> Request Access' throw ('No access to model {0}.' -f $ModelID) } else { Write-Debug -Message 'General Error' Write-Debug -Message ($_ | Out-String) Write-Error -Message $_ Write-Error -Message $_.Exception.Message throw } } if ([String]::IsNullOrWhiteSpace($rawResponse)) { throw 'No response from model API.' } Write-Verbose -Message 'Processing response.' try { $jsonBody = ConvertFrom-MemoryStreamToString -MemoryStream $rawResponse.body -ErrorAction Stop } catch { # we need to remove the user context from the global variable if the model is not successfully engaged $context = $Global:pwshBedrockModelContext | Where-Object { $_.ModelID -eq $ModelID } Write-Debug -Message ($context | Out-String) Write-Debug -Message ('Context count: {0}' -f $context.Context.Count) if ($context.Context.Count -le 1) { $context.Context = New-Object System.Collections.Generic.List[object] } else { $context.Context.RemoveAt($context.Context.Count - 1) } Write-Error $_ throw } Write-Debug -Message 'Response JSON:' Write-Debug -Message ($jsonBody | Out-String) Write-Verbose -Message 'Converting response from JSON.' $response = $jsonBody | ConvertFrom-Json Write-Verbose -Message 'Calculating cost estimate.' Add-ModelCostEstimate -Usage $response.usage -ModelID $ModelID Write-Verbose -Message 'Adding response to model context history.'
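# Illustrative only: the Anthropic Messages API response is expected to resemble one of the
# shapes sketched below, which is why the next block branches on stop_reason. Exact fields may
# vary by model version.
#   Text reply: { "content": [ { "type": "text", "text": "..." } ], "stop_reason": "end_turn", "usage": { "input_tokens": 0, "output_tokens": 0 } }
#   Tool use:   { "content": [ { "type": "tool_use", "id": "...", "name": "...", "input": { } } ], "stop_reason": "tool_use", "usage": { "input_tokens": 0, "output_tokens": 0 } }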
if ($response.stop_reason -eq 'tool_use') { Write-Debug -Message 'Tool use detected.' $formatAnthropicMessageSplat = @{ Role = 'assistant' ToolCall = $response.content ModelID = $ModelID NoContextPersist = $NoContextPersist } Format-AnthropicMessage @formatAnthropicMessageSplat | Out-Null } else { Write-Debug -Message ('Stop Reason: {0}' -f $response.stop_reason) if ([string]::IsNullOrWhiteSpace($response.content.text)) { if ($MaxTokens -lt 150) { Write-Warning -Message 'In some cases, the model may return an empty response when the max tokens is set to a low value.' Write-Warning -Message ('MaxTokens on this call was set to {0}.' -f $MaxTokens) Write-Warning -Message 'Try increasing the MaxTokens value and try again.' } throw ('No response text was returned from model API: {0}' -f $ModelID) } $content = $response.content.text $formatAnthropicMessageSplat = @{ Role = 'assistant' Message = $content ModelID = $ModelID NoContextPersist = $NoContextPersist } Format-AnthropicMessage @formatAnthropicMessageSplat | Out-Null } if ($ReturnFullObject) { return $response } else { return $content } } #Invoke-AnthropicModel <# .EXTERNALHELP pwshBedrock-help.xml #> function Invoke-CohereCommandModel { [CmdletBinding()] [Diagnostics.CodeAnalysis.SuppressMessageAttribute('PSUsePSCredentialType', '', Justification = 'Suppressed to support AWS credential parameter.')] param ( [Parameter(Mandatory = $true, HelpMessage = 'The message to be sent to the model.', ParameterSetName = 'Standard')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string]$Message, [Parameter(Mandatory = $true, HelpMessage = 'The unique identifier of the model.')] [ValidateSet( 'cohere.command-text-v14', 'cohere.command-light-text-v14' )] [string]$ModelID, [Parameter(Mandatory = $false, HelpMessage = 'Specify if you want the full object returned instead of just the message reply.')] [switch]$ReturnFullObject, # model parameters [Parameter(Mandatory = $false, HelpMessage = 'The amount of randomness injected into the response.')] [ValidateRange(0.0, 5.0)] [float]$Temperature, [Parameter(Mandatory = $false, HelpMessage = 'Use a lower value to ignore less probable options and decrease the diversity of responses.')] [ValidateRange(0.0, 1.0)] [float]$TopP, [Parameter(Mandatory = $false, HelpMessage = 'Specify the number of token choices the model uses to generate the next token.')] [ValidateRange(0, 500)] [int]$TopK, [Parameter(Mandatory = $false, HelpMessage = 'The maximum number of tokens to generate before stopping.')] [ValidateRange(1, 4096)] [int]$MaxTokens = 4096, [Parameter(Mandatory = $false, HelpMessage = 'Custom text sequences that cause the model to stop generating.')] [ValidateNotNullOrEmpty()] [string[]]$StopSequences, [Parameter(Mandatory = $false, HelpMessage = 'Specify how and if the token likelihoods are returned with the response.')] [ValidateSet('GENERATION', 'ALL', 'NONE')] [string]$ReturnLikelihoods, # * not supporting stream responses in pwshBedrock - maybe in the future # [Parameter(Mandatory = $false, # HelpMessage = 'Specify true to return the response piece-by-piece in real-time and false to return the complete response after the process finishes.')] # [bool]$Stream, [Parameter(Mandatory = $false, HelpMessage = 'The maximum number of generations that the model should return.')] [ValidateRange(1, 5)] [int]$Generations, [Parameter(Mandatory = $false, HelpMessage = 'Specifies how the API handles inputs longer than the maximum token length.')] [ValidateSet('NONE', 'START', 'END')] [string]$Truncate, # Common Parameters 
[Parameter(Mandatory = $false, HelpMessage = 'The AWS access key for the user account.')] [string]$AccessKey, [Parameter(Mandatory = $false, HelpMessage = 'An AWSCredentials object instance containing access and secret key information, and optionally a token for session-based credentials.')] [Amazon.Runtime.AWSCredentials]$Credential, [Parameter(Mandatory = $false, HelpMessage = 'The endpoint to make the call against. Not for normal use.')] [string]$EndpointUrl, [Parameter(Mandatory = $false, HelpMessage = 'Used with SAML-based authentication when ProfileName references a SAML role profile.')] [System.Management.Automation.PSCredential]$NetworkCredential, [Parameter(Mandatory = $false, HelpMessage = 'Used to specify the name and location of the ini-format credential file (shared with the AWS CLI and other AWS SDKs)')] [string]$ProfileLocation, [Parameter(Mandatory = $false, HelpMessage = 'The user-defined name of an AWS credentials or SAML-based role profile containing credential information.')] [string]$ProfileName, [Parameter(Mandatory = $false, HelpMessage = 'The system name of an AWS region or an AWSRegion instance.')] [object]$Region, [Parameter(Mandatory = $false, HelpMessage = 'The AWS secret key for the user account.')] [string]$SecretKey, [Parameter(Mandatory = $false, HelpMessage = 'The session token if the access and secret keys are temporary session-based credentials.')] [string]$SessionToken ) $modelInfo = $script:cohereModelInfo | Where-Object { $_.ModelId -eq $ModelID } Write-Debug -Message 'Model Info:' Write-Debug -Message ($modelInfo | Out-String) Write-Verbose -Message 'Formatting message for model.' $formattedMessages = $Message #region cmdletParams $bodyObj = @{ prompt = $formattedMessages } if ($Temperature) { $bodyObj.Add('temperature', $Temperature) } if ($TopP) { $bodyObj.Add('p', $TopP) } if ($TopK) { $bodyObj.Add('k', $TopK) } if ($MaxTokens) { $bodyObj.Add('max_tokens', $MaxTokens) } if ($StopSequences) { $bodyObj.Add('stop_sequences', $StopSequences) } if ($ReturnLikelihoods) { $bodyObj.Add('return_likelihoods', $ReturnLikelihoods) } if ($Generations) { $bodyObj.Add('num_generations', $Generations) } if ($Truncate) { $bodyObj.Add('truncate', $Truncate) } $jsonBody = $bodyObj | ConvertTo-Json -Depth 10 [byte[]]$byteArray = [System.Text.Encoding]::UTF8.GetBytes($jsonBody) $cmdletParams = @{ ContentType = 'application/json' ModelId = $ModelID Body = $byteArray } Write-Debug -Message 'Cmdlet Params:' Write-Debug -Message ($cmdletParams | Out-String) Write-Debug -Message 'Body JSON:' Write-Debug -Message ($jsonBody | Out-String) #endregion #region commonParams $commonParams = @{} if ($AccessKey) { $commonParams.Add('AccessKey', $AccessKey) } if ($Credential) { $commonParams.Add('Credential', $Credential) } if ($EndpointUrl) { $commonParams.Add('EndpointUrl', $EndpointUrl) } if ($NetworkCredential) { $commonParams.Add('NetworkCredential', $NetworkCredential) } if ($ProfileLocation) { $commonParams.Add('ProfileLocation', $ProfileLocation) } if ($ProfileName) { $commonParams.Add('ProfileName', $ProfileName) } if ($Region) { $commonParams.Add('Region', $Region) } if ($SecretKey) { $commonParams.Add('SecretKey', $SecretKey) } if ($SessionToken) { $commonParams.Add('SessionToken', $SessionToken) } #endregion try { $rawResponse = Invoke-BDRRModel @cmdletParams @commonParams -ErrorAction Stop } catch { $exceptionMessage = $_.Exception.Message if ($exceptionMessage -like "*don't have access*") { Write-Debug -Message 'Specific Error' Write-Warning -Message 'You do not have 
access to the requested model.' Write-Warning -Message 'In your AWS account, you will need to request access to the model.' Write-Warning -Message 'AWS -> Amazon Bedrock -> Model Access -> Request Access' throw ('No access to model {0}.' -f $ModelID) } else { Write-Debug -Message 'General Error' Write-Debug -Message ($_ | Out-String) Write-Error -Message $_ Write-Error -Message $_.Exception.Message throw } } if ([String]::IsNullOrWhiteSpace($rawResponse)) { throw 'No response from model API.' } Write-Verbose -Message 'Processing response.' try { $jsonBody = ConvertFrom-MemoryStreamToString -MemoryStream $rawResponse.body -ErrorAction Stop } catch { Write-Error $_ throw } Write-Debug -Message 'Response JSON:' Write-Debug -Message ($jsonBody | Out-String) Write-Verbose -Message 'Converting response from JSON.' $response = $jsonBody | ConvertFrom-Json # this model supports creating multiple generations of text foreach ($textGeneration in $response.generations.text) { $completion += $textGeneration } Write-Verbose -Message 'Calculating cost estimate.' Add-ModelCostEstimate -Usage $response -ModelID $ModelID if ($ReturnFullObject) { return $response } else { return $completion } } #Invoke-CohereCommandModel <# .EXTERNALHELP pwshBedrock-help.xml #> function Invoke-CohereCommandRModel { [CmdletBinding()] [Diagnostics.CodeAnalysis.SuppressMessageAttribute('PSUsePSCredentialType', '', Justification = 'Suppressed to support AWS credential parameter.')] param ( [Parameter(Mandatory = $true, ParameterSetName = 'MessageSet', HelpMessage = 'The message to be sent to the model.')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string]$Message, [Parameter(Mandatory = $true, HelpMessage = 'The unique identifier of the model.')] [ValidateSet( 'cohere.command-r-v1:0', 'cohere.command-r-plus-v1:0' )] [string]$ModelID, [Parameter(Mandatory = $false, HelpMessage = 'Specify if you want the full object returned instead of just the message reply.')] [switch]$ReturnFullObject, [Parameter(Mandatory = $false, HelpMessage = 'Do not persist the conversation context history.')] [switch]$NoContextPersist, # model parameters [Parameter(Mandatory = $false, HelpMessage = "Previous messages between the user and the model, meant to give the model conversational context for responding to the user's message.")] [PSCustomObject[]]$ChatHistory, [Parameter(Mandatory = $false, HelpMessage = 'A list of texts that the model can cite to generate a more accurate reply. Each document contains a title and snippet.')] [PSCustomObject[]]$Documents, [Parameter(Mandatory = $false, HelpMessage = "Defaults to false. When true, the response will only contain a list of generated search queries, but no search will take place, and no reply from the model to the user's message will be generated.")] [bool]$SearchQueriesOnly, [Parameter(Mandatory = $false, HelpMessage = 'A preamble is a system message that is provided to a model at the beginning of a conversation which dictates how the model should behave throughout. 
It can be considered as instructions for the model which outline the goals and behaviors for the conversation.')] [string]$Preamble, [Parameter(Mandatory = $false, HelpMessage = 'The maximum number of tokens to generate before stopping.')] [ValidateRange(1, 4000)] [int]$MaxTokens = 4000, [Parameter(Mandatory = $false, HelpMessage = 'The amount of randomness injected into the response.')] [ValidateRange(0.0, 1.0)] [float]$Temperature, [Parameter(Mandatory = $false, HelpMessage = 'Use a lower value to ignore less probable options and decrease the diversity of responses.')] [ValidateRange(0.01, 0.99)] [float]$TopP, [Parameter(Mandatory = $false, HelpMessage = 'Specify the number of token choices the model uses to generate the next token.')] [ValidateRange(0, 500)] [int]$TopK, [Parameter(Mandatory = $false, HelpMessage = "With prompt_truncation set to AUTO_PRESERVE_ORDER, some elements from chat_history and documents will be dropped to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be preserved. With prompt_truncation set to OFF, no elements will be dropped.")] [ValidateSet( 'OFF', 'AUTO_PRESERVE_ORDER' )] [string]$PromptTruncation, [Parameter(Mandatory = $false, HelpMessage = 'Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.')] [ValidateRange(0.0, 1.0)] [float]$FrequencyPenalty, [Parameter(Mandatory = $false, HelpMessage = 'Used to reduce repetitiveness of generated tokens. Similar to frequency_penalty, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.')] [ValidateRange(0.0, 1.0)] [float]$PresencePenalty, [Parameter(Mandatory = $false, HelpMessage = 'If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.')] [int]$Seed, [Parameter(Mandatory = $false, HelpMessage = 'Specify true to return the full prompt that was sent to the model. The default value is false. In the response, the prompt is returned in the prompt field.')] [bool]$ReturnPrompt, [Parameter(Mandatory = $false, HelpMessage = 'A list of available tools (functions) that the model may suggest invoking before producing a text response.')] [PSCustomObject[]]$Tools, [Parameter(Mandatory = $false, ParameterSetName = 'ToolsResultsSet', HelpMessage = 'A list of results from invoking tools recommended by the model in the previous chat turn.')] [PSCustomObject[]]$ToolsResults, [Parameter(Mandatory = $false, HelpMessage = 'Custom text sequences that cause the model to stop generating.')] [ValidateNotNullOrEmpty()] [string[]]$StopSequences, [Parameter(Mandatory = $false, HelpMessage = "Specify true, to send the user's message to the model without any preprocessing, otherwise false.")] [bool]$RawPrompting, # Common Parameters [Parameter(Mandatory = $false, HelpMessage = 'The AWS access key for the user account.')] [string]$AccessKey, [Parameter(Mandatory = $false, HelpMessage = 'An AWSCredentials object instance containing access and secret key information, and optionally a token for session-based credentials.')] [Amazon.Runtime.AWSCredentials]$Credential, [Parameter(Mandatory = $false, HelpMessage = 'The endpoint to make the call against. 
Not for normal use.')] [string]$EndpointUrl, [Parameter(Mandatory = $false, HelpMessage = 'Used with SAML-based authentication when ProfileName references a SAML role profile.')] [System.Management.Automation.PSCredential]$NetworkCredential, [Parameter(Mandatory = $false, HelpMessage = 'Used to specify the name and location of the ini-format credential file (shared with the AWS CLI and other AWS SDKs)')] [string]$ProfileLocation, [Parameter(Mandatory = $false, HelpMessage = 'The user-defined name of an AWS credentials or SAML-based role profile containing credential information.')] [string]$ProfileName, [Parameter(Mandatory = $false, HelpMessage = 'The system name of an AWS region or an AWSRegion instance.')] [object]$Region, [Parameter(Mandatory = $false, HelpMessage = 'The AWS secret key for the user account.')] [string]$SecretKey, [Parameter(Mandatory = $false, HelpMessage = 'The session token if the access and secret keys are temporary session-based credentials.')] [string]$SessionToken ) $modelInfo = $script:cohereModelInfo | Where-Object { $_.ModelId -eq $ModelID } Write-Debug -Message 'Model Info:' Write-Debug -Message ($modelInfo | Out-String) # no matter what, message, or message + chat history, we will always pass the message to the model # the difference is that if chat history is provided, we will use that instead of the global context # if chat history is provided, we will not store the message in the global context # if chat history is not provided, we will store the message in the global context # if chat history is not provided, we will still pass ChatHistory to the model using the global context # if chat history is provided, we will pass ChatHistory to the model using the provided chat history #region cmdletParams # if ToolsResults is passed, Tools must also be passed if ($PSCmdlet.ParameterSetName -eq 'ToolsResultsSet' -and -not $Tools) { throw 'Tools must be passed if ToolsResults are passed.' } # we don't need to pass the message if we are only passing tools results if ($PSCmdlet.ParameterSetName -ne 'ToolsResultsSet') { $bodyObj = @{ message = $Message } } else { $bodyObj = @{} } if ($Tools) { Write-Debug -Message 'Tools provided.' # Tools - must be formed properly $toolsEval = Test-CohereCommandRTool -Tools $Tools if ($toolsEval -ne $true) { throw 'Tools validation failed.' } $bodyObj.Add('tools', $Tools) } if ($ToolsResults) { Write-Debug -Message 'ToolsResults provided.' # ToolsResults - must be formed properly $toolsResultsEval = Test-CohereCommandRToolResult -ToolResults $ToolsResults if ($toolsResultsEval -ne $true) { throw 'Tool results validation failed.' } $bodyObj.Add('tool_results', $ToolsResults) } # if the user has provided a chat history, we will use that instead of the global context if ($ChatHistory) { # ChatHistory - must be formed properly $chatHistoryEval = Test-CohereCommandRChatHistory -ChatHistory $ChatHistory if ($chatHistoryEval -ne $true) { throw 'Chat history validation failed.' } # reset the global context Reset-ModelContext -ModelID $ModelID $bodyObj.Add('chat_history', $ChatHistory) } else { # this part is tricky. 
we only add the chat history if the global context is not empty # this is because if this is the first message to the model, we don't want to pass an empty chat history # also, the caller may be using the NoContextPersist parameter each time, so we need to account for that $contextEval = Get-ModelContext -ModelID $ModelID if ($contextEval) { $bodyObj.Add('chat_history', $contextEval) } } if ($Documents) { $bodyObj.Add('documents', $Documents) } if ($SearchQueriesOnly) { $bodyObj.Add('search_queries_only', $SearchQueriesOnly) } if ($Preamble) { $bodyObj.Add('preamble', $Preamble) } if ($MaxTokens) { $bodyObj.Add('max_tokens', $MaxTokens) } if ($Temperature) { $bodyObj.Add('temperature', $Temperature) } if ($TopP) { $bodyObj.Add('p', $TopP) } if ($TopK) { $bodyObj.Add('k', $TopK) } if ($PromptTruncation) { $bodyObj.Add('prompt_truncation', $PromptTruncation) } if ($FrequencyPenalty) { $bodyObj.Add('frequency_penalty', $FrequencyPenalty) } if ($PresencePenalty) { $bodyObj.Add('presence_penalty', $PresencePenalty) } if ($Seed) { $bodyObj.Add('seed', $Seed) } if ($ReturnPrompt) { $bodyObj.Add('return_prompt', $ReturnPrompt) } if ($StopSequences) { $bodyObj.Add('stop_sequences', $StopSequences) } if ($RawPrompting) { $bodyObj.Add('raw_prompting', $RawPrompting) } $jsonBody = $bodyObj | ConvertTo-Json -Depth 10 [byte[]]$byteArray = [System.Text.Encoding]::UTF8.GetBytes($jsonBody) $cmdletParams = @{ ContentType = 'application/json' ModelId = $ModelID Body = $byteArray } Write-Debug -Message 'Cmdlet Params:' Write-Debug -Message ($cmdletParams | Out-String) Write-Debug -Message 'Body JSON:' Write-Debug -Message ($jsonBody | Out-String) #endregion #region commonParams $commonParams = @{} if ($AccessKey) { $commonParams.Add('AccessKey', $AccessKey) } if ($Credential) { $commonParams.Add('Credential', $Credential) } if ($EndpointUrl) { $commonParams.Add('EndpointUrl', $EndpointUrl) } if ($NetworkCredential) { $commonParams.Add('NetworkCredential', $NetworkCredential) } if ($ProfileLocation) { $commonParams.Add('ProfileLocation', $ProfileLocation) } if ($ProfileName) { $commonParams.Add('ProfileName', $ProfileName) } if ($Region) { $commonParams.Add('Region', $Region) } if ($SecretKey) { $commonParams.Add('SecretKey', $SecretKey) } if ($SessionToken) { $commonParams.Add('SessionToken', $SessionToken) } #endregion try { $rawResponse = Invoke-BDRRModel @cmdletParams @commonParams -ErrorAction Stop } catch { $exceptionMessage = $_.Exception.Message if ($exceptionMessage -like "*don't have access*") { Write-Debug -Message 'Specific Error' Write-Warning -Message 'You do not have access to the requested model.' Write-Warning -Message 'In your AWS account, you will need to request access to the model.' Write-Warning -Message 'AWS -> Amazon Bedrock -> Model Access -> Request Access' throw ('No access to model {0}.' -f $ModelID) } else { Write-Debug -Message 'General Error' Write-Debug -Message ($_ | Out-String) Write-Error -Message $_ Write-Error -Message $_.Exception.Message throw } } if ([String]::IsNullOrWhiteSpace($rawResponse)) { throw 'No response from model API.' } Write-Verbose -Message 'Processing response.' try { $jsonBody = ConvertFrom-MemoryStreamToString -MemoryStream $rawResponse.body -ErrorAction Stop } catch { Write-Error $_ throw } Write-Debug -Message 'Response JSON:' Write-Debug -Message ($jsonBody | Out-String) Write-Verbose -Message 'Converting response from JSON.' 
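# Illustrative only: the Cohere Command R response body is expected to resemble the JSON shape
# sketched below; $response.text and $response.chat_history are consumed after conversion. Field
# names other than text and chat_history are assumptions and may vary by model version.
#   {
#       "response_id": "...",
#       "text": "...",
#       "chat_history": [
#           { "role": "USER", "message": "..." },
#           { "role": "CHATBOT", "message": "..." }
#       ],
#       "finish_reason": "COMPLETE"
#   }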
$response = $jsonBody | ConvertFrom-Json if ([string]::IsNullOrWhiteSpace($response.text)) { if ($MaxTokens -lt 150) { Write-Warning -Message 'In some cases, the model may return an empty response when the max tokens is set to a low value.' Write-Warning -Message ('MaxTokens on this call was set to {0}.' -f $MaxTokens) Write-Warning -Message 'Try increasing the MaxTokens value and try again.' } throw ('No response text was returned from model API: {0}' -f $ModelID) } Write-Verbose -Message 'Calculating cost estimate.' if ($PSCmdlet.ParameterSetName -eq 'ToolsResultsSet') { # convert the tools results to a string for the cost estimate $Message = $ToolsResults | ConvertTo-Json -Depth 10 } Add-ModelCostEstimate -Usage $response -Message $Message -ModelID $ModelID # in this model, the full chat history is returned in the response if ($NoContextPersist -eq $false -and -Not ([string]::IsNullOrWhiteSpace($response.text))) { Write-Verbose -Message 'Adding response to model context history.' Reset-ModelContext -ModelID $ModelID $contextObj = $Global:pwshBedrockModelContext | Where-Object { $_.ModelID -eq $ModelID } $contextObj.Context.Add($response.chat_history) } if ($ReturnFullObject) { return $response } else { return $response.text } } #Invoke-CohereCommandRModel <# .EXTERNALHELP pwshBedrock-help.xml #> function Invoke-ConverseAPI { [CmdletBinding( DefaultParameterSetName = 'MessageSet' )] [Diagnostics.CodeAnalysis.SuppressMessageAttribute('PSUsePSCredentialType', '', Justification = 'Suppressed to support AWS credential parameter.')] param ( [Parameter(Mandatory = $true, HelpMessage = 'The unique identifier of the model.')] [ValidateSet( # 'ai21.j2-grande-instruct', # *note: not supported by Converse API # 'ai21.j2-jumbo-instruct', # *note: not supported by Converse API 'ai21.jamba-instruct-v1:0', # 'ai21.j2-mid-v1', # *note: not supported by Converse API # 'ai21.j2-ultra-v1', # *note: not supported by Converse API # 'amazon.titan-image-generator-v1', # *note: not supported by Converse API 'amazon.titan-text-express-v1', 'amazon.titan-text-lite-v1', 'amazon.titan-text-premier-v1:0', 'amazon.titan-tg1-large', 'anthropic.claude-v2:1', 'anthropic.claude-3-haiku-20240307-v1:0', 'anthropic.claude-3-opus-20240229-v1:0', 'anthropic.claude-3-sonnet-20240229-v1:0', 'anthropic.claude-3-5-sonnet-20240620-v1:0', # 'cohere.command-text-v14', # *note: not supported by Converse API # 'cohere.command-light-text-v14', # *note: not supported by Converse API 'cohere.command-r-v1:0', 'cohere.command-r-plus-v1:0', 'meta.llama2-13b-chat-v1', 'meta.llama2-70b-chat-v1', 'meta.llama3-70b-instruct-v1:0', 'meta.llama3-8b-instruct-v1:0', 'meta.llama3-1-8b-instruct-v1:0', 'meta.llama3-1-70b-instruct-v1:0', 'mistral.mistral-7b-instruct-v0:2', 'mistral.mistral-large-2402-v1:0', 'mistral.mistral-large-2407-v1:0', 'mistral.mistral-small-2402-v1:0', 'mistral.mixtral-8x7b-instruct-v0:1' # 'stability.stable-diffusion-xl-v1' # *note: not supported by Converse API )] [string]$ModelID, [Parameter(Mandatory = $false, ParameterSetName = 'MessageSet', HelpMessage = 'The message to be sent to the model.')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string]$Message, [Parameter(Mandatory = $false, HelpMessage = 'File path to local media file.', ParameterSetName = 'MessageSet')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string[]]$MediaPath, [Parameter(Mandatory = $false, HelpMessage = 'File path to local document.', ParameterSetName = 'MessageSet')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string[]]$DocumentPath, 
[Parameter(Mandatory = $false, HelpMessage = 'Specify if you want the full object returned instead of just the message reply.')] [switch]$ReturnFullObject, [Parameter(Mandatory = $false, HelpMessage = 'Do not persist the conversation context history.')] [switch]$NoContextPersist, #_____________________________________________________________________________________ # base set of inference parameters [Parameter(Mandatory = $false, HelpMessage = 'The maximum number of tokens to allow in the generated response.')] [int]$MaxTokens, [Parameter(Mandatory = $false, HelpMessage = 'A list of stop sequences. A stop sequence is a sequence of characters that causes the model to stop generating the response.')] [ValidateNotNullOrEmpty()] [string[]]$StopSequences, [Parameter(Mandatory = $false, HelpMessage = 'The likelihood of the model selecting higher-probability options while generating a response.')] [float]$Temperature, [Parameter(Mandatory = $false, HelpMessage = 'The percentage of most-likely candidates that the model considers for the next token.')] [float]$TopP, #_____________________________________________________________________________________ # model parameters [Parameter(Mandatory = $false, HelpMessage = 'Sets the behavior and context for the model in the conversation.')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string]$SystemPrompt, [Parameter(Mandatory = $false, HelpMessage = 'Definitions of tools that the model may use.')] [PSCustomObject[]]$Tools, [Parameter(Mandatory = $false, HelpMessage = 'Specifies how tool functions are called.')] [ValidateSet('auto', 'any', 'tool')] [string]$ToolChoice, [Parameter(Mandatory = $false, HelpMessage = "The name of the tool that model should use to answer the user's question.")] [string]$ToolName, [Parameter(Mandatory = $true, ParameterSetName = 'ToolsResultsSet', HelpMessage = 'A list of results from invoking tools recommended by the model in the previous chat turn.')] [ValidateNotNull()] [PSCustomObject[]]$ToolsResults, [Parameter(Mandatory = $false, HelpMessage = 'The identifier for the guardrail.')] [ValidatePattern('^[a-zA-Z0-9]+$')] [string]$GuardrailID, [Parameter(Mandatory = $false, HelpMessage = 'The version of the guardrail. ')] [ValidatePattern('^([1-9][0-9]{0,7})|(DRAFT)$')] [string]$GuardrailVersion, [Parameter(Mandatory = $false, HelpMessage = 'The trace behavior for the guardrail.')] [ValidateSet('enabled', 'disabled')] [string]$GuardrailTrace, [Parameter(Mandatory = $false, HelpMessage = 'Additional inference parameters that the model supports, beyond the base set of inference parameters that Converse supports.')] [PSObject]$AdditionalModelRequestField, [Parameter(Mandatory = $false, HelpMessage = 'Additional model parameters field paths to return in the response.')] [string[]]$AdditionalModelResponseFieldPath, #_____________________________________________________________________________________ # Common Parameters [Parameter(Mandatory = $false, HelpMessage = 'The AWS access key for the user account.')] [string]$AccessKey, [Parameter(Mandatory = $false, HelpMessage = 'An AWSCredentials object instance containing access and secret key information, and optionally a token for session-based credentials.')] [Amazon.Runtime.AWSCredentials]$Credential, [Parameter(Mandatory = $false, HelpMessage = 'The endpoint to make the call against. 
Not for normal use.')] [string]$EndpointUrl, [Parameter(Mandatory = $false, HelpMessage = 'Used with SAML-based authentication when ProfileName references a SAML role profile.')] [System.Management.Automation.PSCredential]$NetworkCredential, [Parameter(Mandatory = $false, HelpMessage = 'Used to specify the name and location of the ini-format credential file (shared with the AWS CLI and other AWS SDKs)')] [string]$ProfileLocation, [Parameter(Mandatory = $false, HelpMessage = 'The user-defined name of an AWS credentials or SAML-based role profile containing credential information.')] [string]$ProfileName, [Parameter(Mandatory = $false, HelpMessage = 'The system name of an AWS region or an AWSRegion instance.')] [object]$Region, [Parameter(Mandatory = $false, HelpMessage = 'The AWS secret key for the user account.')] [string]$SecretKey, [Parameter(Mandatory = $false, HelpMessage = 'The session token if the access and secret keys are temporary session-based credentials.')] [string]$SessionToken ) $modelInfo = Get-ModelInfo -ModelID $ModelID Write-Debug -Message 'Using Converse API to call model.' Write-Debug -Message 'Model Info:' Write-Debug -Message ($modelInfo | Out-String) if ($SystemPrompt -and $modelInfo.SystemPrompt -eq $false) { throw ('Model {0} does not support system prompts.' -f $ModelID) } if ($Tools -and $modelInfo.ToolUse -eq $false) { throw ('Model {0} does not support tools use.' -f $ModelID) } if ($ToolChoice -eq 'tool' -and [string]::IsNullOrWhiteSpace($ToolName)) { throw 'ToolName must be specified when ToolChoice is set to tool.' } if ($PSBoundParameters.ContainsKey('GuardrailID') -or $PSBoundParameters.ContainsKey('GuardrailVersion') -or $PSBoundParameters.ContainsKey('GuardrailTrace')) { # Ensure that all three specific parameters are provided Write-Debug -Message ($PSBoundParameters | Out-String) if (-not ($PSBoundParameters.ContainsKey('GuardrailID')) -or -not ($PSBoundParameters.ContainsKey('GuardrailVersion')) -or -not ($PSBoundParameters.ContainsKey('GuardrailTrace'))) { throw 'If any of the GuardrailID, GuardrailVersion, or GuardrailTrace parameters are provided, all three must be provided.' } $guardrailUse = $true } Write-Debug -Message ('Parameter Set: {0}' -f $PSCmdlet.ParameterSetName) if ($PSCmdlet.ParameterSetName -eq 'MessageSet') { if ($MediaPath) { Write-Debug -Message 'Media path provided.' if ($modelInfo.Vision -ne $true) { Write-Warning -Message ('You provided a media path for model {0}. Vision is not supported for this model.' -f $ModelID) throw 'Vision is not supported for this model.' } Write-Debug -Message ('Media Path Count: {0}' -f $MediaPath.Count) if ($MediaPath.Count -gt 20) { throw ('You provided {0} media files. You can only provide up to 20 media files.' -f $MediaPath.Count) } foreach ($media in $MediaPath) { if (-not (Test-ConverseAPIMedia -MediaPath $media)) { throw ('Media test for {0} failed.' -f $media) } } $formatConverseAPISplat = @{ Role = 'user' ModelID = 'Converse' MediaPath = $MediaPath NoContextPersist = $NoContextPersist } if ($Message) { $formatConverseAPISplat.Add('Message', $Message) } $formattedUserMessage = Format-ConverseAPI @formatConverseAPISplat } elseif ($DocumentPath) { Write-Debug -Message 'Document path provided.' if ($modelInfo.Document -ne $true) { Write-Warning -Message ('You provided a document path for model {0}. Document is not supported for this model.' -f $ModelID) throw 'Document is not supported for this model.' } if ($DocumentPath.Count -gt 5) { throw ('You provided {0} documents. 
You can only provide up to 5 documents.' -f $DocumentPath.Count) } foreach ($document in $DocumentPath) { if (-not (Test-ConverseAPIDocument -DocumentPath $document)) { throw ('Document test for {0} failed.' -f $document) } } $formatConverseAPISplat = @{ Role = 'user' ModelID = 'Converse' DocumentPath = $DocumentPath NoContextPersist = $NoContextPersist } if ($Message) { $formatConverseAPISplat.Add('Message', $Message) } $formattedUserMessage = Format-ConverseAPI @formatConverseAPISplat } elseif ($Message) { Write-Debug -Message 'Message provided.' $formatConverseAPISplat = @{ Role = 'user' Message = $Message ModelID = 'Converse' NoContextPersist = $NoContextPersist } $formattedUserMessage = Format-ConverseAPI @formatConverseAPISplat } else { throw 'You must provide either a message, media path, or document path.' } } elseif ($PSCmdlet.ParameterSetName -eq 'ToolsResultsSet') { Write-Debug -Message 'ToolsResultsSet' # ToolsResults - must be formed properly $toolsResultsEval = Test-ConverseAPIToolResult -ToolResults $ToolsResults if ($toolsResultsEval -ne $true) { throw 'Tool results validation failed.' } foreach ($toolResult in $ToolsResults) { $formatConverseAPISplat = @{ Role = 'user' ToolsResults = $ToolsResults ModelID = 'Converse' NoContextPersist = $NoContextPersist } $formattedToolsResults += Format-ConverseAPI @formatConverseAPISplat } } if ($NoContextPersist -eq $true) { $formattedMessages = @( $formattedUserMessage $formattedToolsResults ) } else { $formattedMessages = Get-ModelContext -ModelID 'Converse' } #region cmdletParams <# https://docs.aws.amazon.com/powershell/latest/reference/items/Invoke-BDRRConverse.html -ModelId <String> -AdditionalModelRequestField <PSObject> -AdditionalModelResponseFieldPath <String[]> -ToolChoice_Any <AnyToolChoice> -ToolChoice_Auto <AutoToolChoice> -GuardrailConfig_GuardrailIdentifier <String> -GuardrailConfig_GuardrailVersion <String> -InferenceConfig_MaxToken <Int32> -Message <Message[]> -Tool_Name <String> -InferenceConfig_StopSequence <String[]> -System <SystemContentBlock[]> -InferenceConfig_Temperature <Single> -ToolConfig_Tool <Tool[]> -InferenceConfig_TopP <Single> -GuardrailConfig_Trace <GuardrailTrace> -Select <String> -PassThru <SwitchParameter> -Force <SwitchParameter> -ClientConfig <AmazonBedrockRuntimeConfig> #> $invokeBDRRConverseSplat = @{ ModelId = $ModelID } if ($formattedMessages) { $invokeBDRRConverseSplat.Add('Message', $formattedMessages) } if ($Tools) { Write-Debug -Message 'Tools provided.' # Tools - must be formed properly $toolsEval = Test-ConverseAPITool -Tools $Tools if ($toolsEval -ne $true) { throw 'Tools validation failed.' } $allTools = Format-ConverseAPIToolConfig -ToolsConfig $Tools $invokeBDRRConverseSplat.Add('ToolConfig_Tool', $allTools) } <# ToolChoice is only supported by Anthropic Claude 3 models and by Mistral AI Mistral Large. Error example: Invoke-BDRRConverse: This model doesn't support the toolConfig.toolChoice.any field. Remove toolConfig.toolChoice.any and try again. 
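    A minimal usage sketch (assumes the caller already has a properly formed tool definition in $toolSpec and model access in the target region; the variable name is illustrative only):
        Invoke-ConverseAPI -ModelID 'anthropic.claude-3-sonnet-20240229-v1:0' -Message 'What is the weather in Seattle?' -Tools $toolSpec -ToolChoice auto -Region us-west-2
    If ToolChoice is set to 'tool', -ToolName must also be supplied so it can be mapped to the Tool_Name cmdlet parameter below.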
#> if ($ToolChoice) { switch ($ToolChoice) { 'any' { Write-Debug -Message 'ToolChoice: Any' $anyTool = [Amazon.BedrockRuntime.Model.AnyToolChoice]::new() $invokeBDRRConverseSplat.Add('ToolChoice_Any', $anyTool) } 'auto' { Write-Debug -Message 'ToolChoice: Auto' $autoTool = [Amazon.BedrockRuntime.Model.AutoToolChoice]::new() $invokeBDRRConverseSplat.Add('ToolChoice_Auto', $autoTool) } 'tool' { Write-Debug -Message 'ToolChoice: Tool' $invokeBDRRConverseSplat.Add('Tool_Name', $ToolName) } } } if ($guardrailUse -eq $true) { $invokeBDRRConverseSplat.Add('GuardrailConfig_GuardrailIdentifier', $GuardrailID) $invokeBDRRConverseSplat.Add('GuardrailConfig_GuardrailVersion', $GuardrailVersion) $guardRailTrace = [Amazon.BedrockRuntime.GuardrailTrace]::new($GuardrailTrace) $invokeBDRRConverseSplat.Add('GuardrailConfig_Trace', $guardRailTrace) } #_____________________________________ if ($MaxTokens) { $invokeBDRRConverseSplat.Add('InferenceConfig_MaxToken', $MaxTokens) } if ($StopSequences) { $invokeBDRRConverseSplat.Add('InferenceConfig_StopSequence', $StopSequences) } if ($Temperature) { $invokeBDRRConverseSplat.Add('InferenceConfig_Temperature', $Temperature) } if ($TopP) { $invokeBDRRConverseSplat.Add('InferenceConfig_TopP', $TopP) } #_____________________________________ if ($SystemPrompt) { # TODO: Add support for https://docs.aws.amazon.com/sdkfornet/v3/apidocs/items/BedrockRuntime/TGuardrailConverseContentBlock.html # https://docs.aws.amazon.com/sdkfornet/v3/apidocs/items/BedrockRuntime/TSystemContentBlock.html $systemContentBlock = [Amazon.BedrockRuntime.Model.SystemContentBlock]::new() $systemContentBlock.Text = $SystemPrompt $invokeBDRRConverseSplat.Add('System', $systemContentBlock) } if ($AdditionalModelRequestField) { $invokeBDRRConverseSplat.Add('AdditionalModelRequestField', $AdditionalModelRequestField) } if ($AdditionalModelResponseFieldPath) { $invokeBDRRConverseSplat.Add('AdditionalModelResponseFieldPath', $AdditionalModelResponseFieldPath) } Write-Debug -Message 'Cmdlet Params:' Write-Debug -Message ($invokeBDRRConverseSplat | Out-String) #endregion #region commonParams $commonParams = @{} if ($AccessKey) { $commonParams.Add('AccessKey', $AccessKey) } if ($Credential) { $commonParams.Add('Credential', $Credential) } if ($EndpointUrl) { $commonParams.Add('EndpointUrl', $EndpointUrl) } if ($NetworkCredential) { $commonParams.Add('NetworkCredential', $NetworkCredential) } if ($ProfileLocation) { $commonParams.Add('ProfileLocation', $ProfileLocation) } if ($ProfileName) { $commonParams.Add('ProfileName', $ProfileName) } if ($Region) { $commonParams.Add('Region', $Region) } if ($SecretKey) { $commonParams.Add('SecretKey', $SecretKey) } if ($SessionToken) { $commonParams.Add('SessionToken', $SessionToken) } #endregion try { $rawResponse = Invoke-BDRRConverse @invokeBDRRConverseSplat @commonParams -ErrorAction Stop } catch { # we need to remove the user context from the global variable if the model is not successfully engaged $context = $Global:pwshBedrockModelContext | Where-Object { $_.ModelID -eq 'Converse' } Write-Debug -Message 'Catch Block. Context:' Write-Debug -Message ($context | Out-String) Write-Debug -Message ('Context count: {0}' -f $context.Context.Count) if ($context.Context.Count -le 1) { Write-Debug -Message 'Resetting context.' $context.Context = New-Object System.Collections.Generic.List[object] } else { Write-Debug -Message 'Removing context entry.' 
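            # drop the most recent context entry (the user turn added for this call) so the stored Converse history matches what the model actually accepted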
$context.Context.RemoveAt($context.Context.Count - 1) } $exceptionMessage = $_.Exception.Message if ($exceptionMessage -like "*don't have access*") { Write-Debug -Message 'Specific Error' Write-Warning -Message 'You do not have access to the requested model.' Write-Warning -Message 'In your AWS account, you will need to request access to the model.' Write-Warning -Message 'AWS -> Amazon Bedrock -> Model Access -> Request Access' throw ('No access to model {0}.' -f $ModelID) } elseif ($exceptionMessage -like "*doesn't support the model*") { # This action doesn't support the model that you provided. Try again with a supported text or chat model. Write-Debug -Message 'Specific Error' Write-Warning -Message 'The Converse API does not support all foundational models.' throw ('Converse API does not support {0} for this action. Try again with a supported text or chat model.' -f $ModelID) } else { Write-Debug -Message 'General Error' Write-Debug -Message ($_ | Out-String) Write-Error -Message $_ Write-Error -Message $_.Exception.Message throw } } if ([String]::IsNullOrWhiteSpace($rawResponse)) { throw 'No response from model API.' } $response = $rawResponse # end_turn | tool_use | max_tokens | stop_sequence | guardrail_intervened | content_filtered Write-Debug -Message ('Stop Reason: {0}' -f $response.StopReason) # $toolUse = $false switch ($response.StopReason) { 'end_turn' { Write-Debug -Message 'End of turn detected.' } 'tool_use' { Write-Debug -Message 'Tool calls detected.' # $toolUse = $true } 'max_tokens' { Write-Debug -Message 'Max tokens reached.' Write-Warning -Message ('The model reached the maximum token limit of {0}.' -f $MaxTokens) } 'stop_sequence' { Write-Debug -Message 'Stop sequence detected.' Write-Warning -Message 'The model stopped generating the response due to a stop sequence.' } 'guardrail_intervened' { Write-Debug -Message 'Guardrail intervened.' Write-Warning -Message 'The model stopped generating the response due to a guardrail.' } 'content_filtered' { Write-Debug -Message 'Content filtered.' Write-Warning -Message 'The model stopped generating the response due to content filtering.' } } if ($NoContextPersist -eq $false ) { Write-Verbose -Message 'Adding response to model context history.' $formatConverseAPISplat = @{ Role = 'assistant' ReturnMessage = $response.Output.Message ModelID = 'Converse' } Format-ConverseAPI @formatConverseAPISplat | Out-Null } Write-Verbose -Message 'Calculating cost estimate.' 
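    # the Converse API reports token usage directly on the response (Usage.InputTokens / Usage.OutputTokens), so the tally is updated from those counts rather than estimated from message text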
Add-ModelCostEstimate -Usage $response.Usage -ModelID $ModelID -Converse if ($ReturnFullObject) { return $response } else { return $response.Output.Message.Content.Text } } #Invoke-ConverseAPI <# .EXTERNALHELP pwshBedrock-help.xml #> function Invoke-MetaModel { [CmdletBinding()] [Diagnostics.CodeAnalysis.SuppressMessageAttribute('PSUsePSCredentialType', '', Justification = 'Suppressed to support AWS credential parameter.')] param ( [Parameter(Mandatory = $true, HelpMessage = 'The message to be sent to the model.', ParameterSetName = 'Standard')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string]$Message, [Parameter(Mandatory = $true, HelpMessage = 'The unique identifier of the model.')] [ValidateSet( 'meta.llama2-13b-chat-v1', 'meta.llama2-70b-chat-v1', 'meta.llama3-8b-instruct-v1:0', 'meta.llama3-70b-instruct-v1:0', 'meta.llama3-1-8b-instruct-v1:0', 'meta.llama3-1-70b-instruct-v1:0' )] [string]$ModelID, [Parameter(Mandatory = $false, HelpMessage = 'Specify if you want the full object returned instead of just the message reply.')] [switch]$ReturnFullObject, [Parameter(Mandatory = $false, HelpMessage = 'Do not persist the conversation context history.')] [switch]$NoContextPersist, # model parameters [Parameter(Mandatory = $false, HelpMessage = 'The system prompt for the request.')] [string]$SystemPrompt, [Parameter(Mandatory = $false, HelpMessage = 'The maximum number of tokens to generate before stopping.')] [ValidateRange(1, 2048)] [int]$MaxTokens = 2048, [Parameter(Mandatory = $false, HelpMessage = 'The amount of randomness injected into the response.')] [ValidateRange(0.0, 1.0)] [float]$Temperature, [Parameter(Mandatory = $false, HelpMessage = 'Use a lower value to ignore less probable options and decrease the diversity of responses.')] [ValidateRange(0.0, 1.0)] [float]$TopP, # Common Parameters [Parameter(Mandatory = $false, HelpMessage = 'The AWS access key for the user account.')] [string]$AccessKey, [Parameter(Mandatory = $false, HelpMessage = 'An AWSCredentials object instance containing access and secret key information, and optionally a token for session-based credentials.')] [Amazon.Runtime.AWSCredentials]$Credential, [Parameter(Mandatory = $false, HelpMessage = 'The endpoint to make the call against. 
Not for normal use.')] [string]$EndpointUrl, [Parameter(Mandatory = $false, HelpMessage = 'Used with SAML-based authentication when ProfileName references a SAML role profile.')] [System.Management.Automation.PSCredential]$NetworkCredential, [Parameter(Mandatory = $false, HelpMessage = 'Used to specify the name and location of the ini-format credential file (shared with the AWS CLI and other AWS SDKs)')] [string]$ProfileLocation, [Parameter(Mandatory = $false, HelpMessage = 'The user-defined name of an AWS credentials or SAML-based role profile containing credential information.')] [string]$ProfileName, [Parameter(Mandatory = $false, HelpMessage = 'The system name of an AWS region or an AWSRegion instance.')] [object]$Region, [Parameter(Mandatory = $false, HelpMessage = 'The AWS secret key for the user account.')] [string]$SecretKey, [Parameter(Mandatory = $false, HelpMessage = 'The session token if the access and secret keys are temporary session-based credentials.')] [string]$SessionToken ) $modelInfo = $script:metaModelInfo | Where-Object { $_.ModelId -eq $ModelID } Write-Debug -Message 'Model Info:' Write-Debug -Message ($modelInfo | Out-String) # before we format the message (which creates context), we need to store the current context # this can be used to restore the context if the model fails to respond $originalContext = Get-ModelContext -ModelID $ModelID if ([string]::IsNullOrEmpty($originalContext)) { Write-Debug -Message 'No original context' $originalContext = '' } $formatMetaTextMessageSplat = @{ Role = 'User' Message = $Message ModelID = $ModelID NoContextPersist = $NoContextPersist } if ($SystemPrompt) { $formatMetaTextMessageSplat.Add('SystemPrompt', $SystemPrompt) } $formattedMessages = Format-MetaTextMessage @formatMetaTextMessageSplat #region cmdletParams $bodyObj = @{ prompt = $formattedMessages } if ($Temperature) { $bodyObj.Add('temperature', $Temperature) } if ($TopP) { $bodyObj.Add('top_p', $TopP) } if ($MaxTokens -ne 512) { $bodyObj.Add('max_gen_len', $MaxTokens) } $jsonBody = $bodyObj | ConvertTo-Json -Depth 10 [byte[]]$byteArray = [System.Text.Encoding]::UTF8.GetBytes($jsonBody) $cmdletParams = @{ ContentType = 'application/json' ModelId = $ModelID Body = $byteArray } Write-Debug -Message 'Cmdlet Params:' Write-Debug -Message ($cmdletParams | Out-String) Write-Debug -Message 'Body JSON:' Write-Debug -Message ($jsonBody | Out-String) #endregion #region commonParams $commonParams = @{} if ($AccessKey) { $commonParams.Add('AccessKey', $AccessKey) } if ($Credential) { $commonParams.Add('Credential', $Credential) } if ($EndpointUrl) { $commonParams.Add('EndpointUrl', $EndpointUrl) } if ($NetworkCredential) { $commonParams.Add('NetworkCredential', $NetworkCredential) } if ($ProfileLocation) { $commonParams.Add('ProfileLocation', $ProfileLocation) } if ($ProfileName) { $commonParams.Add('ProfileName', $ProfileName) } if ($Region) { $commonParams.Add('Region', $Region) } if ($SecretKey) { $commonParams.Add('SecretKey', $SecretKey) } if ($SessionToken) { $commonParams.Add('SessionToken', $SessionToken) } #endregion try { $rawResponse = Invoke-BDRRModel @cmdletParams @commonParams -ErrorAction Stop } catch { # we need to reset the user context if the model fails to respond $contextObj = $Global:pwshBedrockModelContext | Where-Object { $_.ModelID -eq $ModelID } Write-Debug -Message ($contextObj | Out-String) $contextObj.Context = $originalContext $exceptionMessage = $_.Exception.Message if ($exceptionMessage -like "*don't have access*") { Write-Debug -Message 
'Specific Error' Write-Warning -Message 'You do not have access to the requested model.' Write-Warning -Message 'In your AWS account, you will need to request access to the model.' Write-Warning -Message 'AWS -> Amazon Bedrock -> Model Access -> Request Access' throw ('No access to model {0}.' -f $ModelID) } else { Write-Debug -Message 'General Error' Write-Debug -Message ($_ | Out-String) Write-Error -Message $_ Write-Error -Message $_.Exception.Message throw } } if ([String]::IsNullOrWhiteSpace($rawResponse)) { throw 'No response from model API.' } Write-Verbose -Message 'Processing response.' try { $jsonBody = ConvertFrom-MemoryStreamToString -MemoryStream $rawResponse.body -ErrorAction Stop } catch { # we need to reset the user context if the model fails to respond $contextObj = $Global:pwshBedrockModelContext | Where-Object { $_.ModelID -eq $ModelID } Write-Debug -Message ($contextObj | Out-String) $contextObj.Context = $originalContext Write-Error $_ throw } Write-Debug -Message 'Response JSON:' Write-Debug -Message ($jsonBody | Out-String) Write-Verbose -Message 'Converting response from JSON.' $response = $jsonBody | ConvertFrom-Json if ([string]::IsNullOrWhiteSpace($response.generation)) { if ($MaxTokens -lt 150) { Write-Warning -Message 'In some cases, the model may return an empty response when the max tokens is set to a low value.' Write-Warning -Message ('MaxTokens on this call was set to {0}.' -f $MaxTokens) Write-Warning -Message 'Try increasing the MaxTokens value and try again.' } throw ('No response text was returned from model API: {0}' -f $ModelID) } Write-Verbose -Message 'Calculating cost estimate.' Add-ModelCostEstimate -Usage $response -ModelID $ModelID Write-Verbose -Message 'Adding response to model context history.' $content = $response.generation $formatMetaTextMessageSplat = @{ Role = 'Model' Message = $content ModelID = $ModelID NoContextPersist = $NoContextPersist } if ($SystemPrompt) { $formatMetaTextMessageSplat.Add('SystemPrompt', $SystemPrompt) } Format-MetaTextMessage @formatMetaTextMessageSplat | Out-Null if ($ReturnFullObject) { return $response } else { return $content } } #Invoke-MetaModel <# .EXTERNALHELP pwshBedrock-help.xml #> function Invoke-MistralAIChatModel { [CmdletBinding()] [Diagnostics.CodeAnalysis.SuppressMessageAttribute('PSUsePSCredentialType', '', Justification = 'Suppressed to support AWS credential parameter.')] param ( [Parameter(Mandatory = $true, ParameterSetName = 'MessageSet', HelpMessage = 'The message to be sent to the model.')] [Parameter(Mandatory = $true, ParameterSetName = 'CombinedSet', HelpMessage = 'The message to be sent to the model.')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string]$Message, [Parameter(Mandatory = $true, ParameterSetName = 'SystemPromptSet', HelpMessage = 'Sets the behavior and context for the model in the conversation.')] [Parameter(Mandatory = $true, ParameterSetName = 'CombinedSet', HelpMessage = 'Sets the behavior and context for the model in the conversation.')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string]$SystemPrompt, [Parameter(Mandatory = $true, ParameterSetName = 'ToolsResultsSet', HelpMessage = 'A list of results from invoking tools recommended by the model in the previous chat turn.')] [ValidateNotNull()] [PSCustomObject[]]$ToolsResults, [Parameter(Mandatory = $true, HelpMessage = 'The unique identifier of the model.')] [ValidateSet( 'mistral.mistral-large-2402-v1:0', 'mistral.mistral-small-2402-v1:0', 'mistral.mistral-large-2407-v1:0' )] [string]$ModelID, 
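        # MessageSet, SystemPromptSet, and CombinedSet shape the initial user/system context entries; ToolsResultsSet is used on follow-up turns to return tool output to the model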
[Parameter(Mandatory = $false, HelpMessage = 'Specify if you want the full object returned instead of just the message reply.')] [switch]$ReturnFullObject, [Parameter(Mandatory = $false, HelpMessage = 'Do not persist the conversation context history.')] [switch]$NoContextPersist, # model parameters [Parameter(Mandatory = $false, HelpMessage = 'Definitions of tools that the model may use.')] [PSCustomObject[]]$Tools, [Parameter(Mandatory = $false, HelpMessage = "Specifies how functions are called. If set to none the model won''t call a function and will generate a message instead. If set to auto the model can choose to either generate a message or call a function. If set to any the model is forced to call a function.")] [ValidateSet('auto', 'any', 'none')] [string]$ToolChoice, [Parameter(Mandatory = $false, HelpMessage = 'The maximum number of tokens to generate before stopping.')] [ValidateRange(1, 8192)] [int]$MaxTokens = 8192, [Parameter(Mandatory = $false, HelpMessage = 'The amount of randomness injected into the response.')] [ValidateRange(0.0, 1.0)] [float]$Temperature, [Parameter(Mandatory = $false, HelpMessage = 'Use a lower value to ignore less probable options and decrease the diversity of responses.')] [ValidateRange(0.01, 0.99)] [float]$TopP, # Common Parameters [Parameter(Mandatory = $false, HelpMessage = 'The AWS access key for the user account.')] [string]$AccessKey, [Parameter(Mandatory = $false, HelpMessage = 'An AWSCredentials object instance containing access and secret key information, and optionally a token for session-based credentials.')] [Amazon.Runtime.AWSCredentials]$Credential, [Parameter(Mandatory = $false, HelpMessage = 'The endpoint to make the call against. Not for normal use.')] [string]$EndpointUrl, [Parameter(Mandatory = $false, HelpMessage = 'Used with SAML-based authentication when ProfileName references a SAML role profile.')] [System.Management.Automation.PSCredential]$NetworkCredential, [Parameter(Mandatory = $false, HelpMessage = 'Used to specify the name and location of the ini-format credential file (shared with the AWS CLI and other AWS SDKs)')] [string]$ProfileLocation, [Parameter(Mandatory = $false, HelpMessage = 'The user-defined name of an AWS credentials or SAML-based role profile containing credential information.')] [string]$ProfileName, [Parameter(Mandatory = $false, HelpMessage = 'The system name of an AWS region or an AWSRegion instance.')] [object]$Region, [Parameter(Mandatory = $false, HelpMessage = 'The AWS secret key for the user account.')] [string]$SecretKey, [Parameter(Mandatory = $false, HelpMessage = 'The session token if the access and secret keys are temporary session-based credentials.')] [string]$SessionToken ) $modelInfo = $script:mistralAIModelInfo | Where-Object { $_.ModelId -eq $ModelID } Write-Debug -Message 'Model Info:' Write-Debug -Message ($modelInfo | Out-String) if ( $PSCmdlet.ParameterSetName -eq 'MessageSet' -or $PSCmdlet.ParameterSetName -eq 'SystemPromptSet' -or $PSCmdlet.ParameterSetName -eq 'CombinedSet') { Write-Debug -Message $PSCmdlet.ParameterSetName # the system prompt must always be the first message in the context, otherwise the model will fail validation # *Note: on subsequent calls, the system prompt will be updated instead of replaced, ensuring the system prompt is always the first message in the context if ($SystemPrompt) { $formatMistralAIChatSplat = @{ Role = 'system' Message = $SystemPrompt ModelID = $ModelID NoContextPersist = $NoContextPersist } $formattedSystemMessage = 
Format-MistralAIChatModel @formatMistralAIChatSplat } if ($Message) { $formatMistralAIChatSplat = @{ Role = 'user' Message = $Message ModelID = $ModelID NoContextPersist = $NoContextPersist } $formattedUserMessage = Format-MistralAIChatModel @formatMistralAIChatSplat } } elseif ($PSCmdlet.ParameterSetName -eq 'ToolsResultsSet') { Write-Debug -Message 'ToolsResultsSet' # ToolsResults - must be formed properly $toolsResultsEval = Test-MistralAIChatToolResult -ToolResults $ToolsResults if ($toolsResultsEval -ne $true) { throw 'Tool results validation failed.' } foreach ($toolResult in $ToolsResults) { $formatMistralAIChatSplat = @{ Role = 'tool' ToolsResults = $toolResult ModelID = $ModelID NoContextPersist = $NoContextPersist } $formattedToolsResults += Format-MistralAIChatModel @formatMistralAIChatSplat } } if ($NoContextPersist -eq $true) { $formattedMessages = @( $formattedUserMessage $formattedSystemMessage $formattedToolsResults ) } else { $formattedMessages = Get-ModelContext -ModelID $ModelID } #region cmdletParams $bodyObj = @{ messages = @( $formattedMessages ) } if ($Tools) { # Tools - must be formed properly $toolsEval = Test-MistralAIChatTool -Tools $Tools if ($toolsEval -ne $true) { throw 'Tools validation failed.' } $bodyObj.Add('tools', $Tools) } if ($ToolChoice) { $bodyObj.Add('tool_choice', $ToolChoice) } if ($MaxTokens) { $bodyObj.Add('max_tokens', $MaxTokens) } if ($Temperature) { $bodyObj.Add('temperature', $Temperature) } if ($TopP) { $bodyObj.Add('top_p', $TopP) } # at this point in memory, the messages context is still in object form $jsonBody = $bodyObj | ConvertTo-Json -Depth 10 [byte[]]$byteArray = [System.Text.Encoding]::UTF8.GetBytes($jsonBody) $cmdletParams = @{ ContentType = 'application/json' ModelId = $ModelID Body = $byteArray } Write-Debug -Message 'Cmdlet Params:' Write-Debug -Message ($cmdletParams | Out-String) Write-Debug -Message 'Body JSON:' Write-Debug -Message ($jsonBody | Out-String) #endregion #region commonParams $commonParams = @{} if ($AccessKey) { $commonParams.Add('AccessKey', $AccessKey) } if ($Credential) { $commonParams.Add('Credential', $Credential) } if ($EndpointUrl) { $commonParams.Add('EndpointUrl', $EndpointUrl) } if ($NetworkCredential) { $commonParams.Add('NetworkCredential', $NetworkCredential) } if ($ProfileLocation) { $commonParams.Add('ProfileLocation', $ProfileLocation) } if ($ProfileName) { $commonParams.Add('ProfileName', $ProfileName) } if ($Region) { $commonParams.Add('Region', $Region) } if ($SecretKey) { $commonParams.Add('SecretKey', $SecretKey) } if ($SessionToken) { $commonParams.Add('SessionToken', $SessionToken) } #endregion try { $rawResponse = Invoke-BDRRModel @cmdletParams @commonParams -ErrorAction Stop } catch { # we need to remove the user context from the global variable if the model is not successfully engaged $context = $Global:pwshBedrockModelContext | Where-Object { $_.ModelID -eq $ModelID } Write-Debug -Message 'Catch Block. Context:' Write-Debug -Message ($context | Out-String) Write-Debug -Message ('Context count: {0}' -f $context.Context.Count) if ($context.Context.Count -le 1) { Write-Debug -Message 'Resetting context.' 
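            # a single stored entry means only this call's message is in context, so start over with an empty list; otherwise remove just the entries added for this failed call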
$context.Context = New-Object System.Collections.Generic.List[object] } else { $context.Context.RemoveAt($context.Context.Count - 1) # special case if two messages were loaded into the context if ($PSCmdlet.ParameterSetName -eq 'CombinedSet') { $context.Context.RemoveAt($context.Context.Count - 1) } } $exceptionMessage = $_.Exception.Message if ($exceptionMessage -like "*don't have access*") { Write-Debug -Message 'Specific Error' Write-Warning -Message 'You do not have access to the requested model.' Write-Warning -Message 'In your AWS account, you will need to request access to the model.' Write-Warning -Message 'AWS -> Amazon Bedrock -> Model Access -> Request Access' throw ('No access to model {0}.' -f $ModelID) } else { Write-Debug -Message 'General Error' Write-Debug -Message ($_ | Out-String) Write-Error -Message $_ Write-Error -Message $_.Exception.Message throw } } if ([String]::IsNullOrWhiteSpace($rawResponse)) { throw 'No response from model API.' } Write-Verbose -Message 'Processing response.' try { $jsonBody = ConvertFrom-MemoryStreamToString -MemoryStream $rawResponse.body -ErrorAction Stop } catch { # we need to remove the user context from the global variable if the model is not successfully engaged $context = $Global:pwshBedrockModelContext | Where-Object { $_.ModelID -eq $ModelID } Write-Debug -Message ($context | Out-String) Write-Debug -Message ('Context count: {0}' -f $context.Context.Count) if ($context.Context.Count -le 1) { Write-Debug -Message 'Resetting context.' $context.Context = New-Object System.Collections.Generic.List[object] } else { $context.Context.RemoveAt($context.Context.Count - 1) # special case if two messages were loaded into the context if ($PSCmdlet.ParameterSetName -eq 'CombinedSet') { $context.Context.RemoveAt($context.Context.Count - 1) } } Write-Error $_ throw } Write-Debug -Message 'Response JSON:' Write-Debug -Message ($jsonBody | Out-String) Write-Verbose -Message 'Converting response from JSON.' $response = $jsonBody | ConvertFrom-Json # in this model null content responses are expected when the assistant is returning tool_calls if ($response.choices.stop_reason -eq 'tool_calls') { Write-Verbose -Message 'Tool calls detected.' # determine if tool_calls is null if ($response.choices.message.tool_calls.Count -eq 0) { Write-Warning -Message 'Tool calls detected but no tool calls were returned.' throw 'No tool calls were returned from model API.' } if ($NoContextPersist -eq $false) { $formatMistralAIChatSplat = @{ Role = 'assistant' ToolCalls = $response.choices.message.tool_calls ModelID = $ModelID NoContextPersist = $NoContextPersist } Format-MistralAIChatModel @formatMistralAIChatSplat | Out-Null } } #if_tool_calls else { if ([string]::IsNullOrWhiteSpace($response.choices.message.content)) { if ($MaxTokens -lt 150) { Write-Warning -Message 'In some cases, the model may return an empty response when the max tokens is set to a low value.' Write-Warning -Message ('MaxTokens on this call was set to {0}.' -f $MaxTokens) Write-Warning -Message 'Try increasing the MaxTokens value and try again.' } throw ('No response text was returned from model API: {0}' -f $ModelID) } if ($NoContextPersist -eq $false) { Write-Verbose -Message 'Adding response to model context history.' $formatMistralAIChatModelSplat = @{ Role = 'assistant' Message = $response.choices.message.content ModelID = $ModelID } Format-MistralAIChatModel @formatMistralAIChatModelSplat | Out-Null } } #else_tool_calls Write-Verbose -Message 'Calculating cost estimate.' 
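    # serialize the formatted conversation so Add-ModelCostEstimate can base the estimate on the message text for this model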
$message = $formattedMessages | ConvertTo-Json -Depth 10 | Out-String Add-ModelCostEstimate -Usage $response -Message $Message -ModelID $ModelID if ($ReturnFullObject) { return $response } else { return $response.choices.message.content } } #Invoke-MistralAIChatModel <# .EXTERNALHELP pwshBedrock-help.xml #> function Invoke-MistralAIModel { [CmdletBinding()] [Diagnostics.CodeAnalysis.SuppressMessageAttribute('PSUsePSCredentialType', '', Justification = 'Suppressed to support AWS credential parameter.')] param ( [Parameter(Mandatory = $true, HelpMessage = 'The message to be sent to the model.', ParameterSetName = 'Standard')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string]$Message, [Parameter(Mandatory = $true, HelpMessage = 'The unique identifier of the model.')] [ValidateSet( 'mistral.mistral-7b-instruct-v0:2', 'mistral.mixtral-8x7b-instruct-v0:1', 'mistral.mistral-small-2402-v1:0', 'mistral.mistral-large-2402-v1:0', 'mistral.mistral-large-2407-v1:0' )] [string]$ModelID, [Parameter(Mandatory = $false, HelpMessage = 'Specify if you want the full object returned instead of just the message reply.')] [switch]$ReturnFullObject, [Parameter(Mandatory = $false, HelpMessage = 'Do not persist the conversation context history.')] [switch]$NoContextPersist, # model parameters [Parameter(Mandatory = $false, HelpMessage = 'The maximum number of tokens to generate before stopping.')] [ValidateRange(1, 2048)] [int]$MaxTokens = 2048, [Parameter(Mandatory = $false, HelpMessage = 'Custom text sequences that cause the model to stop generating.')] [ValidateNotNullOrEmpty()] [string[]]$StopSequences, [Parameter(Mandatory = $false, HelpMessage = 'The amount of randomness injected into the response.')] [ValidateRange(0.0, 1.0)] [float]$Temperature, [Parameter(Mandatory = $false, HelpMessage = 'Controls the diversity of text that the model generates by setting the percentage of most-likely candidates that the model considers for the next token.')] [ValidateRange(0.0, 1.0)] [float]$TopP, [Parameter(Mandatory = $false, HelpMessage = 'Controls the number of most-likely candidates that the model considers for the next token.')] [ValidateRange(1, 200)] [int]$TopK, # Common Parameters [Parameter(Mandatory = $false, HelpMessage = 'The AWS access key for the user account.')] [string]$AccessKey, [Parameter(Mandatory = $false, HelpMessage = 'An AWSCredentials object instance containing access and secret key information, and optionally a token for session-based credentials.')] [Amazon.Runtime.AWSCredentials]$Credential, [Parameter(Mandatory = $false, HelpMessage = 'The endpoint to make the call against. 
Not for normal use.')] [string]$EndpointUrl, [Parameter(Mandatory = $false, HelpMessage = 'Used with SAML-based authentication when ProfileName references a SAML role profile.')] [System.Management.Automation.PSCredential]$NetworkCredential, [Parameter(Mandatory = $false, HelpMessage = 'Used to specify the name and location of the ini-format credential file (shared with the AWS CLI and other AWS SDKs)')] [string]$ProfileLocation, [Parameter(Mandatory = $false, HelpMessage = 'The user-defined name of an AWS credentials or SAML-based role profile containing credential information.')] [string]$ProfileName, [Parameter(Mandatory = $false, HelpMessage = 'The system name of an AWS region or an AWSRegion instance.')] [object]$Region, [Parameter(Mandatory = $false, HelpMessage = 'The AWS secret key for the user account.')] [string]$SecretKey, [Parameter(Mandatory = $false, HelpMessage = 'The session token if the access and secret keys are temporary session-based credentials.')] [string]$SessionToken ) $modelInfo = $script:mistralAIModelInfo | Where-Object { $_.ModelId -eq $ModelID } Write-Debug -Message 'Model Info:' Write-Debug -Message ($modelInfo | Out-String) # before we format the message (which creates context), we need to store the current context # this can be used to restore the context if the model fails to respond $originalContext = Get-ModelContext -ModelID $ModelID if ([string]::IsNullOrEmpty($originalContext)) { Write-Debug -Message 'No original context' $originalContext = '' } $formatMistralAITextMessageSplat = @{ Role = 'User' Message = $Message ModelID = $ModelID NoContextPersist = $NoContextPersist } $formattedMessages = Format-MistralAITextMessage @formatMistralAITextMessageSplat #region cmdletParams $bodyObj = @{ prompt = $formattedMessages } if ($MaxTokens -ne 512) { $bodyObj.Add('max_tokens', $MaxTokens) } if ($StopSequences) { $bodyObj.Add('stop', $StopSequences) } if ($Temperature) { $bodyObj.Add('temperature', $Temperature) } if ($TopP) { $bodyObj.Add('top_p', $TopP) } if ($TopK) { $bodyObj.Add('top_k', $TopK) } $jsonBody = $bodyObj | ConvertTo-Json -Depth 10 [byte[]]$byteArray = [System.Text.Encoding]::UTF8.GetBytes($jsonBody) $cmdletParams = @{ ContentType = 'application/json' ModelId = $ModelID Body = $byteArray } Write-Debug -Message 'Cmdlet Params:' Write-Debug -Message ($cmdletParams | Out-String) Write-Debug -Message 'Body JSON:' Write-Debug -Message ($jsonBody | Out-String) #endregion #region commonParams $commonParams = @{} if ($AccessKey) { $commonParams.Add('AccessKey', $AccessKey) } if ($Credential) { $commonParams.Add('Credential', $Credential) } if ($EndpointUrl) { $commonParams.Add('EndpointUrl', $EndpointUrl) } if ($NetworkCredential) { $commonParams.Add('NetworkCredential', $NetworkCredential) } if ($ProfileLocation) { $commonParams.Add('ProfileLocation', $ProfileLocation) } if ($ProfileName) { $commonParams.Add('ProfileName', $ProfileName) } if ($Region) { $commonParams.Add('Region', $Region) } if ($SecretKey) { $commonParams.Add('SecretKey', $SecretKey) } if ($SessionToken) { $commonParams.Add('SessionToken', $SessionToken) } #endregion try { $rawResponse = Invoke-BDRRModel @cmdletParams @commonParams -ErrorAction Stop } catch { # we need to reset the user context if the model fails to respond $contextObj = $Global:pwshBedrockModelContext | Where-Object { $_.ModelID -eq $ModelID } Write-Debug -Message ($contextObj | Out-String) $contextObj.Context = $originalContext $exceptionMessage = $_.Exception.Message if ($exceptionMessage -like "*don't have 
access*") { Write-Debug -Message 'Specific Error' Write-Warning -Message 'You do not have access to the requested model.' Write-Warning -Message 'In your AWS account, you will need to request access to the model.' Write-Warning -Message 'AWS -> Amazon Bedrock -> Model Access -> Request Access' throw ('No access to model {0}.' -f $ModelID) } else { Write-Debug -Message 'General Error' Write-Debug -Message ($_ | Out-String) Write-Error -Message $_ Write-Error -Message $_.Exception.Message throw } } if ([String]::IsNullOrWhiteSpace($rawResponse)) { throw 'No response from model API.' } Write-Verbose -Message 'Processing response.' try { $jsonBody = ConvertFrom-MemoryStreamToString -MemoryStream $rawResponse.body -ErrorAction Stop } catch { # we need to reset the user context if the model fails to respond $contextObj = $Global:pwshBedrockModelContext | Where-Object { $_.ModelID -eq $ModelID } Write-Debug -Message ($contextObj | Out-String) $contextObj.Context = $originalContext Write-Error $_ throw } Write-Debug -Message 'Response JSON:' Write-Debug -Message ($jsonBody | Out-String) Write-Verbose -Message 'Converting response from JSON.' $response = $jsonBody | ConvertFrom-Json if ([string]::IsNullOrWhiteSpace($response.outputs.text)) { if ($MaxTokens -lt 150) { Write-Warning -Message 'In some cases, the model may return an empty response when the max tokens is set to a low value.' Write-Warning -Message ('MaxTokens on this call was set to {0}.' -f $MaxTokens) Write-Warning -Message 'Try increasing the MaxTokens value and try again.' } throw ('No response text was returned from model API: {0}' -f $ModelID) } Write-Verbose -Message 'Calculating cost estimate.' Add-ModelCostEstimate -Usage $response -ModelID $ModelID -Message $Message Write-Verbose -Message 'Adding response to model context history.' $content = $response.outputs.text $formatMistralAITextMessageSplat = @{ Role = 'Model' Message = $content ModelID = $ModelID NoContextPersist = $NoContextPersist } Format-MistralAITextMessage @formatMistralAITextMessageSplat | Out-Null if ($ReturnFullObject) { return $response } else { return $content } } #Invoke-MistralAIModel <# .EXTERNALHELP pwshBedrock-help.xml #> function Invoke-StabilityAIDiffusionModel { [CmdletBinding(DefaultParameterSetName = 'SimplePromptTextToImage')] [Diagnostics.CodeAnalysis.SuppressMessageAttribute('PSUsePSCredentialType', '', Justification = 'Suppressed to support AWS credential parameter.')] param ( #_______________________________________________________ # required parameters [Parameter(Mandatory = $true, HelpMessage = 'The local file path to save the generated images.')] [ValidateScript({ if (-Not ($_ | Test-Path -PathType Container)) { throw 'The Path argument must be a folder. File paths are not allowed.' 
} if (-Not ($_ | Test-Path)) { throw 'File or folder does not exist' } return $true })] $ImagesSavePath, [Parameter(Mandatory = $true, HelpMessage = 'A text prompt used to generate the image.', ParameterSetName = 'SimplePrompt')] [Parameter(Mandatory = $true, ParameterSetName = 'SimplePromptTextToImage')] [Parameter(Mandatory = $true, ParameterSetName = 'SimplePromptImageToImage')] [Parameter(Mandatory = $true, ParameterSetName = 'SimplePromptImageToImageMask')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string[]]$ImagePrompt, [Parameter(Mandatory = $false, HelpMessage = 'Use a negative prompt to tell the model to avoid certain concepts.', ParameterSetName = 'SimplePrompt')] [Parameter(Mandatory = $false, ParameterSetName = 'SimplePromptTextToImage')] [Parameter(Mandatory = $false, ParameterSetName = 'SimplePromptImageToImage')] [Parameter(Mandatory = $false, ParameterSetName = 'SimplePromptImageToImageMask')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string[]]$NegativePrompt, [Parameter(Mandatory = $true, HelpMessage = 'Provide a set of weighted custom prompts to guide the generation of the image.', ParameterSetName = 'CustomPrompt')] [Parameter(Mandatory = $true, ParameterSetName = 'CustomPromptTextToImage')] [Parameter(Mandatory = $true, ParameterSetName = 'CustomPromptImageToImage')] [Parameter(Mandatory = $true, ParameterSetName = 'CustomPromptImageToImageMask')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [object[]]$CustomPrompt, #_______________________________________________________ # text-to-image parameters [Parameter(Mandatory = $false, HelpMessage = 'The width of the image in pixels.', ParameterSetName = 'TextToImage')] [Parameter(Mandatory = $false, ParameterSetName = 'SimplePromptTextToImage')] [Parameter(Mandatory = $false, ParameterSetName = 'CustomPromptTextToImage')] [ValidateSet( 1024, 1152, 1216, 1344, 1536, 640, 768, 832, 896 )] [int]$Width, [Parameter(Mandatory = $false, HelpMessage = 'The height of the image in pixels.', ParameterSetName = 'TextToImage')] [Parameter(Mandatory = $false, ParameterSetName = 'SimplePromptTextToImage')] [Parameter(Mandatory = $false, ParameterSetName = 'CustomPromptTextToImage')] [ValidateSet( 1024, 896, 832, 768, 640, 1536, 1344, 1216, 1152 )] [int]$Height, #_______________________________________________________ # image-to-image parameters [Parameter(Mandatory = $true, HelpMessage = 'File path to image that you want to use to initialize the diffusion process', ParameterSetName = 'ImageToImage')] [Parameter(Mandatory = $true, ParameterSetName = 'SimplePromptImageToImage')] [Parameter(Mandatory = $true, ParameterSetName = 'CustomPromptImageToImage')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string]$InitImagePath, [Parameter(Mandatory = $false, HelpMessage = 'Determines whether to use image_strength or step_schedule_* to control how much influence the image in init_image has on the result.', ParameterSetName = 'ImageToImage')] [Parameter(Mandatory = $false, ParameterSetName = 'SimplePromptImageToImage')] [Parameter(Mandatory = $false, ParameterSetName = 'CustomPromptImageToImage')] [ValidateSet( 'IMAGE_STRENGTH', 'STEP_SCHEDULE' )] [string]$InitImageMode, [Parameter(Mandatory = $false, HelpMessage = 'Determines how much influence the source image in init_image has on the diffusion process. Values close to 1 yield images very similar to the source image. 
Values close to 0 yield images very different than the source image.', ParameterSetName = 'ImageToImage')] [Parameter(Mandatory = $false, ParameterSetName = 'SimplePromptImageToImage')] [Parameter(Mandatory = $false, ParameterSetName = 'CustomPromptImageToImage')] [ValidateRange(0, 1.0)] [float]$ImageStrength, #_______________________________________________________ # image-to-image-masking parameters [Parameter(Mandatory = $true, HelpMessage = 'File path to image that you want to use to initialize the mask diffusion process', ParameterSetName = 'ImageToImage')] [Parameter(Mandatory = $true, ParameterSetName = 'SimplePromptImageToImageMask')] [Parameter(Mandatory = $true, ParameterSetName = 'CustomPromptImageToImageMask')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string]$InitMaskImagePath, [Parameter(Mandatory = $true, HelpMessage = 'Determines where to source the mask from.', ParameterSetName = 'ImageToImageMask')] [Parameter(Mandatory = $true, ParameterSetName = 'SimplePromptImageToImageMask')] [Parameter(Mandatory = $true, ParameterSetName = 'CustomPromptImageToImageMask')] [ValidateSet( 'MASK_IMAGE_WHITE', 'MASK_IMAGE_BLACK', 'INIT_IMAGE_ALPHA' )] [string]$MaskSource, [Parameter(Mandatory = $true, HelpMessage = 'File path to image that you want to use as a mask for the source image in init_image. Must be the same dimensions as the source image.', ParameterSetName = 'ImageToImageMask')] [Parameter(Mandatory = $true, ParameterSetName = 'SimplePromptImageToImageMask')] [Parameter(Mandatory = $true, ParameterSetName = 'CustomPromptImageToImageMask')] [ValidateNotNull()] [ValidateNotNullOrEmpty()] [string]$MaskImagePath, #_______________________________________________________ # common image parameters [Parameter(Mandatory = $false, HelpMessage = 'Determines how much the final image portrays the prompt. Use a lower number to increase randomness in the generation.')] [ValidateRange(0, 35)] [float]$CfgScale, [Parameter(Mandatory = $false, HelpMessage = 'CLIP Guidance is a technique that uses the CLIP neural network to guide the generation of images to be more in-line with your included prompt, which often results in improved coherency.')] [ValidateSet( 'FAST_BLUE', 'FAST_GREEN', 'NONE', 'SIMPLE', 'SLOW', 'SLOWER', 'SLOWEST' )] [string]$ClipGuidancePreset, [Parameter(Mandatory = $false, HelpMessage = 'The sampler to use for the diffusion process. If this value is omitted, the model automatically selects an appropriate sampler for you.')] [ValidateSet( 'DDIM', 'DDPM', 'K_DPMPP_2M', 'K_DPMPP_2S_ANCESTRAL', 'K_DPM_2', 'K_DPM_2_ANCESTRAL', 'K_EULER', 'K_EULER_ANCESTRAL', 'K_HEUN', 'K_LMS' )] [string]$Sampler, [Parameter(Mandatory = $false, HelpMessage = 'The number of images to generate. Currently Amazon Bedrock supports generating one image. If you supply a value for samples, the value must be one.')] [ValidateRange(0, 1)] [int]$Samples, [Parameter(Mandatory = $false, HelpMessage = "The seed determines the initial noise setting. Use the same seed and the same settings as a previous run to allow inference to create a similar image. If you don't set this value, or the value is 0, it is set as a random number.")] [ValidateRange(0, 4294967295)] [int]$Seed, [Parameter(Mandatory = $false, HelpMessage = 'Generation step determines how many times the image is sampled. More steps can result in a more accurate result.')] [ValidateRange(10, 50)] [int]$Steps, [Parameter(Mandatory = $false, HelpMessage = 'A style preset that guides the image model towards a particular style. 
This list of style presets is subject to change.')] [ValidateSet( '3d-model', 'analog-film', 'anime', 'cinematic', 'comic-book', 'digital-art', 'enhance', 'fantasy-art', 'isometric', 'line-art', 'low-poly', 'modeling-compound', 'neon-punk', 'origami', 'photographic', 'pixel-art', 'tile-texture' )] [string]$StylePreset, #_______________________________________________________ [Parameter(Mandatory = $false, HelpMessage = 'The unique identifier of the model.')] [ValidateSet( 'stability.stable-diffusion-xl-v1' )] [string]$ModelID = 'stability.stable-diffusion-xl-v1', [Parameter(Mandatory = $false, HelpMessage = 'Specify if you want the full object returned from the model. This will include the raw base64 image data and other information.')] [switch]$ReturnFullObject, # Common Parameters [Parameter(Mandatory = $false, HelpMessage = 'The AWS access key for the user account.')] [string]$AccessKey, [Parameter(Mandatory = $false, HelpMessage = 'An AWSCredentials object instance containing access and secret key information, and optionally a token for session-based credentials.')] [Amazon.Runtime.AWSCredentials]$Credential, [Parameter(Mandatory = $false, HelpMessage = 'The endpoint to make the call against. Not for normal use.')] [string]$EndpointUrl, [Parameter(Mandatory = $false, HelpMessage = 'Used with SAML-based authentication when ProfileName references a SAML role profile.')] [System.Management.Automation.PSCredential]$NetworkCredential, [Parameter(Mandatory = $false, HelpMessage = 'Used to specify the name and location of the ini-format credential file (shared with the AWS CLI and other AWS SDKs)')] [string]$ProfileLocation, [Parameter(Mandatory = $false, HelpMessage = 'The user-defined name of an AWS credentials or SAML-based role profile containing credential information.')] [string]$ProfileName, [Parameter(Mandatory = $false, HelpMessage = 'The system name of an AWS region or an AWSRegion instance.')] [object]$Region, [Parameter(Mandatory = $false, HelpMessage = 'The AWS secret key for the user account.')] [string]$SecretKey, [Parameter(Mandatory = $false, HelpMessage = 'The session token if the access and secret keys are temporary session-based credentials.')] [string]$SessionToken ) Write-Debug -Message ('Parameter Set Name: {0}' -f $PSCmdlet.ParameterSetName) $modelInfo = $script:amazonModelInfo | Where-Object { $_.ModelId -eq $ModelID } Write-Debug -Message 'Model Info:' Write-Debug -Message ($modelInfo | Out-String) if ($ClipGuidancePreset -and $Sampler) { Write-Debug -Message 'Validating Sampler' if ($Sampler -notlike '*ANCESTRAL*') { throw 'CLIP Guidance only supports ancestral samplers.' } } if ($Width -or $Height) { # width and height must match one of the supported combinations $supportedSizes = @( '1024x1024', '1152x896', '1216x832', '1344x768', '1536x640', '640x1536', '768x1344', '832x1216', '896x1152' ) $size = '{0}x{1}' -f $Width, $Height Write-Debug -Message ('Size Evaluation: {0}' -f $size) if ($size -notin $supportedSizes) { throw 'Width and Height must match one of the supported combinations.' } } $bodyObj = @{} if ($CustomPrompt) { Write-Debug -Message 'Adding CustomPrompt to body object.' $bodyObj.Add('text_prompts', @($CustomPrompt)) } elseif ($ImagePrompt) { Write-Debug -Message 'Adding ImagePrompt to body object.' 
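        # simple prompts are added to the text_prompts array with weight 1 and any negative prompts with weight -1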
$bodyObj.Add('text_prompts', (New-Object System.Collections.Generic.List[object])) foreach ($prompt in $ImagePrompt) { $bodyObj.text_prompts.Add(@{ text = $prompt weight = 1 }) } foreach ($prompt in $NegativePrompt) { $bodyObj.text_prompts.Add(@{ text = $prompt weight = -1 }) } } if ($PSCmdlet.ParameterSetName -eq 'SimplePromptTextToImage' -or $PSCmdlet.ParameterSetName -eq 'CustomPromptTextToImage') { $bodyObj.Add('height', $Height) $bodyObj.Add('width', $Width) } elseif ($PSCmdlet.ParameterSetName -eq 'SimplePromptImageToImage' -or $PSCmdlet.ParameterSetName -eq 'CustomPromptImageToImage') { Write-Debug -Message 'Validating InitImage' $mediaEval = Test-StabilityAIDiffusionMedia -MediaPath $InitImagePath if ($mediaEval -ne $true) { throw 'Media file not supported.' } else { Write-Debug -Message 'InitImage is supported.' } Write-Debug -Message 'Converting InitImage to base64.' try { $base64 = Convert-MediaToBase64 -MediaPath $InitImagePath -ErrorAction Stop } catch { Write-Error $_ throw } $bodyObj.Add('init_image', $base64) if ($InitImageMode) { $bodyObj.Add('init_image_mode', $InitImageMode) } if ($ImageStrength) { $bodyObj.Add('image_strength', $ImageStrength) } } elseif ($PSCmdlet.ParameterSetName -eq 'SimplePromptImageToImageMask' -or $PSCmdlet.ParameterSetName -eq 'CustomPromptImageToImageMask') { Write-Debug -Message 'Validating Init MaskImage' $mediaEval = Test-StabilityAIDiffusionMedia -MediaPath $InitMaskImagePath if ($mediaEval -ne $true) { throw 'Media file not supported.' } else { Write-Debug -Message 'Init MaskImage is supported.' } Write-Debug -Message 'Converting Init MaskImage to base64.' try { $base64 = Convert-MediaToBase64 -MediaPath $InitMaskImagePath -ErrorAction Stop } catch { Write-Error $_ throw } $bodyObj.Add('init_image', $base64) Write-Debug -Message 'Validating MaskImage' $mediaEval = Test-StabilityAIDiffusionMedia -MediaPath $MaskImagePath if ($mediaEval -ne $true) { throw 'Mask file not supported.' } else { Write-Debug -Message 'MaskImage is supported.' } Write-Debug -Message 'Converting MaskImage to base64.' 
try { $base64 = Convert-MediaToBase64 -MediaPath $MaskImagePath -ErrorAction Stop } catch { Write-Error $_ throw } $bodyObj.Add('mask_image', $base64) $bodyObj.Add('mask_source', $MaskSource) } #region common image parameters if ($CfgScale) { $bodyObj.Add('cfg_scale', $CfgScale) } if ($ClipGuidancePreset) { $bodyObj.Add('clip_guidance_preset', $ClipGuidancePreset) } if ($Sampler) { $bodyObj.Add('sampler', $Sampler) } if ($Samples) { $bodyObj.Add('samples', $Samples) } if ($Seed) { $bodyObj.Add('seed', $Seed) } if ($Steps) { $bodyObj.Add('steps', $Steps) } if ($StylePreset) { $bodyObj.Add('style_preset', $StylePreset) } #endregion $jsonBody = $bodyObj | ConvertTo-Json -Depth 10 [byte[]]$byteArray = [System.Text.Encoding]::UTF8.GetBytes($jsonBody) $cmdletParams = @{ ContentType = 'application/json' ModelId = $ModelID Body = $byteArray } Write-Debug -Message 'Cmdlet Params:' Write-Debug -Message ($cmdletParams | Out-String) Write-Debug -Message 'Body JSON:' Write-Debug -Message ($jsonBody | Out-String) #region commonParams $commonParams = @{} if ($AccessKey) { $commonParams.Add('AccessKey', $AccessKey) } if ($Credential) { $commonParams.Add('Credential', $Credential) } if ($EndpointUrl) { $commonParams.Add('EndpointUrl', $EndpointUrl) } if ($NetworkCredential) { $commonParams.Add('NetworkCredential', $NetworkCredential) } if ($ProfileLocation) { $commonParams.Add('ProfileLocation', $ProfileLocation) } if ($ProfileName) { $commonParams.Add('ProfileName', $ProfileName) } if ($Region) { $commonParams.Add('Region', $Region) } if ($SecretKey) { $commonParams.Add('SecretKey', $SecretKey) } if ($SessionToken) { $commonParams.Add('SessionToken', $SessionToken) } #endregion try { $rawResponse = Invoke-BDRRModel @cmdletParams @commonParams -ErrorAction Stop } catch { $exceptionMessage = $_.Exception.Message if ($exceptionMessage -like "*don't have access*") { Write-Debug -Message 'Specific Error' Write-Warning -Message 'You do not have access to the requested model.' Write-Warning -Message 'In your AWS account, you will need to request access to the model.' Write-Warning -Message 'AWS -> Amazon Bedrock -> Model Access -> Request Access' throw ('No access to model {0}.' -f $ModelID) } else { Write-Debug -Message 'General Error' Write-Debug -Message ($_ | Out-String) Write-Error -Message $_ Write-Error -Message $_.Exception.Message throw } } if ([String]::IsNullOrWhiteSpace($rawResponse)) { throw 'No response from model API.' } Write-Verbose -Message'Processing response.' try { $jsonBody = ConvertFrom-MemoryStreamToString -MemoryStream $rawResponse.body -ErrorAction Stop } catch { Write-Error $_ throw } Write-Debug -Message 'Response JSON:' Write-Debug -Message ($jsonBody | Out-String) Write-Verbose -Message 'Converting response from JSON.' $response = $jsonBody | ConvertFrom-Json $artifactCount = ($response.artifacts | Measure-Object).Count Write-Debug -Message ('Artifacts Count: {0}' -f $artifactCount) if ($artifactCount -eq 0) { Write-Warning -Message 'No images were returned from the model.' } else { if ($response.artifacts.finishReason -eq 'CONTENT_FILTERED') { Write-Warning -Message 'The content was filtered by the model.' Write-Warning -Message 'An image was still generated, but it may be blurred, blanked out, or in an undesired state.' } $imageCount = $artifactCount Write-Verbose -Message ('Processing {0} images returned from model.' -f $imageCount) Write-Verbose -Message 'Calculating cost estimate.' 
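            # image model usage is tallied by image count and steps rather than token counts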
Add-ModelCostEstimate -ImageCount $imageCount -Steps $Steps -ModelID $ModelID foreach ($image in $response.artifacts) { Write-Verbose -Message ('....Processing image {0}.' -f $imageCount) try { $imageBytes = Convert-FromBase64ToByte -Base64String $image.base64 -ErrorAction Stop } catch { Write-Error $_ throw } $imageFileName = '{0}-{1}.png' -f 'stability.stable-diffusion-xl-v1', (Get-Date -Format 'yyyyMMdd-HHmmss') $imageFilePath = [System.IO.Path]::Combine($ImagesSavePath, $imageFileName) Write-Verbose -Message ('Saving image to {0}.' -f $imageFilePath) try { Save-BytesToFile -ImageBytes $imageBytes -FilePath $imageFilePath -ErrorAction Stop } catch { Write-Error $_ throw } Start-Sleep -Milliseconds 5500 #for naming uniqueness $imageCount-- } #foreach_image } if ($ReturnFullObject) { return $response } } #Invoke-StabilityAIDiffusionModel <# .EXTERNALHELP pwshBedrock-help.xml #> function Reset-ModelContext { [CmdletBinding(ConfirmImpact = 'Low', SupportsShouldProcess = $true)] [Diagnostics.CodeAnalysis.SuppressMessageAttribute('PSUseDeclaredVarsMoreThanAssignments', '', Justification = 'The purpose of this function is to reset variables, not use them')] param ( [Parameter(Mandatory = $true, HelpMessage = 'The unique identifier of the model.', ParameterSetName = 'Single')] [ValidateSet( 'Converse', # 'ai21.j2-grande-instruct', # 'ai21.j2-jumbo-instruct', 'ai21.jamba-instruct-v1:0', # 'ai21.j2-mid-v1', # 'ai21.j2-ultra-v1', 'amazon.titan-image-generator-v1', 'amazon.titan-text-express-v1', 'amazon.titan-text-lite-v1', 'amazon.titan-text-premier-v1:0', 'amazon.titan-tg1-large', 'anthropic.claude-v2:1', 'anthropic.claude-3-haiku-20240307-v1:0', 'anthropic.claude-3-sonnet-20240229-v1:0', 'anthropic.claude-3-opus-20240229-v1:0', 'anthropic.claude-3-5-sonnet-20240620-v1:0', # 'cohere.command-text-v14', # 'cohere.command-light-text-v14', 'cohere.command-r-v1:0', 'cohere.command-r-plus-v1:0', 'meta.llama2-13b-chat-v1', 'meta.llama2-70b-chat-v1', 'meta.llama3-70b-instruct-v1:0', 'meta.llama3-8b-instruct-v1:0', 'meta.llama3-1-8b-instruct-v1:0', 'meta.llama3-1-70b-instruct-v1:0', 'mistral.mistral-7b-instruct-v0:2', 'mistral.mistral-large-2402-v1:0', 'mistral.mistral-large-2407-v1:0', 'mistral.mistral-small-2402-v1:0', 'mistral.mixtral-8x7b-instruct-v0:1', 'stability.stable-diffusion-xl-v1' )] [string]$ModelID, [Parameter(Mandatory = $true, HelpMessage = 'Resets the message context for all models.', ParameterSetName = 'All')] [switch]$AllModels, [Parameter(Mandatory = $false, HelpMessage = 'Skip confirmation')] [switch]$Force ) Begin { if (-not $PSBoundParameters.ContainsKey('Verbose')) { $VerbosePreference = $PSCmdlet.SessionState.PSVariable.GetValue('VerbosePreference') } if (-not $PSBoundParameters.ContainsKey('Confirm')) { $ConfirmPreference = $PSCmdlet.SessionState.PSVariable.GetValue('ConfirmPreference') } if (-not $PSBoundParameters.ContainsKey('WhatIf')) { $WhatIfPreference = $PSCmdlet.SessionState.PSVariable.GetValue('WhatIfPreference') } Write-Verbose -Message ('[{0}] Confirm={1} ConfirmPreference={2} WhatIf={3} WhatIfPreference={4}' -f $MyInvocation.MyCommand, $Confirm, $ConfirmPreference, $WhatIf, $WhatIfPreference) Write-Verbose -Message ('ParameterSetName: {0}' -f $PSCmdlet.ParameterSetName) } #begin Process { Write-Verbose -Message 'Processing Reset-ModelContext' switch ($PSCmdlet.ParameterSetName) { 'Single' { if ($Force -or $PSCmdlet.ShouldProcess($ModelID, 'Reset-ModelContext')) { Write-Verbose -Message ('Resetting message context for {0}' -f $ModelID) $context = 
<#
.EXTERNALHELP pwshBedrock-help.xml
#>
function Reset-ModelContext {
    [CmdletBinding(ConfirmImpact = 'Low', SupportsShouldProcess = $true)]
    [Diagnostics.CodeAnalysis.SuppressMessageAttribute('PSUseDeclaredVarsMoreThanAssignments', '',
        Justification = 'The purpose of this function is to reset variables, not use them')]
    param (
        [Parameter(Mandatory = $true,
            HelpMessage = 'The unique identifier of the model.', ParameterSetName = 'Single')]
        [ValidateSet(
            'Converse',
            # 'ai21.j2-grande-instruct',
            # 'ai21.j2-jumbo-instruct',
            'ai21.jamba-instruct-v1:0',
            # 'ai21.j2-mid-v1',
            # 'ai21.j2-ultra-v1',
            'amazon.titan-image-generator-v1',
            'amazon.titan-text-express-v1',
            'amazon.titan-text-lite-v1',
            'amazon.titan-text-premier-v1:0',
            'amazon.titan-tg1-large',
            'anthropic.claude-v2:1',
            'anthropic.claude-3-haiku-20240307-v1:0',
            'anthropic.claude-3-sonnet-20240229-v1:0',
            'anthropic.claude-3-opus-20240229-v1:0',
            'anthropic.claude-3-5-sonnet-20240620-v1:0',
            # 'cohere.command-text-v14',
            # 'cohere.command-light-text-v14',
            'cohere.command-r-v1:0',
            'cohere.command-r-plus-v1:0',
            'meta.llama2-13b-chat-v1',
            'meta.llama2-70b-chat-v1',
            'meta.llama3-70b-instruct-v1:0',
            'meta.llama3-8b-instruct-v1:0',
            'meta.llama3-1-8b-instruct-v1:0',
            'meta.llama3-1-70b-instruct-v1:0',
            'mistral.mistral-7b-instruct-v0:2',
            'mistral.mistral-large-2402-v1:0',
            'mistral.mistral-large-2407-v1:0',
            'mistral.mistral-small-2402-v1:0',
            'mistral.mixtral-8x7b-instruct-v0:1',
            'stability.stable-diffusion-xl-v1'
        )]
        [string]$ModelID,

        [Parameter(Mandatory = $true,
            HelpMessage = 'Resets the message context for all models.', ParameterSetName = 'All')]
        [switch]$AllModels,

        [Parameter(Mandatory = $false,
            HelpMessage = 'Skip confirmation')]
        [switch]$Force
    )
    Begin {

        if (-not $PSBoundParameters.ContainsKey('Verbose')) {
            $VerbosePreference = $PSCmdlet.SessionState.PSVariable.GetValue('VerbosePreference')
        }
        if (-not $PSBoundParameters.ContainsKey('Confirm')) {
            $ConfirmPreference = $PSCmdlet.SessionState.PSVariable.GetValue('ConfirmPreference')
        }
        if (-not $PSBoundParameters.ContainsKey('WhatIf')) {
            $WhatIfPreference = $PSCmdlet.SessionState.PSVariable.GetValue('WhatIfPreference')
        }
        Write-Verbose -Message ('[{0}] Confirm={1} ConfirmPreference={2} WhatIf={3} WhatIfPreference={4}' -f $MyInvocation.MyCommand, $Confirm, $ConfirmPreference, $WhatIf, $WhatIfPreference)
        Write-Verbose -Message ('ParameterSetName: {0}' -f $PSCmdlet.ParameterSetName)

    } #begin
    Process {
        Write-Verbose -Message 'Processing Reset-ModelContext'

        switch ($PSCmdlet.ParameterSetName) {
            'Single' {
                if ($Force -or $PSCmdlet.ShouldProcess($ModelID, 'Reset-ModelContext')) {
                    Write-Verbose -Message ('Resetting message context for {0}' -f $ModelID)
                    $context = $Global:pwshBedrockModelContext | Where-Object { $_.ModelID -eq $ModelID }
                    Write-Debug -Message ($context | Out-String)
                    # Titan text models track context as a raw string; the rest use a message list
                    if ($ModelID -eq 'amazon.titan-text-express-v1' -or
                        $ModelID -eq 'amazon.titan-text-lite-v1' -or
                        $ModelID -eq 'amazon.titan-tg1-large') {
                        $context.Context = ''
                    }
                    else {
                        $context.Context = New-Object System.Collections.Generic.List[object]
                    }
                }
            }
            'All' {
                if ($Force -or $PSCmdlet.ShouldProcess('AllModels', 'Reset-ModelContext')) {
                    Write-Verbose -Message 'Resetting message context for all models.'
                    $allModelInfo = Get-ModelInfo -AllModels
                    $allModelIDs = ($allModelInfo | Where-Object {
                            $_.ModelId -ne 'amazon.titan-image-generator-v1' -and
                            $_.ModelId -notlike 'ai21.j2*' -and
                            $_.ModelId -ne 'cohere.command-text-v14' -and
                            $_.ModelId -ne 'cohere.command-light-text-v14'
                        }).ModelID
                    foreach ($model in $allModelIDs) {
                        $context = $Global:pwshBedrockModelContext | Where-Object { $_.ModelID -eq $model }
                        Write-Debug -Message ($context | Out-String)
                        # models that track context as a raw string reset to an empty string; the rest reset to an empty message list
                        if ($model -eq 'amazon.titan-text-express-v1' -or
                            $model -eq 'amazon.titan-text-lite-v1' -or
                            $model -eq 'amazon.titan-tg1-large' -or
                            $model -eq 'meta.llama2-13b-chat-v1' -or
                            $model -eq 'meta.llama2-70b-chat-v1' -or
                            $model -eq 'meta.llama3-8b-instruct-v1:0' -or
                            $model -eq 'meta.llama3-70b-instruct-v1:0' -or
                            $model -eq 'meta.llama3-1-8b-instruct-v1:0' -or
                            $model -eq 'meta.llama3-1-70b-instruct-v1:0' -or
                            $model -eq 'mistral.mistral-7b-instruct-v0:2' -or
                            $model -eq 'mistral.mixtral-8x7b-instruct-v0:1' -or
                            $model -eq 'mistral.mistral-large-2402-v1:0' -or
                            $model -eq 'mistral.mistral-large-2407-v1:0' -or
                            $model -eq 'mistral.mistral-small-2402-v1:0') {
                            Write-Debug -Message ('Resetting message context for {0}' -f $model)
                            $context.Context = ''
                            Write-Debug -Message ($context | Out-String)
                        }
                        else {
                            Write-Debug -Message ('Resetting message context for {0}' -f $model)
                            $context.Context = New-Object System.Collections.Generic.List[object]
                            Write-Debug -Message ($context | Out-String)
                        }
                    }
                    # also reset Converse
                    $context = $Global:pwshBedrockModelContext | Where-Object { $_.ModelID -eq 'Converse' }
                    Write-Debug -Message ($context | Out-String)
                    $context.Context = New-Object System.Collections.Generic.List[object]
                }
            }
        }

    } #process
    End {
        Write-Verbose -Message 'Reset-ModelContext complete'
    } #end
} #Reset-ModelContext
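#------------------------------------------------------------------------------------------------------
# Illustrative usage sketch (comments only, nothing here executes at module import), assuming the
# module has been imported and context has accumulated in $Global:pwshBedrockModelContext:
#
#     # reset the stored conversation context for a single model
#     Reset-ModelContext -ModelID 'anthropic.claude-3-5-sonnet-20240620-v1:0' -Force
#
#     # reset the stored conversation context for every supported model, including Converse
#     Reset-ModelContext -AllModels -Force
#------------------------------------------------------------------------------------------------------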
<#
.EXTERNALHELP pwshBedrock-help.xml
#>
function Reset-ModelTally {
    [CmdletBinding(ConfirmImpact = 'Low', SupportsShouldProcess = $true)]
    param (
        [Parameter(Mandatory = $true,
            HelpMessage = 'The unique identifier of the model.', ParameterSetName = 'Single')]
        [ValidateSet(
            'ai21.j2-grande-instruct',
            'ai21.j2-jumbo-instruct',
            'ai21.jamba-instruct-v1:0',
            'ai21.j2-mid-v1',
            'ai21.j2-ultra-v1',
            'amazon.titan-image-generator-v1',
            'amazon.titan-text-express-v1',
            'amazon.titan-text-lite-v1',
            'amazon.titan-text-premier-v1:0',
            'amazon.titan-tg1-large',
            'anthropic.claude-v2:1',
            'anthropic.claude-3-haiku-20240307-v1:0',
            'anthropic.claude-3-opus-20240229-v1:0',
            'anthropic.claude-3-sonnet-20240229-v1:0',
            'anthropic.claude-3-5-sonnet-20240620-v1:0',
            'cohere.command-text-v14',
            'cohere.command-light-text-v14',
            'cohere.command-r-v1:0',
            'cohere.command-r-plus-v1:0',
            'meta.llama2-13b-chat-v1',
            'meta.llama2-70b-chat-v1',
            'meta.llama3-70b-instruct-v1:0',
            'meta.llama3-8b-instruct-v1:0',
            'meta.llama3-1-8b-instruct-v1:0',
            'meta.llama3-1-70b-instruct-v1:0',
            'mistral.mistral-7b-instruct-v0:2',
            'mistral.mistral-small-2402-v1:0',
            'mistral.mistral-large-2402-v1:0',
            'mistral.mistral-large-2407-v1:0',
            'mistral.mixtral-8x7b-instruct-v0:1',
            'stability.stable-diffusion-xl-v1'
        )]
        [string]$ModelID,

        [Parameter(Mandatory = $true,
            HelpMessage = 'Resets the tally for all models.', ParameterSetName = 'All')]
        [switch]$AllModels,

        [Parameter(Mandatory = $false,
            HelpMessage = 'Skip confirmation')]
        [switch]$Force
    )
    Begin {

        if (-not $PSBoundParameters.ContainsKey('Verbose')) {
            $VerbosePreference = $PSCmdlet.SessionState.PSVariable.GetValue('VerbosePreference')
        }
        if (-not $PSBoundParameters.ContainsKey('Confirm')) {
            $ConfirmPreference = $PSCmdlet.SessionState.PSVariable.GetValue('ConfirmPreference')
        }
        if (-not $PSBoundParameters.ContainsKey('WhatIf')) {
            $WhatIfPreference = $PSCmdlet.SessionState.PSVariable.GetValue('WhatIfPreference')
        }
        Write-Verbose -Message ('[{0}] Confirm={1} ConfirmPreference={2} WhatIf={3} WhatIfPreference={4}' -f $MyInvocation.MyCommand, $Confirm, $ConfirmPreference, $WhatIf, $WhatIfPreference)
        Write-Verbose -Message ('ParameterSetName: {0}' -f $PSCmdlet.ParameterSetName)

    } #begin
    Process {
        Write-Verbose -Message 'Processing Reset-ModelTally'

        switch ($PSCmdlet.ParameterSetName) {
            'Single' {
                if ($Force -or $PSCmdlet.ShouldProcess($ModelID, 'Reset-ModelTally')) {
                    Write-Verbose -Message ('Resetting model tally for {0}' -f $ModelID)
                    $modelTally = $Global:pwshBedRockSessionModelTally | Where-Object { $_.ModelID -eq $ModelID }
                    # image models track ImageCount/ImageCost instead of token counts (mirrors the AllModels reset below)
                    if ($null -ne $modelTally.ImageCount) {
                        $modelTally.ImageCount = 0
                        $modelTally.ImageCost = 0
                    }
                    else {
                        $modelTally.TotalCost = 0
                        $modelTally.InputTokenCount = 0
                        $modelTally.OutputTokenCount = 0
                        $modelTally.InputTokenCost = 0
                        $modelTally.OutputTokenCost = 0
                    }
                    Write-Debug -Message ($modelTally | Out-String)
                }
            }
            'All' {
                if ($Force -or $PSCmdlet.ShouldProcess('AllModels', 'Reset-ModelTally')) {
                    Write-Verbose -Message 'Resetting all model tallies'
                    $Global:pwshBedRockSessionCostEstimate = 0
                    $Global:pwshBedRockSessionModelTally | ForEach-Object {
                        # if the object has the ImageCount property, we will reset an image object, otherwise we will reset a token object
                        if ($null -ne $_.ImageCount) {
                            $_.ImageCount = 0
                            $_.ImageCost = 0
                        }
                        else {
                            $_.TotalCost = 0
                            $_.InputTokenCount = 0
                            $_.OutputTokenCount = 0
                            $_.InputTokenCost = 0
                            $_.OutputTokenCost = 0
                        }
                    }
                    Write-Debug -Message ('Total cost estimate: {0}' -f $Global:pwshBedRockSessionCostEstimate)
                    Write-Debug -Message ($Global:pwshBedRockSessionModelTally | Out-String)
                }
            }
        }

    } #process
    End {
        Write-Verbose -Message 'Reset-ModelTally complete'
    } #end
} #Reset-ModelTally
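#------------------------------------------------------------------------------------------------------
# Illustrative usage sketch (comments only, nothing here executes at module import): resetting
# tallies and then checking the session-wide cost estimate, which is only zeroed by the -AllModels
# path in the function above:
#
#     # reset token/cost tracking for one model
#     Reset-ModelTally -ModelID 'anthropic.claude-3-5-sonnet-20240620-v1:0' -Force
#
#     # reset every model tally and the session-wide cost estimate
#     Reset-ModelTally -AllModels -Force
#     $Global:pwshBedRockSessionCostEstimate   # expected to be 0 after -AllModels
#------------------------------------------------------------------------------------------------------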
<#
.EXTERNALHELP pwshBedrock-help.xml
#>
function Save-ModelContext {
    [CmdletBinding()]
    param (
        [Parameter(Mandatory = $true,
            HelpMessage = 'The unique identifier of the model.')]
        [ValidateSet(
            # 'ai21.j2-grande-instruct',
            # 'ai21.j2-jumbo-instruct',
            'ai21.jamba-instruct-v1:0',
            # 'ai21.j2-mid-v1',
            # 'ai21.j2-ultra-v1',
            'amazon.titan-image-generator-v1',
            'amazon.titan-text-express-v1',
            'amazon.titan-text-lite-v1',
            'amazon.titan-text-premier-v1:0',
            'amazon.titan-tg1-large',
            'anthropic.claude-v2:1',
            'anthropic.claude-3-haiku-20240307-v1:0',
            'anthropic.claude-3-sonnet-20240229-v1:0',
            'anthropic.claude-3-opus-20240229-v1:0',
            'anthropic.claude-3-5-sonnet-20240620-v1:0',
            # 'cohere.command-text-v14',
            # 'cohere.command-light-text-v14',
            'cohere.command-r-v1:0',
            'cohere.command-r-plus-v1:0',
            'meta.llama2-13b-chat-v1',
            'meta.llama2-70b-chat-v1',
            'meta.llama3-70b-instruct-v1:0',
            'meta.llama3-8b-instruct-v1:0',
            'meta.llama3-1-8b-instruct-v1:0',
            'meta.llama3-1-70b-instruct-v1:0',
            'mistral.mistral-7b-instruct-v0:2',
            'mistral.mistral-large-2402-v1:0',
            'mistral.mistral-large-2407-v1:0',
            'mistral.mistral-small-2402-v1:0',
            'mistral.mixtral-8x7b-instruct-v0:1',
            'stability.stable-diffusion-xl-v1'
        )]
        [string]$ModelID,

        [ValidateScript({
                if (-Not ($_ | Test-Path -PathType Container)) {
                    throw 'The Path argument must be a folder. File paths are not allowed.'
                }
                if (-Not ($_ | Test-Path)) {
                    throw 'File or folder does not exist'
                }
                return $true
            })]
        [Parameter(Mandatory = $true,
            HelpMessage = 'File path to save the context to.')]
        [string]$FilePath
    )

    $context = Get-ModelContext -ModelID $ModelID
    $exportObject = [PSCustomObject]@{
        ModelID = $ModelID
        Context = $context
    }

    # check if null or whitespace
    if (-not ($null -eq $context)) {
        # some model ids have a colon in them, which is not allowed in file names
        # remove the colon and replace with a hyphen
        $modelIDFile = $ModelID -replace ':', '-'
        Write-Debug -Message ('Adjusted ModelID: {0}' -f $modelIDFile)
        $fileName = '{0}-{1}.xml' -f $modelIDFile, (Get-Date -Format 'yyyyMMdd-HHmmss')
        $outFilePath = [System.IO.Path]::Combine($FilePath, $fileName)
        Write-Verbose -Message ('Saving context to {0}.' -f $outFilePath)
        try {
            ConvertTo-Clixml -InputObject $exportObject | Out-File -FilePath $outFilePath -Force -ErrorAction Stop
        }
        catch {
            Write-Error -Message ('Failed to save context to {0}.' -f $FilePath)
            throw $_
        }
    }
    else {
        Write-Warning -Message ('No context was found for {0}.' -f $ModelID)
    }

} #Save-ModelContext
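#------------------------------------------------------------------------------------------------------
# Illustrative usage sketch (comments only, nothing here executes at module import). -FilePath must
# be an existing folder; the function builds the file name itself (model id with ':' replaced by '-',
# plus a yyyyMMdd-HHmmss timestamp, saved as .xml). The folder path shown is hypothetical:
#
#     Save-ModelContext -ModelID 'anthropic.claude-3-5-sonnet-20240620-v1:0' -FilePath 'C:\bedrockContext'
#     # produces a file similar to:
#     # C:\bedrockContext\anthropic.claude-3-5-sonnet-20240620-v1-0-20240815-101530.xml
#------------------------------------------------------------------------------------------------------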
<#
.EXTERNALHELP pwshBedrock-help.xml
#>
function Set-ModelContextFromFile {
    [CmdletBinding(ConfirmImpact = 'Low', SupportsShouldProcess = $true)]
    param (
        [ValidateScript({
                if (-Not ($_ | Test-Path -PathType Leaf)) {
                    throw 'The Path argument must be a file. Folder paths are not allowed.'
                }
                if (-Not ($_ | Test-Path)) {
                    throw 'File or folder does not exist'
                }
                return $true
            })]
        [Parameter(Mandatory = $true,
            HelpMessage = 'File path to retrieve model context from.')]
        [string]$FilePath,

        [Parameter(Mandatory = $false,
            HelpMessage = 'Skip confirmation')]
        [switch]$Force
    )
    Begin {

        if (-not $PSBoundParameters.ContainsKey('Verbose')) {
            $VerbosePreference = $PSCmdlet.SessionState.PSVariable.GetValue('VerbosePreference')
        }
        if (-not $PSBoundParameters.ContainsKey('Confirm')) {
            $ConfirmPreference = $PSCmdlet.SessionState.PSVariable.GetValue('ConfirmPreference')
        }
        if (-not $PSBoundParameters.ContainsKey('WhatIf')) {
            $WhatIfPreference = $PSCmdlet.SessionState.PSVariable.GetValue('WhatIfPreference')
        }
        Write-Verbose -Message ('[{0}] Confirm={1} ConfirmPreference={2} WhatIf={3} WhatIfPreference={4}' -f $MyInvocation.MyCommand, $Confirm, $ConfirmPreference, $WhatIf, $WhatIfPreference)
        Write-Verbose -Message ('ParameterSetName: {0}' -f $PSCmdlet.ParameterSetName)

    } #begin
    Process {
        Write-Verbose -Message 'Processing Set-ModelContextFromFile'

        Write-Verbose -Message ('Loading context from {0}' -f $FilePath)
        try {
            $rawXML = Get-Content -Path $FilePath -Raw -ErrorAction Stop
        }
        catch {
            Write-Error ('Error reading file {0}: {1}' -f $FilePath, $_.Exception.Message)
            throw $_
        }
        if ($null -eq $rawXML) {
            throw ('{0} returned null content' -f $FilePath)
        }

        try {
            $contextObj = ConvertFrom-Clixml -String $rawXML -ErrorAction Stop
        }
        catch {
            Write-Error ('Error converting XML from {0}: {1}' -f $FilePath, $_.Exception.Message)
            throw $_
        }

        Write-Verbose 'Validating context object'
        if ($null -eq $contextObj -or $null -eq $contextObj.ModelID -or $null -eq $contextObj.Context) {
            throw ('{0} returned a null object when converting from XML' -f $FilePath)
        }

        Write-Verbose -Message ('Validating ModelID {0} is supported' -f $contextObj.ModelID)
        $allModelIDs = (Get-ModelInfo -AllModels).ModelID
        if ($allModelIDs -notcontains $contextObj.ModelID) {
            throw ('ModelID {0} not found in the list of supported models' -f $contextObj.ModelID)
        }

        $context = $Global:pwshBedrockModelContext | Where-Object { $_.ModelID -eq $contextObj.ModelID }
        if ($Force -or $PSCmdlet.ShouldProcess($contextObj.ModelID, 'Set-ModelContextFromFile')) {
            Write-Verbose -Message ('Resetting message context for {0}' -f $contextObj.ModelID)
            $context.Context = $contextObj.Context
        }

    } #process
    End {
        Write-Verbose -Message 'Set-ModelContextFromFile complete'
    } #end
} #Set-ModelContextFromFile
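#------------------------------------------------------------------------------------------------------
# Illustrative round-trip sketch (comments only, nothing here executes at module import): persist a
# model's context with Save-ModelContext, then restore it into $Global:pwshBedrockModelContext in a
# later session. The folder and file name shown are hypothetical:
#
#     Save-ModelContext -ModelID 'anthropic.claude-3-5-sonnet-20240620-v1:0' -FilePath 'C:\bedrockContext'
#     # ...later, or in a new session after importing pwshBedrock...
#     Set-ModelContextFromFile -FilePath 'C:\bedrockContext\anthropic.claude-3-5-sonnet-20240620-v1-0-20240815-101530.xml' -Force
#------------------------------------------------------------------------------------------------------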