diff --git a/.mock/definition/api.yml b/.mock/definition/api.yml index 1d878839..a7923d94 100644 --- a/.mock/definition/api.yml +++ b/.mock/definition/api.yml @@ -1,12 +1,12 @@ name: api -auth: HeaderAuthScheme -auth-schemes: - HeaderAuthScheme: - name: apiKey - header: X-Hume-Api-Key - type: optional +error-discrimination: + strategy: status-code default-environment: Production environments: - Production: https://api.hume.ai -error-discrimination: - strategy: status-code + Production: https://api.hume.ai +auth: HeaderAuthScheme +auth-schemes: + HeaderAuthScheme: + name: apiKey + header: X-Hume-Api-Key + type: optional diff --git a/.mock/definition/empathic-voice/__package__.yml b/.mock/definition/empathic-voice/__package__.yml index 17805372..cb0abcfc 100644 --- a/.mock/definition/empathic-voice/__package__.yml +++ b/.mock/definition/empathic-voice/__package__.yml @@ -1,2541 +1,2696 @@ +errors: + BadRequestError: + status-code: 400 + type: ErrorResponse + docs: Bad Request types: - ReturnUserDefinedToolToolType: - enum: - - BUILTIN - - FUNCTION - docs: >- - Type of Tool. Either `BUILTIN` for natively implemented tools, like web - search, or `FUNCTION` for user-defined tools. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnUserDefinedToolVersionType: - enum: - - FIXED - - LATEST - docs: >- - Versioning method for a Tool. Either `FIXED` for using a fixed version - number or `LATEST` for auto-updating to the latest version. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnUserDefinedTool: - docs: A specific tool version returned from the server - properties: - tool_type: - type: ReturnUserDefinedToolToolType - docs: >- - Type of Tool. Either `BUILTIN` for natively implemented tools, like - web search, or `FUNCTION` for user-defined tools. - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Tool. - - - Tools, as well as Configs and Prompts, are versioned. This versioning - system supports iterative development, allowing you to progressively - refine tools and revert to previous versions if needed. - - - Version numbers are integer values representing different iterations - of the Tool. Each update to the Tool increments its version number. - version_type: - type: ReturnUserDefinedToolVersionType - docs: >- - Versioning method for a Tool. Either `FIXED` for using a fixed version - number or `LATEST` for auto-updating to the latest version. - version_description: - type: optional - docs: An optional description of the Tool version. - name: - type: string - docs: Name applied to all versions of a particular Tool. - created_on: - type: long - docs: >- - Time at which the Tool was created. Measured in seconds since the Unix - epoch. - modified_on: - type: long - docs: >- - Time at which the Tool was last modified. Measured in seconds since - the Unix epoch. - fallback_content: - type: optional - docs: >- - Optional text passed to the supplemental LLM in place of the tool call - result. The LLM then uses this text to generate a response back to the - user, ensuring continuity in the conversation if the Tool errors. - description: - type: optional - docs: >- - An optional description of what the Tool does, used by the - supplemental LLM to choose when and how to call the function. - parameters: - type: string - docs: >- - Stringified JSON defining the parameters used by this version of the - Tool. 
- - - These parameters define the inputs needed for the Tool’s execution, - including the expected data type and description for each input field. - Structured as a stringified JSON schema, this format ensures the tool - receives data in the expected format. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnPromptVersionType: - enum: - - FIXED - - LATEST - docs: >- - Versioning method for a Prompt. Either `FIXED` for using a fixed version - number or `LATEST` for auto-updating to the latest version. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnPrompt: - docs: A Prompt associated with this Config. - properties: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Prompt. - - - Prompts, as well as Configs and Tools, are versioned. This versioning - system supports iterative development, allowing you to progressively - refine prompts and revert to previous versions if needed. - - - Version numbers are integer values representing different iterations - of the Prompt. Each update to the Prompt increments its version - number. - version_type: - type: ReturnPromptVersionType - docs: >- - Versioning method for a Prompt. Either `FIXED` for using a fixed - version number or `LATEST` for auto-updating to the latest version. - version_description: - type: optional - docs: An optional description of the Prompt version. - name: - type: string - docs: Name applied to all versions of a particular Prompt. - created_on: - type: long - docs: >- - Time at which the Prompt was created. Measured in seconds since the - Unix epoch. - modified_on: - type: long - docs: >- - Time at which the Prompt was last modified. Measured in seconds since - the Unix epoch. - text: - type: string - docs: >- - Instructions used to shape EVI’s behavior, responses, and style. - - - You can use the Prompt to define a specific goal or role for EVI, - specifying how it should act or what it should focus on during the - conversation. For example, EVI can be instructed to act as a customer - support representative, a fitness coach, or a travel advisor, each - with its own set of behaviors and response styles. - - - For help writing a system prompt, see our [Prompting - Guide](/docs/empathic-voice-interface-evi/prompting). - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - PostedCustomVoice: - docs: A custom voice specifications posted to the server - properties: - name: - type: string - docs: >- - String with the name of the voice to use. Maximum length of 75 - characters. Will be converted to all-uppercase. - base_voice: - type: string - docs: The voice the custom voice is based off of. - speech_rate_multiplier: - type: optional - docs: The speech rate multiplier for this custom voice. - parameter_model: - type: string - docs: >- - The name of the parameter model used to define which attributes are - used by `parameters`. - parameters: - type: optional>> - docs: Voice specification for a Config. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnCustomVoice: - docs: A custom voice specification returned from the server - properties: - id: - type: string - docs: Identifier for a Custom Voice. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Custom Voice. Version numbers should be integers. - The combination of custom_voice_id and version number is unique. 
- name: - type: string - docs: >- - String with the name of the voice to use. Maximum length of 75 - characters. Will be converted to all-uppercase. - created_on: - type: long - docs: The timestamp when the first version of this prompt was created. - modified_on: - type: long - docs: The timestamp when this version of the prompt was created. - base_voice: - type: string - docs: The voice the custom voice is based off of. - speech_rate_multiplier: - type: optional - docs: The speech rate multiplier for this custom voice. - parameter_model: - type: string - docs: >- - The name of the parameter model used to define which attributes are - used by `parameters`. - parameters: - type: map - docs: Voice specification for a Config. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - PostedBuiltinToolName: - enum: - - web_search - - hang_up - docs: >- - Name of the built-in tool to use. Hume supports the following built-in - tools: - - - - **web_search:** enables EVI to search the web for up-to-date information - when applicable. - - - **hang_up:** closes the WebSocket connection when appropriate (e.g., - after detecting a farewell in the conversation). - - - For more information, see our guide on [using built-in - tools](/docs/empathic-voice-interface-evi/tool-use#using-built-in-tools). - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - PostedBuiltinTool: - docs: A configuration of a built-in tool to be posted to the server - properties: - name: - type: PostedBuiltinToolName - docs: >- - Name of the built-in tool to use. Hume supports the following built-in - tools: - - - - **web_search:** enables EVI to search the web for up-to-date - information when applicable. - - - **hang_up:** closes the WebSocket connection when appropriate (e.g., - after detecting a farewell in the conversation). - - - For more information, see our guide on [using built-in - tools](/docs/empathic-voice-interface-evi/tool-use#using-built-in-tools). - fallback_content: - type: optional - docs: >- - Optional text passed to the supplemental LLM in place of the tool call - result. The LLM then uses this text to generate a response back to the - user, ensuring continuity in the conversation if the Tool errors. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - PostedEllmModel: - docs: A eLLM model configuration to be posted to the server - properties: - allow_short_responses: - type: optional - docs: |- - Boolean indicating if the eLLM is allowed to generate short responses. - - If omitted, short responses from the eLLM are enabled by default. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - PostedEventMessageSpec: - docs: Settings for a specific event_message to be posted to the server - properties: - enabled: - type: boolean - docs: >- - Boolean indicating if this event message is enabled. - - - If set to `true`, a message will be sent when the circumstances for - the specific event are met. - text: - type: optional - docs: >- - Text to use as the event message when the corresponding event occurs. - If no text is specified, EVI will generate an appropriate message - based on its current context and the system prompt. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - PostedEventMessageSpecs: - docs: >- - Collection of event messages returned by the server. - - - Event messages are sent by the server when specific events occur during a - chat session. 
These messages are used to configure behaviors for EVI, such - as controlling how EVI starts a new conversation. - properties: - on_new_chat: - type: optional - docs: >- - Specifies the initial message EVI provides when a new chat is started, - such as a greeting or welcome message. - on_inactivity_timeout: - type: optional - docs: >- - Specifies the message EVI provides when the chat is about to be - disconnected due to a user inactivity timeout, such as a message - mentioning a lack of user input for a period of time. - - - Enabling an inactivity message allows developers to use this message - event for "checking in" with the user if they are not responding to - see if they are still active. - - - If the user does not respond in the number of seconds specified in the - `inactivity_timeout` field, then EVI will say the message and the user - has 15 seconds to respond. If they respond in time, the conversation - will continue; if not, the conversation will end. - - - However, if the inactivity message is not enabled, then reaching the - inactivity timeout will immediately end the connection. - on_max_duration_timeout: - type: optional - docs: >- - Specifies the message EVI provides when the chat is disconnected due - to reaching the maximum chat duration, such as a message mentioning - the time limit for the chat has been reached. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - PostedLanguageModelModelProvider: - enum: - - OPEN_AI - - CUSTOM_LANGUAGE_MODEL - - ANTHROPIC - - FIREWORKS - - GROQ - - GOOGLE + ErrorResponse: + properties: + error: optional + message: optional + source: + openapi: stenographer-openapi.json + ReturnUserDefinedToolToolType: + enum: + - BUILTIN + - FUNCTION + docs: >- + Type of Tool. Either `BUILTIN` for natively implemented tools, like web + search, or `FUNCTION` for user-defined tools. + source: + openapi: stenographer-openapi.json + ReturnUserDefinedToolVersionType: + enum: + - FIXED + - LATEST + docs: >- + Versioning method for a Tool. Either `FIXED` for using a fixed version + number or `LATEST` for auto-updating to the latest version. + source: + openapi: stenographer-openapi.json + ReturnUserDefinedTool: + docs: A specific tool version returned from the server + properties: + tool_type: + type: ReturnUserDefinedToolToolType + docs: >- + Type of Tool. Either `BUILTIN` for natively implemented tools, like + web search, or `FUNCTION` for user-defined tools. + id: + type: string + docs: Identifier for a Tool. Formatted as a UUID. + version: + type: integer + docs: >- + Version number for a Tool. + + + Tools, Configs, Custom Voices, and Prompts are versioned. This + versioning system supports iterative development, allowing you to + progressively refine tools and revert to previous versions if needed. + + + Version numbers are integer values representing different iterations + of the Tool. Each update to the Tool increments its version number. + version_type: + type: ReturnUserDefinedToolVersionType + docs: >- + Versioning method for a Tool. Either `FIXED` for using a fixed version + number or `LATEST` for auto-updating to the latest version. + version_description: + type: optional + docs: An optional description of the Tool version. + name: + type: string + docs: Name applied to all versions of a particular Tool. + created_on: + type: long + docs: >- + Time at which the Tool was created. Measured in seconds since the Unix + epoch. + modified_on: + type: long + docs: >- + Time at which the Tool was last modified. 
Measured in seconds since + the Unix epoch. + fallback_content: + type: optional + docs: >- + Optional text passed to the supplemental LLM in place of the tool call + result. The LLM then uses this text to generate a response back to the + user, ensuring continuity in the conversation if the Tool errors. + description: + type: optional + docs: >- + An optional description of what the Tool does, used by the + supplemental LLM to choose when and how to call the function. + parameters: + type: string + docs: >- + Stringified JSON defining the parameters used by this version of the + Tool. + + + These parameters define the inputs needed for the Tool’s execution, + including the expected data type and description for each input field. + Structured as a stringified JSON schema, this format ensures the tool + receives data in the expected format. + source: + openapi: stenographer-openapi.json + ReturnPromptVersionType: + enum: + - FIXED + - LATEST + docs: >- + Versioning method for a Prompt. Either `FIXED` for using a fixed version + number or `LATEST` for auto-updating to the latest version. + source: + openapi: stenographer-openapi.json + ReturnPrompt: + docs: A Prompt associated with this Config. + properties: + id: + type: string + docs: Identifier for a Prompt. Formatted as a UUID. + version: + type: integer + docs: >- + Version number for a Prompt. + + + Prompts, Configs, Custom Voices, and Tools are versioned. This + versioning system supports iterative development, allowing you to + progressively refine prompts and revert to previous versions if + needed. + + + Version numbers are integer values representing different iterations + of the Prompt. Each update to the Prompt increments its version + number. + version_type: + type: ReturnPromptVersionType + docs: >- + Versioning method for a Prompt. Either `FIXED` for using a fixed + version number or `LATEST` for auto-updating to the latest version. + version_description: + type: optional + docs: An optional description of the Prompt version. + name: + type: string + docs: Name applied to all versions of a particular Prompt. + created_on: + type: long + docs: >- + Time at which the Prompt was created. Measured in seconds since the + Unix epoch. + modified_on: + type: long + docs: >- + Time at which the Prompt was last modified. Measured in seconds since + the Unix epoch. + text: + type: string + docs: >- + Instructions used to shape EVI’s behavior, responses, and style. + + + You can use the Prompt to define a specific goal or role for EVI, + specifying how it should act or what it should focus on during the + conversation. For example, EVI can be instructed to act as a customer + support representative, a fitness coach, or a travel advisor, each + with its own set of behaviors and response styles. + + + For help writing a system prompt, see our [Prompting + Guide](/docs/empathic-voice-interface-evi/prompting). + source: + openapi: stenographer-openapi.json + PostedCustomVoiceBaseVoice: + enum: + - ITO + - KORA + - DACHER + - AURA + - FINN + - STELLA + - WHIMSY + docs: Specifies the base voice used to create the Custom Voice. + source: + openapi: stenographer-openapi.json + PostedCustomVoiceParameters: + docs: >- + The specified attributes of a Custom Voice. + + + If no parameters are specified then all attributes will be set to their + defaults, meaning no modfications will be made to the base voice. + properties: + gender: + type: optional + docs: >- + The vocalization of gender, ranging between masculine and feminine. 
+ + + The default value is `0`, with a minimum of `-100` (more masculine) + and a maximum of `100` (more feminine). A value of `0` leaves this + parameter unchanged from the base voice. + huskiness: + type: optional + docs: >- + The texture of the voice, ranging between bright and husky. + + + The default value is `0`, with a minimum of `-100` (brighter) and a + maximum of `100` (huskier). A value of `0` leaves this parameter + unchanged from the base voice. + nasality: + type: optional + docs: >- + The openness of the voice, ranging between resonant and nasal. + + + The default value is `0`, with a minimum of `-100` (more resonant) and + a maximum of `100` (more nasal). A value of `0` leaves this parameter + unchanged from the base voice. + pitch: + type: optional + docs: >- + The frequency of the voice, ranging between low and high. + + + The default value is `0`, with a minimum of `-100` (lower) and a + maximum of `100` (higher). A value of `0` leaves this parameter + unchanged from the base voice. + source: + openapi: stenographer-openapi.json + PostedCustomVoice: + docs: >- + A Custom Voice specification to be associated with this Config. + + + If a Custom Voice specification is not provided then the + [name](/reference/empathic-voice-interface-evi/configs/create-config#request.body.voice.name) + of a base voice or previously created Custom Voice must be provided. + + See our [Voices guide](/docs/empathic-voice-interface-evi/voices) for a tutorial on how to craft a Custom Voice. + properties: + name: + type: string + docs: >- + The name of the Custom Voice. Maximum length of 75 characters. Will be + converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE + VOICE") + base_voice: + type: PostedCustomVoiceBaseVoice + docs: Specifies the base voice used to create the Custom Voice. + parameter_model: + type: literal<"20240715-4parameter"> + docs: >- + The name of the parameter model used to define which attributes are + used by the `parameters` field. Currently, only `20240715-4parameter` + is supported as the parameter model. + parameters: + type: optional + docs: >- + The specified attributes of a Custom Voice. + + + If no parameters are specified then all attributes will be set to + their defaults, meaning no modfications will be made to the base + voice. + source: + openapi: stenographer-openapi.json + ReturnCustomVoiceBaseVoice: + enum: + - ITO + - KORA + - DACHER + - AURA + - FINN + - STELLA + - WHIMSY + docs: The base voice used to create the Custom Voice. + source: + openapi: stenographer-openapi.json + ReturnCustomVoiceParameters: + docs: >- + The specified attributes of a Custom Voice. If a parameter's value is `0` + (default), it will not be included in the response. + properties: + gender: + type: optional + docs: >- + The vocalization of gender, ranging between masculine and feminine. + + + The default value is `0`, with a minimum of `-100` (more masculine) + and a maximum of `100` (more feminine). A value of `0` leaves this + parameter unchanged from the base voice. + huskiness: + type: optional + docs: >- + The texture of the voice, ranging between bright and husky. + + + The default value is `0`, with a minimum of `-100` (brighter) and a + maximum of `100` (huskier). A value of `0` leaves this parameter + unchanged from the base voice. + nasality: + type: optional + docs: >- + The openness of the voice, ranging between resonant and nasal. + + + The default value is `0`, with a minimum of `-100` (more resonant) and + a maximum of `100` (more nasal). 
A value of `0` leaves this parameter + unchanged from the base voice. + pitch: + type: optional + docs: >- + The frequency of the voice, ranging between low and high. + + + The default value is `0`, with a minimum of `-100` (lower) and a + maximum of `100` (higher). A value of `0` leaves this parameter + unchanged from the base voice. + source: + openapi: stenographer-openapi.json + ReturnCustomVoice: + docs: A Custom Voice specification associated with this Config. + properties: + id: + type: string + docs: Identifier for a Custom Voice. Formatted as a UUID. + version: + type: integer + docs: >- + Version number for a Custom Voice. + + + Custom Voices, Prompts, Configs, and Tools are versioned. This + versioning system supports iterative development, allowing you to + progressively refine configurations and revert to previous versions if + needed. + + + Version numbers are integer values representing different iterations + of the Custom Voice. Each update to the Custom Voice increments its + version number. + name: + type: string + docs: The name of the Custom Voice. Maximum length of 75 characters. + created_on: + type: long + docs: >- + Time at which the Custom Voice was created. Measured in seconds since + the Unix epoch. + modified_on: + type: long + docs: >- + Time at which the Custom Voice was last modified. Measured in seconds + since the Unix epoch. + base_voice: + type: ReturnCustomVoiceBaseVoice + docs: The base voice used to create the Custom Voice. + parameter_model: + type: literal<"20240715-4parameter"> + docs: >- + The name of the parameter model used to define which attributes are + used by the `parameters` field. Currently, only `20240715-4parameter` + is supported as the parameter model. + parameters: + type: ReturnCustomVoiceParameters + docs: >- + The specified attributes of a Custom Voice. If a parameter's value is + `0` (default), it will not be included in the response. + source: + openapi: stenographer-openapi.json + PostedBuiltinToolName: + enum: + - web_search + - hang_up + docs: >- + Name of the built-in tool to use. Hume supports the following built-in + tools: + + + - **web_search:** enables EVI to search the web for up-to-date information + when applicable. + + - **hang_up:** closes the WebSocket connection when appropriate (e.g., + after detecting a farewell in the conversation). + + + For more information, see our guide on [using built-in + tools](/docs/empathic-voice-interface-evi/tool-use#using-built-in-tools). + source: + openapi: stenographer-openapi.json + PostedBuiltinTool: + docs: A configuration of a built-in tool to be posted to the server + properties: + name: + type: PostedBuiltinToolName + docs: >- + Name of the built-in tool to use. Hume supports the following built-in + tools: + + + - **web_search:** enables EVI to search the web for up-to-date + information when applicable. + + - **hang_up:** closes the WebSocket connection when appropriate (e.g., + after detecting a farewell in the conversation). + + + For more information, see our guide on [using built-in + tools](/docs/empathic-voice-interface-evi/tool-use#using-built-in-tools). + fallback_content: + type: optional + docs: >- + Optional text passed to the supplemental LLM in place of the tool call + result. The LLM then uses this text to generate a response back to the + user, ensuring continuity in the conversation if the Tool errors. 
+ source: + openapi: stenographer-openapi.json + PostedConfigPromptSpec: + docs: >- + Identifies which prompt to use in a a config OR how to create a new prompt + to use in the config + properties: + id: + type: optional + docs: Identifier for a Prompt. Formatted as a UUID. + version: + type: optional + docs: >- + Version number for a Prompt. Version numbers should be integers. The + combination of configId and version number is unique. + text: + type: optional + docs: Text used to create a new prompt for a particular config. + source: + openapi: stenographer-openapi.json + PostedEllmModel: + docs: A eLLM model configuration to be posted to the server + properties: + allow_short_responses: + type: optional + docs: |- + Boolean indicating if the eLLM is allowed to generate short responses. + + If omitted, short responses from the eLLM are enabled by default. + source: + openapi: stenographer-openapi.json + PostedEventMessageSpec: + docs: Settings for a specific event_message to be posted to the server + properties: + enabled: + type: boolean + docs: >- + Boolean indicating if this event message is enabled. + + + If set to `true`, a message will be sent when the circumstances for + the specific event are met. + text: + type: optional + docs: >- + Text to use as the event message when the corresponding event occurs. + If no text is specified, EVI will generate an appropriate message + based on its current context and the system prompt. + source: + openapi: stenographer-openapi.json + PostedEventMessageSpecs: + docs: >- + Collection of event messages returned by the server. + + + Event messages are sent by the server when specific events occur during a + chat session. These messages are used to configure behaviors for EVI, such + as controlling how EVI starts a new conversation. + properties: + on_new_chat: + type: optional + docs: >- + Specifies the initial message EVI provides when a new chat is started, + such as a greeting or welcome message. + on_inactivity_timeout: + type: optional + docs: >- + Specifies the message EVI provides when the chat is about to be + disconnected due to a user inactivity timeout, such as a message + mentioning a lack of user input for a period of time. + + + Enabling an inactivity message allows developers to use this message + event for "checking in" with the user if they are not responding to + see if they are still active. + + + If the user does not respond in the number of seconds specified in the + `inactivity_timeout` field, then EVI will say the message and the user + has 15 seconds to respond. If they respond in time, the conversation + will continue; if not, the conversation will end. + + + However, if the inactivity message is not enabled, then reaching the + inactivity timeout will immediately end the connection. + on_max_duration_timeout: + type: optional + docs: >- + Specifies the message EVI provides when the chat is disconnected due + to reaching the maximum chat duration, such as a message mentioning + the time limit for the chat has been reached. + source: + openapi: stenographer-openapi.json + PostedLanguageModelModelProvider: + enum: + - OPEN_AI + - CUSTOM_LANGUAGE_MODEL + - ANTHROPIC + - FIREWORKS + - GROQ + - GOOGLE + docs: The provider of the supplemental language model. + source: + openapi: stenographer-openapi.json + PostedLanguageModel: + docs: A LanguageModel to be posted to the server + properties: + model_provider: + type: optional docs: The provider of the supplemental language model. 
- source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - PostedLanguageModel: - docs: A LanguageModel to be posted to the server - properties: - model_provider: - type: optional - docs: The provider of the supplemental language model. - model_resource: - type: optional - docs: String that specifies the language model to use with `model_provider`. - temperature: - type: optional - docs: >- - The model temperature, with values between 0 to 1 (inclusive). - - - Controls the randomness of the LLM’s output, with values closer to 0 - yielding focused, deterministic responses and values closer to 1 - producing more creative, diverse responses. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - PostedPromptSpec: - docs: A Prompt associated with this Config. - properties: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - version: - type: optional - docs: >- - Version number for a Prompt. - - - Prompts, as well as Configs and Tools, are versioned. This versioning - system supports iterative development, allowing you to progressively - refine prompts and revert to previous versions if needed. - - - Version numbers are integer values representing different iterations - of the Prompt. Each update to the Prompt increments its version - number. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - PostedTimeoutSpec: - docs: Settings for a specific timeout to be posted to the server - properties: - enabled: - type: boolean - docs: Boolean indicating if this event message is enabled. - duration_secs: - type: optional - docs: Duration in seconds for the timeout. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - PostedTimeoutSpecsInactivity: - docs: >- - Specifies the duration of user inactivity (in seconds) after which the EVI - WebSocket connection will be automatically disconnected. Default is 600 - seconds (10 minutes). - - - Accepts a minimum value of 1 second and a maximum value of 1,800 seconds. - properties: - enabled: - type: boolean - docs: >- - Boolean indicating if this timeout is enabled. - - - If set to false, EVI will not timeout due to a specified duration of - user inactivity being reached. However, the conversation will - eventually disconnect after 1,800 seconds (30 minutes), which is the - maximum WebSocket duration limit for EVI. - duration_secs: - type: optional - docs: >- - Duration in seconds for the timeout (e.g. 600 seconds represents 10 - minutes). - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - PostedTimeoutSpecsMaxDuration: - docs: >- - Specifies the maximum allowed duration (in seconds) for an EVI WebSocket - connection before it is automatically disconnected. Default is 1,800 - seconds (30 minutes). - - - Accepts a minimum value of 1 second and a maximum value of 1,800 seconds. - properties: - enabled: - type: boolean - docs: >- - Boolean indicating if this timeout is enabled. - - - If set to false, EVI will not timeout due to a specified maximum - duration being reached. However, the conversation will eventually - disconnect after 1,800 seconds (30 minutes), which is the maximum - WebSocket duration limit for EVI. - duration_secs: - type: optional - docs: >- - Duration in seconds for the timeout (e.g. 600 seconds represents 10 - minutes). - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - PostedTimeoutSpecs: - docs: >- - Collection of timeout specifications returned by the server. 
- - - Timeouts are sent by the server when specific time-based events occur - during a chat session. These specifications set the inactivity timeout and - the maximum duration an EVI WebSocket connection can stay open before it - is automatically disconnected. - properties: - inactivity: - type: optional - docs: >- - Specifies the duration of user inactivity (in seconds) after which the - EVI WebSocket connection will be automatically disconnected. Default - is 600 seconds (10 minutes). - - - Accepts a minimum value of 1 second and a maximum value of 1,800 - seconds. - max_duration: - type: optional - docs: >- - Specifies the maximum allowed duration (in seconds) for an EVI - WebSocket connection before it is automatically disconnected. Default - is 1,800 seconds (30 minutes). - - - Accepts a minimum value of 1 second and a maximum value of 1,800 - seconds. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - PostedUserDefinedToolSpec: - docs: A specific tool identifier to be posted to the server - properties: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - version: - type: optional - docs: >- - Version number for a Tool. - - - Tools, as well as Configs and Prompts, are versioned. This versioning - system supports iterative development, allowing you to progressively - refine tools and revert to previous versions if needed. - - - Version numbers are integer values representing different iterations - of the Tool. Each update to the Tool increments its version number. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - PostedVoiceName: - enum: - - ITO - - DACHER - - KORA - docs: >- - String with the name of the voice to use. Maximum length of 75 characters. - Will be converted to all-uppercase. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - PostedVoice: - docs: A Voice specification posted to the server - properties: - provider: - type: literal<"HUME_AI"> - docs: >- - The provider of the voice to use. Currently, only `HUME_AI` is - supported as the voice provider. - name: - type: optional - docs: >- - String with the name of the voice to use. Maximum length of 75 - characters. Will be converted to all-uppercase. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnBuiltinToolToolType: - enum: - - BUILTIN - - FUNCTION - docs: >- - Type of Tool. Either `BUILTIN` for natively implemented tools, like web - search, or `FUNCTION` for user-defined tools. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnBuiltinTool: - docs: A specific builtin tool version returned from the server - properties: - tool_type: - type: ReturnBuiltinToolToolType - docs: >- - Type of Tool. Either `BUILTIN` for natively implemented tools, like - web search, or `FUNCTION` for user-defined tools. - name: - type: string - docs: Name applied to all versions of a particular Tool. - fallback_content: - type: optional - docs: >- - Optional text passed to the supplemental LLM in place of the tool call - result. The LLM then uses this text to generate a response back to the - user, ensuring continuity in the conversation if the Tool errors. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnConfig: - docs: A specific config version returned from the server - properties: - id: - type: optional - docs: Identifier for a Config. Formatted as a UUID. - version: - type: optional - docs: >- - Version number for a Config. 
- - - Configs, as well as Prompts and Tools, are versioned. This versioning - system supports iterative development, allowing you to progressively - refine configurations and revert to previous versions if needed. - - - Version numbers are integer values representing different iterations - of the Config. Each update to the Config increments its version - number. - version_description: - type: optional - docs: An optional description of the Config version. - name: - type: optional - docs: Name applied to all versions of a particular Config. - created_on: - type: optional - docs: >- - Time at which the Config was created. Measured in seconds since the - Unix epoch. - modified_on: - type: optional - docs: >- - Time at which the Config was last modified. Measured in seconds since - the Unix epoch. - prompt: optional - voice: - type: optional - docs: A voice specification associated with this Config. - language_model: - type: optional - docs: >- - The supplemental language model associated with this Config. - - - This model is used to generate longer, more detailed responses from - EVI. Choosing an appropriate supplemental language model for your use - case is crucial for generating fast, high-quality responses from EVI. - ellm_model: - type: optional - docs: >- - The eLLM setup associated with this Config. - - - Hume's eLLM (empathic Large Language Model) is a multimodal language - model that takes into account both expression measures and language. - The eLLM generates short, empathic language responses and guides - text-to-speech (TTS) prosody. - tools: - type: optional>> - docs: List of user-defined tools associated with this Config. - builtin_tools: - type: optional>> - docs: List of built-in tools associated with this Config. - event_messages: optional - timeouts: optional - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnEllmModel: - docs: A specific eLLM Model configuration - properties: - allow_short_responses: - type: boolean - docs: |- - Boolean indicating if the eLLM is allowed to generate short responses. - - If omitted, short responses from the eLLM are enabled by default. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnEventMessageSpec: - docs: A specific event message configuration to be returned from the server - properties: - enabled: - type: boolean - docs: >- - Boolean indicating if this event message is enabled. - - - If set to `true`, a message will be sent when the circumstances for - the specific event are met. - text: - type: optional - docs: >- - Text to use as the event message when the corresponding event occurs. - If no text is specified, EVI will generate an appropriate message - based on its current context and the system prompt. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnEventMessageSpecs: - docs: >- - Collection of event messages returned by the server. - - - Event messages are sent by the server when specific events occur during a - chat session. These messages are used to configure behaviors for EVI, such - as controlling how EVI starts a new conversation. - properties: - on_new_chat: - type: optional - docs: >- - Specifies the initial message EVI provides when a new chat is started, - such as a greeting or welcome message. 
- on_inactivity_timeout: - type: optional - docs: >- - Specifies the message EVI provides when the chat is about to be - disconnected due to a user inactivity timeout, such as a message - mentioning a lack of user input for a period of time. - - - Enabling an inactivity message allows developers to use this message - event for "checking in" with the user if they are not responding to - see if they are still active. - - - If the user does not respond in the number of seconds specified in the - `inactivity_timeout` field, then EVI will say the message and the user - has 15 seconds to respond. If they respond in time, the conversation - will continue; if not, the conversation will end. - - - However, if the inactivity message is not enabled, then reaching the - inactivity timeout will immediately end the connection. - on_max_duration_timeout: - type: optional - docs: >- - Specifies the message EVI provides when the chat is disconnected due - to reaching the maximum chat duration, such as a message mentioning - the time limit for the chat has been reached. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnLanguageModelModelProvider: - enum: - - OPEN_AI - - CUSTOM_LANGUAGE_MODEL - - ANTHROPIC - - FIREWORKS - - GROQ - - GOOGLE + model_resource: + type: optional + docs: String that specifies the language model to use with `model_provider`. + temperature: + type: optional + docs: >- + The model temperature, with values between 0 to 1 (inclusive). + + + Controls the randomness of the LLM’s output, with values closer to 0 + yielding focused, deterministic responses and values closer to 1 + producing more creative, diverse responses. + source: + openapi: stenographer-openapi.json + PostedTimeoutSpec: + docs: Settings for a specific timeout to be posted to the server + properties: + enabled: + type: boolean + docs: Boolean indicating if this event message is enabled. + duration_secs: + type: optional + docs: Duration in seconds for the timeout. + source: + openapi: stenographer-openapi.json + PostedTimeoutSpecsInactivity: + docs: >- + Specifies the duration of user inactivity (in seconds) after which the EVI + WebSocket connection will be automatically disconnected. Default is 600 + seconds (10 minutes). + + + Accepts a minimum value of 1 second and a maximum value of 1,800 seconds. + properties: + enabled: + type: boolean + docs: >- + Boolean indicating if this timeout is enabled. + + + If set to false, EVI will not timeout due to a specified duration of + user inactivity being reached. However, the conversation will + eventually disconnect after 1,800 seconds (30 minutes), which is the + maximum WebSocket duration limit for EVI. + duration_secs: + type: optional + docs: >- + Duration in seconds for the timeout (e.g. 600 seconds represents 10 + minutes). + source: + openapi: stenographer-openapi.json + PostedTimeoutSpecsMaxDuration: + docs: >- + Specifies the maximum allowed duration (in seconds) for an EVI WebSocket + connection before it is automatically disconnected. Default is 1,800 + seconds (30 minutes). + + + Accepts a minimum value of 1 second and a maximum value of 1,800 seconds. + properties: + enabled: + type: boolean + docs: >- + Boolean indicating if this timeout is enabled. + + + If set to false, EVI will not timeout due to a specified maximum + duration being reached. However, the conversation will eventually + disconnect after 1,800 seconds (30 minutes), which is the maximum + WebSocket duration limit for EVI. 
+ duration_secs: + type: optional + docs: >- + Duration in seconds for the timeout (e.g. 600 seconds represents 10 + minutes). + source: + openapi: stenographer-openapi.json + PostedTimeoutSpecs: + docs: >- + Collection of timeout specifications returned by the server. + + + Timeouts are sent by the server when specific time-based events occur + during a chat session. These specifications set the inactivity timeout and + the maximum duration an EVI WebSocket connection can stay open before it + is automatically disconnected. + properties: + inactivity: + type: optional + docs: >- + Specifies the duration of user inactivity (in seconds) after which the + EVI WebSocket connection will be automatically disconnected. Default + is 600 seconds (10 minutes). + + + Accepts a minimum value of 1 second and a maximum value of 1,800 + seconds. + max_duration: + type: optional + docs: >- + Specifies the maximum allowed duration (in seconds) for an EVI + WebSocket connection before it is automatically disconnected. Default + is 1,800 seconds (30 minutes). + + + Accepts a minimum value of 1 second and a maximum value of 1,800 + seconds. + source: + openapi: stenographer-openapi.json + PostedUserDefinedToolSpec: + docs: A specific tool identifier to be posted to the server + properties: + id: + type: string + docs: Identifier for a Tool. Formatted as a UUID. + version: + type: optional + docs: >- + Version number for a Tool. + + + Tools, Configs, Custom Voices, and Prompts are versioned. This + versioning system supports iterative development, allowing you to + progressively refine tools and revert to previous versions if needed. + + + Version numbers are integer values representing different iterations + of the Tool. Each update to the Tool increments its version number. + source: + openapi: stenographer-openapi.json + PostedVoice: + docs: A Voice specification posted to the server + properties: + provider: + type: literal<"HUME_AI"> + docs: >- + The provider of the voice to use. Currently, only `HUME_AI` is + supported as the voice provider. + name: + type: optional + docs: >- + Specifies the name of the voice to use. + + + This can be either the name of a previously created Custom Voice or + one of our 7 base voices: `ITO`, `KORA`, `DACHER`, `AURA`, `FINN`, + `WHIMSY`, or `STELLA`. + + + The name will be automatically converted to uppercase (e.g., "Ito" + becomes "ITO"). If a name is not specified, then a [Custom + Voice](/reference/empathic-voice-interface-evi/configs/create-config#request.body.voice.custom_voice) + specification must be provided. + custom_voice: optional + source: + openapi: stenographer-openapi.json + ReturnBuiltinToolToolType: + enum: + - BUILTIN + - FUNCTION + docs: >- + Type of Tool. Either `BUILTIN` for natively implemented tools, like web + search, or `FUNCTION` for user-defined tools. + source: + openapi: stenographer-openapi.json + ReturnBuiltinTool: + docs: A specific builtin tool version returned from the server + properties: + tool_type: + type: ReturnBuiltinToolToolType + docs: >- + Type of Tool. Either `BUILTIN` for natively implemented tools, like + web search, or `FUNCTION` for user-defined tools. + name: + type: string + docs: Name applied to all versions of a particular Tool. + fallback_content: + type: optional + docs: >- + Optional text passed to the supplemental LLM in place of the tool call + result. The LLM then uses this text to generate a response back to the + user, ensuring continuity in the conversation if the Tool errors. 
+ source: + openapi: stenographer-openapi.json + ReturnConfig: + docs: A specific config version returned from the server + properties: + id: + type: optional + docs: Identifier for a Config. Formatted as a UUID. + version: + type: optional + docs: >- + Version number for a Config. + + + Configs, Prompts, Custom Voices, and Tools are versioned. This + versioning system supports iterative development, allowing you to + progressively refine configurations and revert to previous versions if + needed. + + + Version numbers are integer values representing different iterations + of the Config. Each update to the Config increments its version + number. + evi_version: + type: optional + docs: >- + Specifies the EVI version to use. Use `"1"` for version 1, or `"2"` + for the latest enhanced version. For a detailed comparison of the two + versions, refer to our + [guide](/docs/empathic-voice-interface-evi/evi-2). + version_description: + type: optional + docs: An optional description of the Config version. + name: + type: optional + docs: Name applied to all versions of a particular Config. + created_on: + type: optional + docs: >- + Time at which the Config was created. Measured in seconds since the + Unix epoch. + modified_on: + type: optional + docs: >- + Time at which the Config was last modified. Measured in seconds since + the Unix epoch. + prompt: optional + voice: + type: optional + docs: A voice specification associated with this Config. + language_model: + type: optional + docs: >- + The supplemental language model associated with this Config. + + + This model is used to generate longer, more detailed responses from + EVI. Choosing an appropriate supplemental language model for your use + case is crucial for generating fast, high-quality responses from EVI. + ellm_model: + type: optional + docs: >- + The eLLM setup associated with this Config. + + + Hume's eLLM (empathic Large Language Model) is a multimodal language + model that takes into account both expression measures and language. + The eLLM generates short, empathic language responses and guides + text-to-speech (TTS) prosody. + tools: + type: optional>> + docs: List of user-defined tools associated with this Config. + builtin_tools: + type: optional>> + docs: List of built-in tools associated with this Config. + event_messages: optional + timeouts: optional + source: + openapi: stenographer-openapi.json + ReturnEllmModel: + docs: A specific eLLM Model configuration + properties: + allow_short_responses: + type: boolean + docs: |- + Boolean indicating if the eLLM is allowed to generate short responses. + + If omitted, short responses from the eLLM are enabled by default. + source: + openapi: stenographer-openapi.json + ReturnEventMessageSpec: + docs: A specific event message configuration to be returned from the server + properties: + enabled: + type: boolean + docs: >- + Boolean indicating if this event message is enabled. + + + If set to `true`, a message will be sent when the circumstances for + the specific event are met. + text: + type: optional + docs: >- + Text to use as the event message when the corresponding event occurs. + If no text is specified, EVI will generate an appropriate message + based on its current context and the system prompt. + source: + openapi: stenographer-openapi.json + ReturnEventMessageSpecs: + docs: >- + Collection of event messages returned by the server. + + + Event messages are sent by the server when specific events occur during a + chat session. 
These messages are used to configure behaviors for EVI, such + as controlling how EVI starts a new conversation. + properties: + on_new_chat: + type: optional + docs: >- + Specifies the initial message EVI provides when a new chat is started, + such as a greeting or welcome message. + on_inactivity_timeout: + type: optional + docs: >- + Specifies the message EVI provides when the chat is about to be + disconnected due to a user inactivity timeout, such as a message + mentioning a lack of user input for a period of time. + + + Enabling an inactivity message allows developers to use this message + event for "checking in" with the user if they are not responding to + see if they are still active. + + + If the user does not respond in the number of seconds specified in the + `inactivity_timeout` field, then EVI will say the message and the user + has 15 seconds to respond. If they respond in time, the conversation + will continue; if not, the conversation will end. + + + However, if the inactivity message is not enabled, then reaching the + inactivity timeout will immediately end the connection. + on_max_duration_timeout: + type: optional + docs: >- + Specifies the message EVI provides when the chat is disconnected due + to reaching the maximum chat duration, such as a message mentioning + the time limit for the chat has been reached. + source: + openapi: stenographer-openapi.json + ReturnLanguageModelModelProvider: + enum: + - OPEN_AI + - CUSTOM_LANGUAGE_MODEL + - ANTHROPIC + - FIREWORKS + - GROQ + - GOOGLE + docs: The provider of the supplemental language model. + source: + openapi: stenographer-openapi.json + ReturnLanguageModel: + docs: A specific LanguageModel + properties: + model_provider: + type: optional docs: The provider of the supplemental language model. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnLanguageModel: - docs: A specific LanguageModel - properties: - model_provider: - type: optional - docs: The provider of the supplemental language model. - model_resource: - type: optional - docs: String that specifies the language model to use with `model_provider`. - temperature: - type: optional - docs: >- - The model temperature, with values between 0 to 1 (inclusive). - - - Controls the randomness of the LLM’s output, with values closer to 0 - yielding focused, deterministic responses and values closer to 1 - producing more creative, diverse responses. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnTimeoutSpec: - docs: A specific timeout configuration to be returned from the server - properties: - enabled: - type: boolean - docs: >- - Boolean indicating if this timeout is enabled. - - - If set to false, EVI will not timeout due to a specified duration - being reached. However, the conversation will eventually disconnect - after 1,800 seconds (30 minutes), which is the maximum WebSocket - duration limit for EVI. - duration_secs: - type: optional - docs: >- - Duration in seconds for the timeout (e.g. 600 seconds represents 10 - minutes). - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnTimeoutSpecs: - docs: >- - Collection of timeout specifications returned by the server. - - - Timeouts are sent by the server when specific time-based events occur - during a chat session. These specifications set the inactivity timeout and - the maximum duration an EVI WebSocket connection can stay open before it - is automatically disconnected. 
- properties: - inactivity: - type: ReturnTimeoutSpec - docs: >- - Specifies the duration of user inactivity (in seconds) after which the - EVI WebSocket connection will be automatically disconnected. Default - is 600 seconds (10 minutes). - - - Accepts a minimum value of 1 second and a maximum value of 1,800 - seconds. - max_duration: - type: ReturnTimeoutSpec - docs: >- - Specifies the maximum allowed duration (in seconds) for an EVI - WebSocket connection before it is automatically disconnected. Default - is 1,800 seconds (30 minutes). - - - Accepts a minimum value of 1 second and a maximum value of 1,800 - seconds. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnVoiceName: - enum: - - ITO - - DACHER - - KORA - docs: >- - String with the name of the voice to use. Maximum length of 75 characters. - Will be converted to all-uppercase. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnVoice: - docs: A specific voice specification - properties: - provider: - type: literal<"HUME_AI"> - docs: >- - The provider of the voice to use. Currently, only `HUME_AI` is - supported as the voice provider. - name: - type: optional - docs: >- - String with the name of the voice to use. Maximum length of 75 - characters. Will be converted to all-uppercase. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - PostedCustomVoiceName: - docs: A custom voice name change to be posted to the server - properties: - name: - type: string - docs: >- - String with the name of the voice to use. Maximum length of 75 - characters. Will be converted to all-uppercase. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnPagedUserDefinedTools: - docs: A paginated list of user defined tool versions returned from the server - properties: - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: integer - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - total_pages: - type: integer - docs: The total number of pages in the collection. - tools_page: - docs: >- - List of tools returned for the specified `page_number` and - `page_size`. - type: list> - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnPagedPrompts: - docs: A paginated list of prompt versions returned from the server - properties: - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: integer - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - total_pages: - type: integer - docs: The total number of pages in the collection. - prompts_page: - docs: >- - List of prompts returned for the specified `page_number` and - `page_size`. - type: list> - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnPagedCustomVoices: - docs: A paginated list of custom voices returned from the server - properties: - page_number: - type: integer - docs: The page number of the returned results. - page_size: - type: integer - docs: The number of results returned per page. 
- total_pages: - type: integer - docs: The total number of pages in the collection - custom_voices_page: - docs: >- - List of custom voices returned for the specified page number and page - size. - type: list - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnPagedConfigs: - docs: A paginated list of config versions returned from the server - properties: - page_number: - type: optional - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: optional - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - total_pages: - type: integer - docs: The total number of pages in the collection. - configs_page: - type: optional> - docs: >- - List of configs returned for the specified `page_number` and - `page_size`. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnChatStatus: - enum: - - ACTIVE - - USER_ENDED - - USER_TIMEOUT - - MAX_DURATION_TIMEOUT - - INACTIVITY_TIMEOUT - - ERROR - docs: >- - Indicates the current state of the chat. There are six possible statuses: - - - - `ACTIVE`: The chat is currently active and ongoing. - - - - `USER_ENDED`: The chat was manually ended by the user. - - - - `USER_TIMEOUT`: The chat ended due to a user-defined timeout. - - - - `MAX_DURATION_TIMEOUT`: The chat ended because it reached the maximum - allowed duration. - - - - `INACTIVITY_TIMEOUT`: The chat ended due to an inactivity timeout. - - - - `ERROR`: The chat ended unexpectedly due to an error. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnChat: - docs: A description of chat and its status - properties: - id: - type: string - docs: Identifier for a Chat. Formatted as a UUID. - chat_group_id: - type: string - docs: >- - Identifier for the Chat Group. Any chat resumed from this Chat will - have the same `chat_group_id`. Formatted as a UUID. - status: - type: ReturnChatStatus - docs: >- - Indicates the current state of the chat. There are six possible - statuses: - - - - `ACTIVE`: The chat is currently active and ongoing. - - - - `USER_ENDED`: The chat was manually ended by the user. - - - - `USER_TIMEOUT`: The chat ended due to a user-defined timeout. - - - - `MAX_DURATION_TIMEOUT`: The chat ended because it reached the - maximum allowed duration. - - - - `INACTIVITY_TIMEOUT`: The chat ended due to an inactivity timeout. - - - - `ERROR`: The chat ended unexpectedly due to an error. - start_timestamp: - type: long - docs: >- - Time at which the Chat started. Measured in seconds since the Unix - epoch. - end_timestamp: - type: optional - docs: >- - Time at which the Chat ended. Measured in seconds since the Unix - epoch. - event_count: - type: optional - docs: The total number of events currently in this chat. - metadata: - type: optional - docs: Stringified JSON with additional metadata about the chat. - config: optional - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnConfigSpec: - docs: The Config associated with this Chat. - properties: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - version: - type: optional - docs: >- - Version number for a Config. - + model_resource: + type: optional + docs: String that specifies the language model to use with `model_provider`. 
+ temperature: + type: optional + docs: >- + The model temperature, with values between 0 to 1 (inclusive). + + + Controls the randomness of the LLM’s output, with values closer to 0 + yielding focused, deterministic responses and values closer to 1 + producing more creative, diverse responses. + source: + openapi: stenographer-openapi.json + ReturnTimeoutSpec: + docs: A specific timeout configuration to be returned from the server + properties: + enabled: + type: boolean + docs: >- + Boolean indicating if this timeout is enabled. + - Configs, as well as Prompts and Tools, are versioned. This versioning - system supports iterative development, allowing you to progressively - refine configurations and revert to previous versions if needed. + If set to false, EVI will not timeout due to a specified duration + being reached. However, the conversation will eventually disconnect + after 1,800 seconds (30 minutes), which is the maximum WebSocket + duration limit for EVI. + duration_secs: + type: optional + docs: >- + Duration in seconds for the timeout (e.g. 600 seconds represents 10 + minutes). + source: + openapi: stenographer-openapi.json + ReturnTimeoutSpecs: + docs: >- + Collection of timeout specifications returned by the server. + + + Timeouts are sent by the server when specific time-based events occur + during a chat session. These specifications set the inactivity timeout and + the maximum duration an EVI WebSocket connection can stay open before it + is automatically disconnected. + properties: + inactivity: + type: ReturnTimeoutSpec + docs: >- + Specifies the duration of user inactivity (in seconds) after which the + EVI WebSocket connection will be automatically disconnected. Default + is 600 seconds (10 minutes). - Version numbers are integer values representing different iterations - of the Config. Each update to the Config increments its version - number. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnPagedChatsPaginationDirection: - enum: - - ASC - - DESC + Accepts a minimum value of 1 second and a maximum value of 1,800 + seconds. + max_duration: + type: ReturnTimeoutSpec + docs: >- + Specifies the maximum allowed duration (in seconds) for an EVI + WebSocket connection before it is automatically disconnected. Default + is 1,800 seconds (30 minutes). + + + Accepts a minimum value of 1 second and a maximum value of 1,800 + seconds. + source: + openapi: stenographer-openapi.json + ReturnVoice: + docs: A specific voice specification + properties: + provider: + type: literal<"HUME_AI"> + docs: >- + The provider of the voice to use. Currently, only `HUME_AI` is + supported as the voice provider. + name: + type: optional docs: >- - Indicates the order in which the paginated results are presented, based on - their creation date. + The name of the specified voice. + + + This will either be the name of a previously created Custom Voice or + one of our 7 base voices: `ITO`, `KORA`, `DACHER`, `AURA`, `FINN`, + `WHIMSY`, or `STELLA`. + custom_voice: ReturnCustomVoice + source: + openapi: stenographer-openapi.json + ReturnPagedUserDefinedTools: + docs: A paginated list of user defined tool versions returned from the server + properties: + page_number: + type: integer + docs: >- + The page number of the returned list. - It shows `ASC` for ascending order (chronological, with the oldest records - first) or `DESC` for descending order (reverse-chronological, with the - newest records first). 
This value corresponds to the `ascending_order` - query parameter used in the request. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnPagedChats: - docs: A paginated list of chats returned from the server - properties: - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: integer - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - total_pages: - type: integer - docs: The total number of pages in the collection. - pagination_direction: - type: ReturnPagedChatsPaginationDirection - docs: >- - Indicates the order in which the paginated results are presented, - based on their creation date. + This value corresponds to the `page_number` parameter specified in the + request. Pagination uses zero-based indexing. + page_size: + type: integer + docs: >- + The maximum number of items returned per page. - It shows `ASC` for ascending order (chronological, with the oldest - records first) or `DESC` for descending order (reverse-chronological, - with the newest records first). This value corresponds to the - `ascending_order` query parameter used in the request. - chats_page: - docs: >- - List of Chats and their metadata returned for the specified - `page_number` and `page_size`. - type: list - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnChatEventRole: - enum: - - USER - - AGENT - - SYSTEM - - TOOL + This value corresponds to the `page_size` parameter specified in the + request. + total_pages: + type: integer + docs: The total number of pages in the collection. + tools_page: + docs: >- + List of tools returned for the specified `page_number` and + `page_size`. + type: list> + source: + openapi: stenographer-openapi.json + ReturnPagedPrompts: + docs: A paginated list of prompt versions returned from the server + properties: + page_number: + type: integer docs: >- - The role of the entity which generated the Chat Event. There are four - possible values: + The page number of the returned list. - - `USER`: The user, capable of sending user messages and interruptions. + This value corresponds to the `page_number` parameter specified in the + request. Pagination uses zero-based indexing. + page_size: + type: integer + docs: >- + The maximum number of items returned per page. - - `AGENT`: The assistant, capable of sending agent messages. + This value corresponds to the `page_size` parameter specified in the + request. + total_pages: + type: integer + docs: The total number of pages in the collection. + prompts_page: + docs: >- + List of prompts returned for the specified `page_number` and + `page_size`. + type: list> + source: + openapi: stenographer-openapi.json + ReturnPagedCustomVoices: + docs: A paginated list of custom voices returned from the server + properties: + page_number: + type: integer + docs: >- + The page number of the returned list. + + + This value corresponds to the `page_number` parameter specified in the + request. Pagination uses zero-based indexing. + page_size: + type: integer + docs: >- + The maximum number of items returned per page. + + + This value corresponds to the `page_size` parameter specified in the + request. + total_pages: + type: integer + docs: The total number of pages in the collection. 
+ custom_voices_page: + docs: List of Custom Voices for the specified `page_number` and `page_size`. + type: list + source: + openapi: stenographer-openapi.json + ReturnPagedConfigs: + docs: A paginated list of config versions returned from the server + properties: + page_number: + type: optional + docs: >- + The page number of the returned list. - - `SYSTEM`: The backend server, capable of transmitting errors. + This value corresponds to the `page_number` parameter specified in the + request. Pagination uses zero-based indexing. + page_size: + type: optional + docs: >- + The maximum number of items returned per page. - - `TOOL`: The function calling mechanism. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnChatEventType: - enum: - - SYSTEM_PROMPT - - USER_MESSAGE - - USER_INTERRUPTION - - AGENT_MESSAGE - - FUNCTION_CALL - - FUNCTION_CALL_RESPONSE + This value corresponds to the `page_size` parameter specified in the + request. + total_pages: + type: integer + docs: The total number of pages in the collection. + configs_page: + type: optional> docs: >- - Type of Chat Event. There are six possible values: + List of configs returned for the specified `page_number` and + `page_size`. + source: + openapi: stenographer-openapi.json + ReturnChatStatus: + enum: + - ACTIVE + - USER_ENDED + - USER_TIMEOUT + - MAX_DURATION_TIMEOUT + - INACTIVITY_TIMEOUT + - ERROR + docs: >- + Indicates the current state of the chat. There are six possible statuses: - - `SYSTEM_PROMPT`: Contains the system prompt for use in the session. + - `ACTIVE`: The chat is currently active and ongoing. - - `USER_MESSAGE`: Contains the message sent by the user. + - `USER_ENDED`: The chat was manually ended by the user. - - `USER_INTERRUPTION`: Contains an interruption made by the user while the - agent is speaking. + - `USER_TIMEOUT`: The chat ended due to a user-defined timeout. - - `AGENT_MESSAGE`: Contains the assistant’s message, generated by Hume’s - eLLM and supplemental LLM. + - `MAX_DURATION_TIMEOUT`: The chat ended because it reached the maximum + allowed duration. - - `FUNCTION_CALL`: Contains the invocation of a tool. + - `INACTIVITY_TIMEOUT`: The chat ended due to an inactivity timeout. - - `FUNCTION_CALL_RESPONSE`: Contains the tool response. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnChatEvent: - docs: A description of a single event in a chat returned from the server - properties: - id: - type: string - docs: Identifier for a Chat Event. Formatted as a UUID. - chat_id: - type: string - docs: Identifier for the Chat this event occurred in. Formatted as a UUID. - timestamp: - type: long - docs: >- - Time at which the Chat Event occurred. Measured in seconds since the - Unix epoch. - role: - type: ReturnChatEventRole - docs: >- - The role of the entity which generated the Chat Event. There are four - possible values: + - `ERROR`: The chat ended unexpectedly due to an error. + source: + openapi: stenographer-openapi.json + ReturnChat: + docs: A description of chat and its status + properties: + id: + type: string + docs: Identifier for a Chat. Formatted as a UUID. + chat_group_id: + type: string + docs: >- + Identifier for the Chat Group. Any chat resumed from this Chat will + have the same `chat_group_id`. Formatted as a UUID. + status: + type: ReturnChatStatus + docs: >- + Indicates the current state of the chat. There are six possible + statuses: + + - `ACTIVE`: The chat is currently active and ongoing. 
- - `USER`: The user, capable of sending user messages and - interruptions. + - `USER_ENDED`: The chat was manually ended by the user. - - `AGENT`: The assistant, capable of sending agent messages. + - `USER_TIMEOUT`: The chat ended due to a user-defined timeout. - - `SYSTEM`: The backend server, capable of transmitting errors. + - `MAX_DURATION_TIMEOUT`: The chat ended because it reached the + maximum allowed duration. - - `TOOL`: The function calling mechanism. - type: - type: ReturnChatEventType - docs: >- - Type of Chat Event. There are six possible values: + - `INACTIVITY_TIMEOUT`: The chat ended due to an inactivity timeout. - - `SYSTEM_PROMPT`: Contains the system prompt for use in the session. + + - `ERROR`: The chat ended unexpectedly due to an error. + start_timestamp: + type: long + docs: >- + Time at which the Chat started. Measured in seconds since the Unix + epoch. + end_timestamp: + type: optional + docs: >- + Time at which the Chat ended. Measured in seconds since the Unix + epoch. + event_count: + type: optional + docs: The total number of events currently in this chat. + metadata: + type: optional + docs: Stringified JSON with additional metadata about the chat. + config: optional + source: + openapi: stenographer-openapi.json + ReturnConfigSpec: + docs: The Config associated with this Chat. + properties: + id: + type: string + docs: Identifier for a Config. Formatted as a UUID. + version: + type: optional + docs: >- + Version number for a Config. + + + Configs, Prompts, Custom Voices, and Tools are versioned. This + versioning system supports iterative development, allowing you to + progressively refine configurations and revert to previous versions if + needed. + + + Version numbers are integer values representing different iterations + of the Config. Each update to the Config increments its version + number. + source: + openapi: stenographer-openapi.json + ReturnPagedChatsPaginationDirection: + enum: + - ASC + - DESC + docs: >- + Indicates the order in which the paginated results are presented, based on + their creation date. + + + It shows `ASC` for ascending order (chronological, with the oldest records + first) or `DESC` for descending order (reverse-chronological, with the + newest records first). This value corresponds to the `ascending_order` + query parameter used in the request. + source: + openapi: stenographer-openapi.json + ReturnPagedChats: + docs: A paginated list of chats returned from the server + properties: + page_number: + type: integer + docs: >- + The page number of the returned list. + + + This value corresponds to the `page_number` parameter specified in the + request. Pagination uses zero-based indexing. + page_size: + type: integer + docs: >- + The maximum number of items returned per page. + + + This value corresponds to the `page_size` parameter specified in the + request. + total_pages: + type: integer + docs: The total number of pages in the collection. + pagination_direction: + type: ReturnPagedChatsPaginationDirection + docs: >- + Indicates the order in which the paginated results are presented, + based on their creation date. + + + It shows `ASC` for ascending order (chronological, with the oldest + records first) or `DESC` for descending order (reverse-chronological, + with the newest records first). This value corresponds to the + `ascending_order` query parameter used in the request. + chats_page: + docs: >- + List of Chats and their metadata returned for the specified + `page_number` and `page_size`. 
+ type: list + source: + openapi: stenographer-openapi.json + ReturnChatEventRole: + enum: + - USER + - AGENT + - SYSTEM + - TOOL + docs: >- + The role of the entity which generated the Chat Event. There are four + possible values: - - `USER_MESSAGE`: Contains the message sent by the user. + - `USER`: The user, capable of sending user messages and interruptions. - - `USER_INTERRUPTION`: Contains an interruption made by the user while - the agent is speaking. + - `AGENT`: The assistant, capable of sending agent messages. - - `AGENT_MESSAGE`: Contains the assistant’s message, generated by - Hume’s eLLM and supplemental LLM. + - `SYSTEM`: The backend server, capable of transmitting errors. - - `FUNCTION_CALL`: Contains the invocation of a tool. + - `TOOL`: The function calling mechanism. + source: + openapi: stenographer-openapi.json + ReturnChatEventType: + enum: + - SYSTEM_PROMPT + - USER_MESSAGE + - USER_INTERRUPTION + - AGENT_MESSAGE + - FUNCTION_CALL + - FUNCTION_CALL_RESPONSE + docs: >- + Type of Chat Event. There are six possible values: - - `FUNCTION_CALL_RESPONSE`: Contains the tool response. - message_text: - type: optional - docs: >- - The text of the Chat Event. This field contains the message content - for each event type listed in the `type` field. - emotion_features: - type: optional - docs: >- - Stringified JSON containing the prosody model inference results. + - `SYSTEM_PROMPT`: Contains the system prompt for use in the session. - EVI uses the prosody model to measure 48 expressions related to speech - and vocal characteristics. These results contain a detailed emotional - and tonal analysis of the audio. Scores typically range from 0 to 1, - with higher values indicating a stronger confidence level in the - measured attribute. - metadata: - type: optional - docs: Stringified JSON with additional metadata about the chat event. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnChatPagedEventsStatus: - enum: - - ACTIVE - - USER_ENDED - - USER_TIMEOUT - - MAX_DURATION_TIMEOUT - - INACTIVITY_TIMEOUT - - ERROR + - `USER_MESSAGE`: Contains the message sent by the user. + + + - `USER_INTERRUPTION`: Contains an interruption made by the user while the + agent is speaking. + + + - `AGENT_MESSAGE`: Contains the assistant’s message, generated by Hume’s + eLLM and supplemental LLM. + + + - `FUNCTION_CALL`: Contains the invocation of a tool. + + + - `FUNCTION_CALL_RESPONSE`: Contains the tool response. + source: + openapi: stenographer-openapi.json + ReturnChatEvent: + docs: A description of a single event in a chat returned from the server + properties: + id: + type: string + docs: Identifier for a Chat Event. Formatted as a UUID. + chat_id: + type: string + docs: Identifier for the Chat this event occurred in. Formatted as a UUID. + timestamp: + type: long + docs: >- + Time at which the Chat Event occurred. Measured in seconds since the + Unix epoch. + role: + type: ReturnChatEventRole + docs: >- + The role of the entity which generated the Chat Event. There are four + possible values: + + + - `USER`: The user, capable of sending user messages and + interruptions. + + + - `AGENT`: The assistant, capable of sending agent messages. + + + - `SYSTEM`: The backend server, capable of transmitting errors. + + + - `TOOL`: The function calling mechanism. + type: + type: ReturnChatEventType docs: >- - Indicates the current state of the chat. There are six possible statuses: + Type of Chat Event. 
There are six possible values: - - `ACTIVE`: The chat is currently active and ongoing. + - `SYSTEM_PROMPT`: Contains the system prompt for use in the session. - - `USER_ENDED`: The chat was manually ended by the user. + - `USER_MESSAGE`: Contains the message sent by the user. - - `USER_TIMEOUT`: The chat ended due to a user-defined timeout. + - `USER_INTERRUPTION`: Contains an interruption made by the user while + the agent is speaking. - - `MAX_DURATION_TIMEOUT`: The chat ended because it reached the maximum - allowed duration. + - `AGENT_MESSAGE`: Contains the assistant’s message, generated by + Hume’s eLLM and supplemental LLM. - - `INACTIVITY_TIMEOUT`: The chat ended due to an inactivity timeout. + - `FUNCTION_CALL`: Contains the invocation of a tool. - - `ERROR`: The chat ended unexpectedly due to an error. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnChatPagedEventsPaginationDirection: - enum: - - ASC - - DESC + - `FUNCTION_CALL_RESPONSE`: Contains the tool response. + message_text: + type: optional + docs: >- + The text of the Chat Event. This field contains the message content + for each event type listed in the `type` field. + emotion_features: + type: optional + docs: >- + Stringified JSON containing the prosody model inference results. + + + EVI uses the prosody model to measure 48 expressions related to speech + and vocal characteristics. These results contain a detailed emotional + and tonal analysis of the audio. Scores typically range from 0 to 1, + with higher values indicating a stronger confidence level in the + measured attribute. + metadata: + type: optional + docs: Stringified JSON with additional metadata about the chat event. + source: + openapi: stenographer-openapi.json + ReturnChatPagedEventsStatus: + enum: + - ACTIVE + - USER_ENDED + - USER_TIMEOUT + - MAX_DURATION_TIMEOUT + - INACTIVITY_TIMEOUT + - ERROR + docs: >- + Indicates the current state of the chat. There are six possible statuses: + + + - `ACTIVE`: The chat is currently active and ongoing. + + + - `USER_ENDED`: The chat was manually ended by the user. + + + - `USER_TIMEOUT`: The chat ended due to a user-defined timeout. + + + - `MAX_DURATION_TIMEOUT`: The chat ended because it reached the maximum + allowed duration. + + + - `INACTIVITY_TIMEOUT`: The chat ended due to an inactivity timeout. + + + - `ERROR`: The chat ended unexpectedly due to an error. + source: + openapi: stenographer-openapi.json + ReturnChatPagedEventsPaginationDirection: + enum: + - ASC + - DESC + docs: >- + Indicates the order in which the paginated results are presented, based on + their creation date. + + + It shows `ASC` for ascending order (chronological, with the oldest records + first) or `DESC` for descending order (reverse-chronological, with the + newest records first). This value corresponds to the `ascending_order` + query parameter used in the request. + source: + openapi: stenographer-openapi.json + ReturnChatPagedEvents: + docs: >- + A description of chat status with a paginated list of chat events returned + from the server + properties: + id: + type: string + docs: Identifier for a Chat. Formatted as a UUID. + chat_group_id: + type: string docs: >- - Indicates the order in which the paginated results are presented, based on - their creation date. + Identifier for the Chat Group. Any chat resumed from this Chat will + have the same `chat_group_id`. Formatted as a UUID. + status: + type: ReturnChatPagedEventsStatus + docs: >- + Indicates the current state of the chat. 
There are six possible
+          statuses:
+
+
+          - `ACTIVE`: The chat is currently active and ongoing.
+
+
+          - `USER_ENDED`: The chat was manually ended by the user.
+
+
+          - `USER_TIMEOUT`: The chat ended due to a user-defined timeout.
+
+
+          - `MAX_DURATION_TIMEOUT`: The chat ended because it reached the
+          maximum allowed duration.
+
+
+          - `INACTIVITY_TIMEOUT`: The chat ended due to an inactivity timeout.
+
+
+          - `ERROR`: The chat ended unexpectedly due to an error.
+      start_timestamp:
+        type: long
+        docs: >-
+          Time at which the Chat started. Measured in seconds since the Unix
+          epoch.
+      end_timestamp:
+        type: optional
+        docs: >-
+          Time at which the Chat ended. Measured in seconds since the Unix
+          epoch.
+      pagination_direction:
+        type: ReturnChatPagedEventsPaginationDirection
+        docs: >-
+          Indicates the order in which the paginated results are presented,
+          based on their creation date.
+
+
+          It shows `ASC` for ascending order (chronological, with the oldest
+          records first) or `DESC` for descending order (reverse-chronological,
+          with the newest records first). This value corresponds to the
+          `ascending_order` query parameter used in the request.
+      events_page:
+        docs: List of Chat Events for the specified `page_number` and `page_size`.
+        type: list
+      metadata:
+        type: optional
+        docs: Stringified JSON with additional metadata about the chat.
+      page_number:
+        type: integer
+        docs: >-
+          The page number of the returned list.
+
+
+          This value corresponds to the `page_number` parameter specified in the
+          request. Pagination uses zero-based indexing.
+      page_size:
+        type: integer
+        docs: >-
+          The maximum number of items returned per page.
+
+
+          This value corresponds to the `page_size` parameter specified in the
+          request.
+      total_pages:
+        type: integer
+        docs: The total number of pages in the collection.
+      config: optional
+    source:
+      openapi: stenographer-openapi.json
+  ReturnActiveChatCount:
+    docs: A description of current chat sessions for a user
+    properties:
+      timestamp:
+        type: long
+        docs: >-
+          The timestamp for when chat status was measured. Formatted as Unix
+          epoch milliseconds.
+      total_user_active_chats:
+        type: integer
+        docs: The total number of active chats for this user.
+      max_allowed_active_chats:
+        type: optional
+        docs: The maximum number of concurrent active chats for this user.
+      more_active_chats_allowed:
+        type: boolean
+        docs: Boolean indicating if the user is allowed to start more chats.
+      per_tag:
+        type: optional>>
+        docs: Optional list of chat counts per tag.
+    source:
+      openapi: stenographer-openapi.json
+  ReturnActiveChatCountPerTag:
+    docs: A description of current chat sessions per tag
+    properties:
+      tag:
+        type: string
+        docs: User tag applied to a chat.
+      total_tag_active_chats:
+        type: integer
+        docs: The total number of active chats for this user with the specified tag.
+    source:
+      openapi: stenographer-openapi.json
+  ReturnChatGroup:
+    docs: A description of chat_group and its status
+    properties:
+      id:
+        type: string
+        docs: >-
+          Identifier for the Chat Group. Any Chat resumed from this Chat Group
+          will have the same `chat_group_id`. Formatted as a UUID.
+      first_start_timestamp:
+        type: long
+        docs: >-
+          Time at which the first Chat in this Chat Group was created. Measured
+          in seconds since the Unix epoch.
+      most_recent_start_timestamp:
+        type: long
+        docs: >-
+          Time at which the most recent Chat in this Chat Group was created.
+          Measured in seconds since the Unix epoch.
+      most_recent_chat_id:
+        type: optional
+        docs: >-
+          The `chat_id` of the most recent Chat in this Chat Group. Formatted as
+          a UUID.
+      num_chats:
+        type: integer
+        docs: The total number of Chats in this Chat Group.
+      active: optional
+    source:
+      openapi: stenographer-openapi.json
+  ReturnPagedChatGroupsPaginationDirection:
+    enum:
+      - ASC
+      - DESC
+    docs: >-
+      Indicates the order in which the paginated results are presented, based on
+      their creation date.
+
+
+      It shows `ASC` for ascending order (chronological, with the oldest records
+      first) or `DESC` for descending order (reverse-chronological, with the
+      newest records first). This value corresponds to the `ascending_order`
+      query parameter used in the request.
+    source:
+      openapi: stenographer-openapi.json
+  ReturnPagedChatGroups:
+    docs: A paginated list of chat_groups returned from the server
+    properties:
+      page_number:
+        type: integer
+        docs: >-
+          The page number of the returned list.
+
+
+          This value corresponds to the `page_number` parameter specified in the
+          request. Pagination uses zero-based indexing.
+      page_size:
+        type: integer
+        docs: >-
+          The maximum number of items returned per page.
+
+
+          This value corresponds to the `page_size` parameter specified in the
+          request.
+      total_pages:
+        type: integer
+        docs: The total number of pages in the collection.
+      pagination_direction:
+        type: ReturnPagedChatGroupsPaginationDirection
+        docs: >-
+          Indicates the order in which the paginated results are presented,
+          based on their creation date.
+
+
+          It shows `ASC` for ascending order (chronological, with the oldest
+          records first) or `DESC` for descending order (reverse-chronological,
+          with the newest records first). This value corresponds to the
+          `ascending_order` query parameter used in the request.
+      chat_groups_page:
+        docs: >-
+          List of Chat Groups and their metadata returned for the specified
+          `page_number` and `page_size`.
+        type: list
+    source:
+      openapi: stenographer-openapi.json
+  ReturnChatGroupPagedChats:
+    docs: >-
+      A description of chat_group and its status with a paginated list of each
+      chat in the chat_group
+    properties:
+      id:
+        type: string
+        docs: >-
+          Identifier for the chat group. Any chat resumed from this chat will
+          have the same chat_group_id. Formatted as a UUID.
+      first_start_timestamp:
+        type: long
+        docs: >-
+          The timestamp when the first chat in this chat group started,
+          formatted as Unix epoch milliseconds.
+      most_recent_start_timestamp:
+        type: long
+        docs: >-
+          The timestamp when the most recent chat in this chat group started,
+          formatted as Unix epoch milliseconds.
+      num_chats:
+        type: integer
+        docs: The total number of chats in this chat group.
+      page_number:
+        type: integer
+        docs: The page number of the returned results.
+      page_size:
+        type: integer
+        docs: The number of results returned per page.
+      total_pages:
+        type: integer
+        docs: The total number of pages in the collection.
+      pagination_direction:
+        type: string
+        docs: The direction of the pagination (ASC or DESC).
+      chats_page:
+        docs: >-
+          List of chats and their metadata returned for the specified page
+          number and page size.
+ type: list + active: optional + source: + openapi: stenographer-openapi.json + ReturnChatGroupPagedEventsPaginationDirection: + enum: + - ASC + - DESC + docs: >- + Indicates the order in which the paginated results are presented, based on + their creation date. + + + It shows `ASC` for ascending order (chronological, with the oldest records + first) or `DESC` for descending order (reverse-chronological, with the + newest records first). This value corresponds to the `ascending_order` + query parameter used in the request. + source: + openapi: stenographer-openapi.json + ReturnChatGroupPagedEvents: + docs: >- + A paginated list of chat events that occurred across chats in this + chat_group from the server + properties: + id: + type: string + docs: >- + Identifier for the Chat Group. Any Chat resumed from this Chat Group + will have the same `chat_group_id`. Formatted as a UUID. + page_number: + type: integer + docs: >- + The page number of the returned list. + + + This value corresponds to the `page_number` parameter specified in the + request. Pagination uses zero-based indexing. + page_size: + type: integer + docs: >- + The maximum number of items returned per page. + + + This value corresponds to the `page_size` parameter specified in the + request. + total_pages: + type: integer + docs: The total number of pages in the collection. + pagination_direction: + type: ReturnChatGroupPagedEventsPaginationDirection + docs: >- + Indicates the order in which the paginated results are presented, + based on their creation date. + + + It shows `ASC` for ascending order (chronological, with the oldest + records first) or `DESC` for descending order (reverse-chronological, + with the newest records first). This value corresponds to the + `ascending_order` query parameter used in the request. + events_page: + docs: List of Chat Events for the specified `page_number` and `page_size`. + type: list + source: + openapi: stenographer-openapi.json + PostedPromptSpec: + docs: A Prompt associated with this Config. + properties: + version: optional + source: + openapi: stenographer-openapi.json + AssistantInput: + docs: When provided, the input is spoken by EVI. + properties: + type: + type: literal<"assistant_input"> + docs: >- + The type of message sent through the socket; must be `assistant_input` + for our server to correctly identify and process it as an Assistant + Input message. + custom_session_id: + type: optional + docs: >- + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. + text: + type: string + docs: >- + Assistant text to synthesize into spoken audio and insert into the + conversation. + + + EVI uses this text to generate spoken audio using our proprietary + expressive text-to-speech model. Our model adds appropriate emotional + inflections and tones to the text based on the user’s expressions and + the context of the conversation. The synthesized audio is streamed + back to the user as an [Assistant + Message](/reference/empathic-voice-interface-evi/chat/chat#receive.Assistant%20Message.type). + source: + openapi: assistant-asyncapi.json + AudioConfiguration: + properties: + encoding: + type: Encoding + docs: Encoding format of the audio input, such as `linear16`. + channels: + type: integer + docs: Number of audio channels. + sample_rate: + type: integer + docs: >- + Audio sample rate. Number of samples per second in the audio input, + measured in Hertz. 
+ source: + openapi: assistant-asyncapi.json + AudioInput: + docs: When provided, the input is audio. + properties: + type: + type: literal<"audio_input"> + docs: >- + The type of message sent through the socket; must be `audio_input` for + our server to correctly identify and process it as an Audio Input + message. + + + This message is used for sending audio input data to EVI for + processing and expression measurement. Audio data should be sent as a + continuous stream, encoded in Base64. + custom_session_id: + type: optional + docs: >- + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. + data: + type: string + docs: >- + Base64 encoded audio input to insert into the conversation. + + + The content of an Audio Input message is treated as the user’s speech + to EVI and must be streamed continuously. Pre-recorded audio files are + not supported. + + + For optimal transcription quality, the audio data should be + transmitted in small chunks. + + + Hume recommends streaming audio with a buffer window of 20 + milliseconds (ms), or 100 milliseconds (ms) for web applications. + source: + openapi: assistant-asyncapi.json + BuiltInTool: + type: literal<"web_search"> + docs: >- + Name of the built-in tool. Set to `web_search` to equip EVI with the + built-in Web Search tool. + BuiltinToolConfig: + properties: + name: BuiltInTool + fallback_content: + type: optional + docs: >- + Optional text passed to the supplemental LLM if the tool call fails. + The LLM then uses this text to generate a response back to the user, + ensuring continuity in the conversation. + source: + openapi: assistant-asyncapi.json + Context: + properties: + type: + type: optional + docs: >- + The persistence level of the injected context. Specifies how long the + injected context will remain active in the session. + + + There are three possible context types: + + + - **Persistent**: The context is appended to all user messages for the + duration of the session. + + + - **Temporary**: The context is appended only to the next user + message. + + - **Editable**: The original context is updated to reflect the new context. + + If the type is not specified, it will default to `temporary`. + text: + type: string + docs: >- + The context to be injected into the conversation. Helps inform the + LLM's response by providing relevant information about the ongoing + conversation. + + + This text will be appended to the end of user messages based on the + chosen persistence level. For example, if you want to remind EVI of + its role as a helpful weather assistant, the context you insert will + be appended to the end of user messages as `{Context: You are a + helpful weather assistant}`. + source: + openapi: assistant-asyncapi.json + ContextType: + enum: + - editable + - persistent + - temporary + source: + openapi: assistant-asyncapi.json + Encoding: literal<"linear16"> + ErrorLevel: literal<"warn"> + PauseAssistantMessage: + docs: >- + Pause responses from EVI. Chat history is still saved and sent after + resuming. + properties: + type: + type: literal<"pause_assistant_message"> + docs: >- + The type of message sent through the socket; must be + `pause_assistant_message` for our server to correctly identify and + process it as a Pause Assistant message. + + + Once this message is sent, EVI will not respond until a [Resume + Assistant + message](/reference/empathic-voice-interface-evi/chat/chat#send.Resume%20Assistant%20Message.type) + is sent. 
When paused, EVI won’t respond, but transcriptions of your + audio inputs will still be recorded. + custom_session_id: + type: optional + docs: >- + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. + source: + openapi: assistant-asyncapi.json + ResumeAssistantMessage: + docs: >- + Resume responses from EVI. Chat history sent while paused will now be + sent. + properties: + type: + type: literal<"resume_assistant_message"> + docs: >- + The type of message sent through the socket; must be + `resume_assistant_message` for our server to correctly identify and + process it as a Resume Assistant message. + + + Upon resuming, if any audio input was sent during the pause, EVI will + retain context from all messages sent but only respond to the last + user message. (e.g., If you ask EVI two questions while paused and + then send a `resume_assistant_message`, EVI will respond to the second + question and have added the first question to its conversation + context.) + custom_session_id: + type: optional + docs: >- + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. + source: + openapi: assistant-asyncapi.json + SessionSettings: + docs: Settings for this chat session. + properties: + type: + type: literal<"session_settings"> + docs: >- + The type of message sent through the socket; must be + `session_settings` for our server to correctly identify and process it + as a Session Settings message. + + + Session settings are temporary and apply only to the current Chat + session. These settings can be adjusted dynamically based on the + requirements of each session to ensure optimal performance and user + experience. + + + For more information, please refer to the [Session Settings + section](/docs/empathic-voice-interface-evi/configuration#session-settings) + on the EVI Configuration page. + custom_session_id: + type: optional + docs: >- + Unique identifier for the session. Used to manage conversational + state, correlate frontend and backend data, and persist conversations + across EVI sessions. + + + If included, the response sent from Hume to your backend will include + this ID. This allows you to correlate frontend users with their + incoming messages. + + + It is recommended to pass a `custom_session_id` if you are using a + Custom Language Model. Please see our guide to [using a custom + language + model](/docs/empathic-voice-interface-evi/custom-language-model) with + EVI to learn more. + system_prompt: + type: optional + docs: >- + Instructions used to shape EVI’s behavior, responses, and style for + the session. + + + When included in a Session Settings message, the provided Prompt + overrides the existing one specified in the EVI configuration. If no + Prompt was defined in the configuration, this Prompt will be the one + used for the session. + + + You can use the Prompt to define a specific goal or role for EVI, + specifying how it should act or what it should focus on during the + conversation. For example, EVI can be instructed to act as a customer + support representative, a fitness coach, or a travel advisor, each + with its own set of behaviors and response styles. + + + For help writing a system prompt, see our [Prompting + Guide](/docs/empathic-voice-interface-evi/prompting). 
+ context: + type: optional + docs: >- + Allows developers to inject additional context into the conversation, + which is appended to the end of user messages for the session. + + + When included in a Session Settings message, the provided context can + be used to remind the LLM of its role in every user message, prevent + it from forgetting important details, or add new relevant information + to the conversation. + + + Set to `null` to disable context injection. + audio: + type: optional + docs: >- + Configuration details for the audio input used during the session. + Ensures the audio is being correctly set up for processing. + + + This optional field is only required when the audio input is encoded + in PCM Linear 16 (16-bit, little-endian, signed PCM WAV data). For + detailed instructions on how to configure session settings for PCM + Linear 16 audio, please refer to the [Session Settings + section](/docs/empathic-voice-interface-evi/configuration#session-settings) + on the EVI Configuration page. + language_model_api_key: + type: optional + docs: >- + Third party API key for the supplemental language model. + + + When provided, EVI will use this key instead of Hume’s API key for the + supplemental LLM. This allows you to bypass rate limits and utilize + your own API key as needed. + tools: + type: optional> + docs: >- + List of user-defined tools to enable for the session. + + + Tools are resources used by EVI to perform various tasks, such as + searching the web or calling external APIs. Built-in tools, like web + search, are natively integrated, while user-defined tools are created + and invoked by the user. To learn more, see our [Tool Use + Guide](/docs/empathic-voice-interface-evi/tool-use). + builtin_tools: + type: optional> + docs: >- + List of built-in tools to enable for the session. + + + Tools are resources used by EVI to perform various tasks, such as + searching the web or calling external APIs. Built-in tools, like web + search, are natively integrated, while user-defined tools are created + and invoked by the user. To learn more, see our [Tool Use + Guide](/docs/empathic-voice-interface-evi/tool-use). + + + Currently, the only built-in tool Hume provides is **Web Search**. + When enabled, Web Search equips EVI with the ability to search the web + for up-to-date information. + metadata: optional> + variables: + type: optional> + docs: Dynamic values that can be used to populate EVI prompts. + source: + openapi: assistant-asyncapi.json + Tool: + properties: + type: + type: ToolType + docs: Type of tool. Set to `function` for user-defined tools. + name: + type: string + docs: Name of the user-defined tool to be enabled. + parameters: + type: string + docs: >- + Parameters of the tool. Is a stringified JSON schema. + + + These parameters define the inputs needed for the tool’s execution, + including the expected data type and description for each input field. + Structured as a JSON schema, this format ensures the tool receives + data in the expected format. + description: + type: optional + docs: >- + An optional description of what the tool does, used by the + supplemental LLM to choose when and how to call the function. + fallback_content: + type: optional + docs: >- + Optional text passed to the supplemental LLM if the tool call fails. + The LLM then uses this text to generate a response back to the user, + ensuring continuity in the conversation. + source: + openapi: assistant-asyncapi.json + ToolErrorMessage: + docs: When provided, the output is a function call error. 
+ properties: + type: + type: literal<"tool_error"> + docs: >- + The type of message sent through the socket; for a Tool Error message, + this must be `tool_error`. + + + Upon receiving a [Tool Call + message](/reference/empathic-voice-interface-evi/chat/chat#receive.Tool%20Call%20Message.type) + and failing to invoke the function, this message is sent to notify EVI + of the tool's failure. + custom_session_id: + type: optional + docs: >- + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. + tool_type: + type: optional + docs: >- + Type of tool called. Either `builtin` for natively implemented tools, + like web search, or `function` for user-defined tools. + tool_call_id: + type: string + docs: >- + The unique identifier for a specific tool call instance. + + + This ID is used to track the request and response of a particular tool + invocation, ensuring that the Tool Error message is linked to the + appropriate tool call request. The specified `tool_call_id` must match + the one received in the [Tool Call + message](/reference/empathic-voice-interface-evi/chat/chat#receive.Tool%20Call%20Message.type). + content: + type: optional + docs: >- + Optional text passed to the supplemental LLM in place of the tool call + result. The LLM then uses this text to generate a response back to the + user, ensuring continuity in the conversation if the tool errors. + error: + type: string + docs: Error message from the tool call, not exposed to the LLM or user. + code: + type: optional + docs: Error code. Identifies the type of error encountered. + level: + type: optional + docs: >- + Indicates the severity of an error; for a Tool Error message, this + must be `warn` to signal an unexpected event. + source: + openapi: assistant-asyncapi.json + ToolResponseMessage: + docs: When provided, the output is a function call response. + properties: + type: + type: literal<"tool_response"> + docs: >- + The type of message sent through the socket; for a Tool Response + message, this must be `tool_response`. + + + Upon receiving a [Tool Call + message](/reference/empathic-voice-interface-evi/chat/chat#receive.Tool%20Call%20Message.type) + and successfully invoking the function, this message is sent to convey + the result of the function call back to EVI. + custom_session_id: + type: optional + docs: >- + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. + tool_call_id: + type: string + docs: >- + The unique identifier for a specific tool call instance. + + + This ID is used to track the request and response of a particular tool + invocation, ensuring that the correct response is linked to the + appropriate request. The specified `tool_call_id` must match the one + received in the [Tool Call + message](/reference/empathic-voice-interface-evi/chat/chat#receive.Tool%20Call%20Message.tool_call_id). + content: + type: string + docs: >- + Return value of the tool call. Contains the output generated by the + tool to pass back to EVI. + tool_name: + type: optional + docs: >- + Name of the tool. + + + Include this optional field to help the supplemental LLM identify + which tool generated the response. The specified `tool_name` must + match the one received in the [Tool Call + message](/reference/empathic-voice-interface-evi/chat/chat#receive.Tool%20Call%20Message.type). + tool_type: + type: optional + docs: >- + Type of tool called. 
Either `builtin` for natively implemented tools, + like web search, or `function` for user-defined tools. + source: + openapi: assistant-asyncapi.json + ToolType: + enum: + - builtin + - function + source: + openapi: assistant-asyncapi.json + UserInput: + docs: User text to insert into the conversation. + properties: + type: + type: literal<"user_input"> + docs: >- + The type of message sent through the socket; must be `user_input` for + our server to correctly identify and process it as a User Input + message. + custom_session_id: + type: optional + docs: >- + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. + text: + type: string + docs: >- + User text to insert into the conversation. Text sent through a User + Input message is treated as the user’s speech to EVI. EVI processes + this input and provides a corresponding response. + + + Expression measurement results are not available for User Input + messages, as the prosody model relies on audio input and cannot + process text alone. + source: + openapi: assistant-asyncapi.json + AssistantEnd: + docs: When provided, the output is an assistant end message. + properties: + type: + type: literal<"assistant_end"> + docs: >- + The type of message sent through the socket; for an Assistant End + message, this must be `assistant_end`. + + + This message indicates the conclusion of the assistant’s response, + signaling that the assistant has finished speaking for the current + conversational turn. + custom_session_id: + type: optional + docs: >- + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. + source: + openapi: assistant-asyncapi.json + AssistantMessage: + docs: When provided, the output is an assistant message. + properties: + type: + type: literal<"assistant_message"> docs: >- - A description of chat status with a paginated list of chat events returned - from the server - properties: - id: - type: string - docs: Identifier for a Chat. Formatted as a UUID. - chat_group_id: - type: string - docs: >- - Identifier for the Chat Group. Any chat resumed from this Chat will - have the same `chat_group_id`. Formatted as a UUID. - status: - type: ReturnChatPagedEventsStatus - docs: >- - Indicates the current state of the chat. There are six possible - statuses: - - - - `ACTIVE`: The chat is currently active and ongoing. - - - - `USER_ENDED`: The chat was manually ended by the user. - - - - `USER_TIMEOUT`: The chat ended due to a user-defined timeout. - - - - `MAX_DURATION_TIMEOUT`: The chat ended because it reached the - maximum allowed duration. - - - - `INACTIVITY_TIMEOUT`: The chat ended due to an inactivity timeout. - - - - `ERROR`: The chat ended unexpectedly due to an error. - start_timestamp: - type: long - docs: >- - Time at which the Chat started. Measured in seconds since the Unix - epoch. - end_timestamp: - type: optional - docs: >- - Time at which the Chat ended. Measured in seconds since the Unix - epoch. - pagination_direction: - type: ReturnChatPagedEventsPaginationDirection - docs: >- - Indicates the order in which the paginated results are presented, - based on their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest - records first) or `DESC` for descending order (reverse-chronological, - with the newest records first). This value corresponds to the - `ascending_order` query parameter used in the request. 
- events_page: - docs: List of Chat Events for the specified `page_number` and `page_size`. - type: list - metadata: - type: optional - docs: Stringified JSON with additional metadata about the chat. - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: integer - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - total_pages: - type: integer - docs: The total number of pages in the collection. - config: optional - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnActiveChatCount: - docs: A description of current chat chat sessions for a user - properties: - timestamp: - type: long - docs: >- - The timestamp for when chat status was measured. Formatted as a Unix - epoch milliseconds. - total_user_active_chats: - type: integer - docs: The total number of active chats for this user. - max_allowed_active_chats: - type: optional - docs: The maximum number of concurrent active chats for this user. - more_active_chats_allowed: - type: boolean - docs: Boolean indicating if the user is allowed to start more chats. - per_tag: - type: optional>> - docs: Optional List of chat counts per tag. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnActiveChatCountPerTag: - docs: A description of current chat chat sessions per tag - properties: - tag: - type: string - docs: User tag applied to a chat. - total_tag_active_chats: - type: integer - docs: The total number of active chats for this user with the specified tag. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnChatGroup: - docs: A description of chat_group and its status - properties: - id: - type: string - docs: >- - Identifier for the Chat Group. Any Chat resumed from this Chat Group - will have the same `chat_group_id`. Formatted as a UUID. - first_start_timestamp: - type: long - docs: >- - Time at which the first Chat in this Chat Group was created. Measured - in seconds since the Unix epoch. - most_recent_start_timestamp: - type: long - docs: >- - Time at which the most recent Chat in this Chat Group was created. - Measured in seconds since the Unix epoch. - most_recent_chat_id: - type: optional - docs: >- - The `chat_id` of the most recent Chat in this Chat Group. Formatted as - a UUID. - num_chats: - type: integer - docs: The total number of Chats in this Chat Group. - active: optional - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnPagedChatGroupsPaginationDirection: - enum: - - ASC - - DESC - docs: >- - Indicates the order in which the paginated results are presented, based on - their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest records - first) or `DESC` for descending order (reverse-chronological, with the - newest records first). This value corresponds to the `ascending_order` - query parameter used in the request. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnPagedChatGroups: - docs: A paginated list of chat_groups returned from the server - properties: - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. 
- page_size: - type: integer - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - total_pages: - type: integer - docs: The total number of pages in the collection. - pagination_direction: - type: ReturnPagedChatGroupsPaginationDirection - docs: >- - Indicates the order in which the paginated results are presented, - based on their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest - records first) or `DESC` for descending order (reverse-chronological, - with the newest records first). This value corresponds to the - `ascending_order` query parameter used in the request. - chat_groups_page: - docs: >- - List of Chat Groups and their metadata returned for the specified - `page_number` and `page_size`. - type: list - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnChatGroupPagedChats: - docs: >- - A description of chat_group and its status with a paginated list of each - chat in the chat_group - properties: - id: - type: string - docs: >- - Identifier for the chat group. Any chat resumed from this chat will - have the same chat_group_id. Formatted as a UUID. - first_start_timestamp: - type: long - docs: >- - The timestamp when the first chat in this chat group started, - formatted as a Unix epoch milliseconds. - most_recent_start_timestamp: - type: long - docs: >- - The timestamp when the most recent chat in this chat group started, - formatted as a Unix epoch milliseconds. - num_chats: - type: integer - docs: The total number of chats in this chat group. - page_number: - type: integer - docs: The page number of the returned results. - page_size: - type: integer - docs: The number of results returned per page. - total_pages: - type: integer - docs: The total number of pages in the collection - pagination_direction: - type: string - docs: The direction of the pagination (ASC or DESC). - chats_page: - docs: >- - List of chats and their metadata returned for the specified page - number and page size. - type: list - active: optional - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnChatGroupPagedEventsPaginationDirection: - enum: - - ASC - - DESC - docs: >- - Indicates the order in which the paginated results are presented, based on - their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest records - first) or `DESC` for descending order (reverse-chronological, with the - newest records first). This value corresponds to the `ascending_order` - query parameter used in the request. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - ReturnChatGroupPagedEvents: - docs: >- - A paginated list of chat events that occurred across chats in this - chat_group from the server - properties: - id: - type: string - docs: >- - Identifier for the Chat Group. Any Chat resumed from this Chat Group - will have the same `chat_group_id`. Formatted as a UUID. - page_number: - type: integer - docs: >- - The page number of the returned list. - - - This value corresponds to the `page_number` parameter specified in the - request. Pagination uses zero-based indexing. - page_size: - type: integer - docs: >- - The maximum number of items returned per page. - - - This value corresponds to the `page_size` parameter specified in the - request. - total_pages: - type: integer - docs: The total number of pages in the collection. 
- pagination_direction: - type: ReturnChatGroupPagedEventsPaginationDirection - docs: >- - Indicates the order in which the paginated results are presented, - based on their creation date. - - - It shows `ASC` for ascending order (chronological, with the oldest - records first) or `DESC` for descending order (reverse-chronological, - with the newest records first). This value corresponds to the - `ascending_order` query parameter used in the request. - events_page: - docs: List of Chat Events for the specified `page_number` and `page_size`. - type: list - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json - AssistantInput: - docs: When provided, the input is spoken by EVI. - properties: - type: - type: literal<"assistant_input"> - docs: >- - The type of message sent through the socket; must be `assistant_input` - for our server to correctly identify and process it as an Assistant - Input message. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - text: - type: string - docs: >- - Assistant text to synthesize into spoken audio and insert into the - conversation. - - - EVI uses this text to generate spoken audio using our proprietary - expressive text-to-speech model. Our model adds appropriate emotional - inflections and tones to the text based on the user’s expressions and - the context of the conversation. The synthesized audio is streamed - back to the user as an [Assistant - Message](/reference/empathic-voice-interface-evi/chat/chat#receive.Assistant%20Message.type). - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - AudioConfiguration: - properties: - encoding: - type: Encoding - docs: Encoding format of the audio input, such as `linear16`. - channels: - type: integer - docs: Number of audio channels. - sample_rate: - type: integer - docs: >- - Audio sample rate. Number of samples per second in the audio input, - measured in Hertz. - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - AudioInput: - docs: When provided, the input is audio. - properties: - type: - type: literal<"audio_input"> - docs: >- - The type of message sent through the socket; must be `audio_input` for - our server to correctly identify and process it as an Audio Input - message. - - - This message is used for sending audio input data to EVI for - processing and expression measurement. Audio data should be sent as a - continuous stream, encoded in Base64. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - data: - type: string - docs: >- - Base64 encoded audio input to insert into the conversation. - - - The content of an Audio Input message is treated as the user’s speech - to EVI and must be streamed continuously. Pre-recorded audio files are - not supported. - - - For optimal transcription quality, the audio data should be - transmitted in small chunks. - - - Hume recommends streaming audio with a buffer window of 20 - milliseconds (ms), or 100 milliseconds (ms) for web applications. - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - BuiltInTool: - type: literal<"web_search"> - docs: >- - Name of the built-in tool. Set to `web_search` to equip EVI with the - built-in Web Search tool. 
- BuiltinToolConfig: - properties: - name: BuiltInTool - fallback_content: - type: optional - docs: >- - Optional text passed to the supplemental LLM if the tool call fails. - The LLM then uses this text to generate a response back to the user, - ensuring continuity in the conversation. - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - Context: - properties: - type: - type: optional - docs: >- - The persistence level of the injected context. Specifies how long the - injected context will remain active in the session. - - - There are three possible context types: - - - - **Persistent**: The context is appended to all user messages for the - duration of the session. - - - - **Temporary**: The context is appended only to the next user - message. - - - **Editable**: The original context is updated to reflect the new context. - - If the type is not specified, it will default to `temporary`. - text: - type: string - docs: >- - The context to be injected into the conversation. Helps inform the - LLM's response by providing relevant information about the ongoing - conversation. - - - This text will be appended to the end of user messages based on the - chosen persistence level. For example, if you want to remind EVI of - its role as a helpful weather assistant, the context you insert will - be appended to the end of user messages as `{Context: You are a - helpful weather assistant}`. - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - ContextType: - enum: - - editable - - persistent - - temporary - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - Encoding: literal<"linear16"> - ErrorLevel: literal<"warn"> - PauseAssistantMessage: - docs: >- - Pause responses from EVI. Chat history is still saved and sent after - resuming. - properties: - type: - type: literal<"pause_assistant_message"> - docs: >- - The type of message sent through the socket; must be - `pause_assistant_message` for our server to correctly identify and - process it as a Pause Assistant message. - - - Once this message is sent, EVI will not respond until a [Resume - Assistant - message](/reference/empathic-voice-interface-evi/chat/chat#send.Resume%20Assistant%20Message.type) - is sent. When paused, EVI won’t respond, but transcriptions of your - audio inputs will still be recorded. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - ResumeAssistantMessage: - docs: >- - Resume responses from EVI. Chat history sent while paused will now be - sent. - properties: - type: - type: literal<"resume_assistant_message"> - docs: >- - The type of message sent through the socket; must be - `resume_assistant_message` for our server to correctly identify and - process it as a Resume Assistant message. - - - Upon resuming, if any audio input was sent during the pause, EVI will - retain context from all messages sent but only respond to the last - user message. (e.g., If you ask EVI two questions while paused and - then send a `resume_assistant_message`, EVI will respond to the second - question and have added the first question to its conversation - context.) - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. 
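Editorial note: taken together, the Pause/Resume docs above describe a mute-style pattern: send `pause_assistant_message` to stop EVI from responding (audio is still transcribed), then `resume_assistant_message` when you want replies again, after which EVI answers only the last user message sent during the pause. A minimal sketch, assuming `send_text` writes a text frame to the already-open chat socket:

```python
import json

def set_assistant_paused(send_text, paused, custom_session_id=None):
    """Send pause_assistant_message or resume_assistant_message (illustrative)."""
    msg = {"type": "pause_assistant_message" if paused else "resume_assistant_message"}
    if custom_session_id is not None:
        msg["custom_session_id"] = custom_session_id
    send_text(json.dumps(msg))
```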
- source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - SessionSettings: - docs: Settings for this chat session. - properties: - type: - type: literal<"session_settings"> - docs: >- - The type of message sent through the socket; must be - `session_settings` for our server to correctly identify and process it - as a Session Settings message. - - - Session settings are temporary and apply only to the current Chat - session. These settings can be adjusted dynamically based on the - requirements of each session to ensure optimal performance and user - experience. - - - For more information, please refer to the [Session Settings - section](/docs/empathic-voice-interface-evi/configuration#session-settings) - on the EVI Configuration page. - custom_session_id: - type: optional - docs: >- - Unique identifier for the session. Used to manage conversational - state, correlate frontend and backend data, and persist conversations - across EVI sessions. - - - If included, the response sent from Hume to your backend will include - this ID. This allows you to correlate frontend users with their - incoming messages. - - - It is recommended to pass a `custom_session_id` if you are using a - Custom Language Model. Please see our guide to [using a custom - language - model](/docs/empathic-voice-interface-evi/custom-language-model) with - EVI to learn more. - system_prompt: - type: optional - docs: >- - Instructions used to shape EVI’s behavior, responses, and style for - the session. - - - When included in a Session Settings message, the provided Prompt - overrides the existing one specified in the EVI configuration. If no - Prompt was defined in the configuration, this Prompt will be the one - used for the session. - - - You can use the Prompt to define a specific goal or role for EVI, - specifying how it should act or what it should focus on during the - conversation. For example, EVI can be instructed to act as a customer - support representative, a fitness coach, or a travel advisor, each - with its own set of behaviors and response styles. - - - For help writing a system prompt, see our [Prompting - Guide](/docs/empathic-voice-interface-evi/prompting). - context: - type: optional - docs: >- - Allows developers to inject additional context into the conversation, - which is appended to the end of user messages for the session. - - - When included in a Session Settings message, the provided context can - be used to remind the LLM of its role in every user message, prevent - it from forgetting important details, or add new relevant information - to the conversation. - - - Set to `null` to disable context injection. - audio: - type: optional - docs: >- - Configuration details for the audio input used during the session. - Ensures the audio is being correctly set up for processing. - - - This optional field is only required when the audio input is encoded - in PCM Linear 16 (16-bit, little-endian, signed PCM WAV data). For - detailed instructions on how to configure session settings for PCM - Linear 16 audio, please refer to the [Session Settings - section](/docs/empathic-voice-interface-evi/configuration#session-settings) - on the EVI Configuration page. - language_model_api_key: - type: optional - docs: >- - Third party API key for the supplemental language model. - - - When provided, EVI will use this key instead of Hume’s API key for the - supplemental LLM. This allows you to bypass rate limits and utilize - your own API key as needed. 
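Editorial note: the Session Settings fields documented above map onto a single JSON message sent after the socket opens. The sketch below is a hedged example; the prompt text, context text, IDs, and audio parameters are placeholder values, and the `audio` block is only needed when streaming PCM Linear 16 input as noted in the docs.

```python
import json

session_settings = {
    "type": "session_settings",
    "custom_session_id": "user-1234-session-5678",     # optional correlation ID
    "system_prompt": "You are a helpful weather assistant.",
    "context": {
        "type": "persistent",                           # persistent | temporary | editable
        "text": "You are a helpful weather assistant",
    },
    "audio": {                                          # only required for linear16 input
        "encoding": "linear16",
        "channels": 1,
        "sample_rate": 16000,
    },
    # "language_model_api_key": "<supplemental LLM key>",  # optional, overrides Hume's key
}

payload = json.dumps(session_settings)   # send over the open chat WebSocket
```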
- tools: - type: optional> - docs: >- - List of user-defined tools to enable for the session. - - - Tools are resources used by EVI to perform various tasks, such as - searching the web or calling external APIs. Built-in tools, like web - search, are natively integrated, while user-defined tools are created - and invoked by the user. To learn more, see our [Tool Use - Guide](/docs/empathic-voice-interface-evi/tool-use). - builtin_tools: - type: optional> - docs: >- - List of built-in tools to enable for the session. - - - Tools are resources used by EVI to perform various tasks, such as - searching the web or calling external APIs. Built-in tools, like web - search, are natively integrated, while user-defined tools are created - and invoked by the user. To learn more, see our [Tool Use - Guide](/docs/empathic-voice-interface-evi/tool-use). - - - Currently, the only built-in tool Hume provides is **Web Search**. - When enabled, Web Search equips EVI with the ability to search the web - for up-to-date information. - metadata: optional> - variables: - type: optional> - docs: Dynamic values that can be used to populate EVI prompts. - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - Tool: - properties: - type: - type: ToolType - docs: Type of tool. Set to `function` for user-defined tools. - name: - type: string - docs: Name of the user-defined tool to be enabled. - parameters: - type: string - docs: >- - Parameters of the tool. Is a stringified JSON schema. - - - These parameters define the inputs needed for the tool’s execution, - including the expected data type and description for each input field. - Structured as a JSON schema, this format ensures the tool receives - data in the expected format. - description: - type: optional - docs: >- - An optional description of what the tool does, used by the - supplemental LLM to choose when and how to call the function. - fallback_content: - type: optional - docs: >- - Optional text passed to the supplemental LLM if the tool call fails. - The LLM then uses this text to generate a response back to the user, - ensuring continuity in the conversation. - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - ToolErrorMessage: - docs: When provided, the output is a function call error. - properties: - type: - type: literal<"tool_error"> - docs: >- - The type of message sent through the socket; for a Tool Error message, - this must be `tool_error`. - - - Upon receiving a [Tool Call - message](/reference/empathic-voice-interface-evi/chat/chat#receive.Tool%20Call%20Message.type) - and failing to invoke the function, this message is sent to notify EVI - of the tool's failure. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - tool_type: - type: optional - docs: >- - Type of tool called. Either `builtin` for natively implemented tools, - like web search, or `function` for user-defined tools. - tool_call_id: - type: string - docs: >- - The unique identifier for a specific tool call instance. - - - This ID is used to track the request and response of a particular tool - invocation, ensuring that the Tool Error message is linked to the - appropriate tool call request. The specified `tool_call_id` must match - the one received in the [Tool Call - message](/reference/empathic-voice-interface-evi/chat/chat#receive.Tool%20Call%20Message.type). 
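Editorial note: a common gotcha with the `Tool` schema above is that `parameters` is a *stringified* JSON schema rather than a nested object, so tool entries for the `tools` list are typically built by serializing the schema with `json.dumps`. A hedged sketch with a made-up `get_weather` tool:

```python
import json

get_weather_tool = {
    "type": "function",                      # user-defined tools use the `function` type
    "name": "get_weather",
    "description": "Look up the current weather for a location.",
    "parameters": json.dumps({               # stringified JSON schema, as the docs require
        "type": "object",
        "properties": {
            "location": {"type": "string", "description": "City and region"},
            "units": {"type": "string", "enum": ["celsius", "fahrenheit"]},
        },
        "required": ["location"],
    }),
    "fallback_content": "Weather data is unavailable right now.",
}

# e.g. sent as part of session settings: {"type": "session_settings", "tools": [get_weather_tool]}
```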
- content: - type: optional - docs: >- - Optional text passed to the supplemental LLM in place of the tool call - result. The LLM then uses this text to generate a response back to the - user, ensuring continuity in the conversation if the tool errors. - error: - type: string - docs: Error message from the tool call, not exposed to the LLM or user. - code: - type: optional - docs: Error code. Identifies the type of error encountered. - level: - type: optional - docs: >- - Indicates the severity of an error; for a Tool Error message, this - must be `warn` to signal an unexpected event. - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - ToolResponseMessage: - docs: When provided, the output is a function call response. - properties: - type: - type: literal<"tool_response"> - docs: >- - The type of message sent through the socket; for a Tool Response - message, this must be `tool_response`. - - - Upon receiving a [Tool Call - message](/reference/empathic-voice-interface-evi/chat/chat#receive.Tool%20Call%20Message.type) - and successfully invoking the function, this message is sent to convey - the result of the function call back to EVI. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - tool_call_id: - type: string - docs: >- - The unique identifier for a specific tool call instance. - - - This ID is used to track the request and response of a particular tool - invocation, ensuring that the correct response is linked to the - appropriate request. The specified `tool_call_id` must match the one - received in the [Tool Call - message](/reference/empathic-voice-interface-evi/chat/chat#receive.Tool%20Call%20Message.tool_call_id). - content: - type: string - docs: >- - Return value of the tool call. Contains the output generated by the - tool to pass back to EVI. - tool_name: - type: optional - docs: >- - Name of the tool. - - - Include this optional field to help the supplemental LLM identify - which tool generated the response. The specified `tool_name` must - match the one received in the [Tool Call - message](/reference/empathic-voice-interface-evi/chat/chat#receive.Tool%20Call%20Message.type). - tool_type: - type: optional - docs: >- - Type of tool called. Either `builtin` for natively implemented tools, - like web search, or `function` for user-defined tools. - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - ToolType: - enum: - - builtin - - function - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - UserInput: - docs: User text to insert into the conversation. - properties: - type: - type: literal<"user_input"> - docs: >- - The type of message sent through the socket; must be `user_input` for - our server to correctly identify and process it as a User Input - message. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - text: - type: string - docs: >- - User text to insert into the conversation. Text sent through a User - Input message is treated as the user’s speech to EVI. EVI processes - this input and provides a corresponding response. - - - Expression measurement results are not available for User Input - messages, as the prosody model relies on audio input and cannot - process text alone. 
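Editorial note: the Tool Response and Tool Error docs above define the contract for answering a Tool Call: echo the same `tool_call_id`, then reply with either the tool's output (`tool_response`) or an error plus optional fallback text (`tool_error`). A minimal illustrative handler, where `run_tool` is a hypothetical local dispatcher and `send_text` writes to the open socket:

```python
import json

def handle_tool_call(tool_call, run_tool, send_text):
    """Answer a received tool_call message with tool_response or tool_error (sketch)."""
    try:
        result = run_tool(tool_call["name"], json.loads(tool_call["parameters"]))
        reply = {
            "type": "tool_response",
            "tool_call_id": tool_call["tool_call_id"],   # must match the tool_call request
            "content": json.dumps(result),
            "tool_name": tool_call["name"],              # optional, helps the supplemental LLM
        }
    except Exception as exc:
        reply = {
            "type": "tool_error",
            "tool_call_id": tool_call["tool_call_id"],
            "error": str(exc),                           # not exposed to the LLM or user
            "content": "The tool failed; please continue without it.",
            "level": "warn",
        }
    send_text(json.dumps(reply))
```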
- source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - AssistantEnd: - docs: When provided, the output is an assistant end message. - properties: - type: - type: literal<"assistant_end"> - docs: >- - The type of message sent through the socket; for an Assistant End - message, this must be `assistant_end`. - - - This message indicates the conclusion of the assistant’s response, - signaling that the assistant has finished speaking for the current - conversational turn. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - AssistantMessage: - docs: When provided, the output is an assistant message. - properties: - type: - type: literal<"assistant_message"> - docs: >- - The type of message sent through the socket; for an Assistant Message, - this must be `assistant_message`. - - - This message contains both a transcript of the assistant’s response - and the expression measurement predictions of the assistant’s audio - output. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - id: - type: optional - docs: >- - ID of the assistant message. Allows the Assistant Message to be - tracked and referenced. - message: - type: ChatMessage - docs: Transcript of the message. - models: - type: Inference - docs: Inference model results. - from_text: - type: boolean - docs: >- - Indicates if this message was inserted into the conversation as text - from an [Assistant Input - message](/reference/empathic-voice-interface-evi/chat/chat#send.Assistant%20Input.text). - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - AudioOutput: - docs: When provided, the output is audio. - properties: - type: - type: literal<"audio_output"> - docs: >- - The type of message sent through the socket; for an Audio Output - message, this must be `audio_output`. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - id: - type: string - docs: >- - ID of the audio output. Allows the Audio Output message to be tracked - and referenced. - data: - type: string - docs: >- - Base64 encoded audio output. This encoded audio is transmitted to the - client, where it can be decoded and played back as part of the user - interaction. - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - ChatMessageToolResult: - discriminated: false + The type of message sent through the socket; for an Assistant Message, + this must be `assistant_message`. + + + This message contains both a transcript of the assistant’s response + and the expression measurement predictions of the assistant’s audio + output. + custom_session_id: + type: optional + docs: >- + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. + id: + type: optional + docs: >- + ID of the assistant message. Allows the Assistant Message to be + tracked and referenced. + message: + type: ChatMessage + docs: Transcript of the message. + models: + type: Inference + docs: Inference model results. 
+ from_text: + type: boolean + docs: >- + Indicates if this message was inserted into the conversation as text + from an [Assistant Input + message](/reference/empathic-voice-interface-evi/chat/chat#send.Assistant%20Input.text). + source: + openapi: assistant-asyncapi.json + AudioOutput: + docs: When provided, the output is audio. + properties: + type: + type: literal<"audio_output"> + docs: >- + The type of message sent through the socket; for an Audio Output + message, this must be `audio_output`. + custom_session_id: + type: optional + docs: >- + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. + id: + type: string + docs: >- + ID of the audio output. Allows the Audio Output message to be tracked + and referenced. + data: + type: string + docs: >- + Base64 encoded audio output. This encoded audio is transmitted to the + client, where it can be decoded and played back as part of the user + interaction. + source: + openapi: assistant-asyncapi.json + ChatMessageToolResult: + discriminated: false + docs: Function call response from client. + union: + - ToolResponseMessage + - ToolErrorMessage + source: + openapi: assistant-asyncapi.json + ChatMessage: + properties: + role: + type: Role + docs: Role of who is providing the message. + content: + type: optional + docs: Transcript of the message. + tool_call: + type: optional + docs: Function call name and arguments. + tool_result: + type: optional docs: Function call response from client. - union: - - ToolResponseMessage - - ToolErrorMessage - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - ChatMessage: - properties: - role: - type: Role - docs: Role of who is providing the message. - content: - type: optional - docs: Transcript of the message. - tool_call: - type: optional - docs: Function call name and arguments. - tool_result: - type: optional - docs: Function call response from client. - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - ChatMetadata: - docs: When provided, the output is a chat metadata message. - properties: - type: - type: literal<"chat_metadata"> - docs: >- - The type of message sent through the socket; for a Chat Metadata - message, this must be `chat_metadata`. - - - The Chat Metadata message is the first message you receive after - establishing a connection with EVI and contains important identifiers - for the current Chat session. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - chat_group_id: - type: string - docs: >- - ID of the Chat Group. - - - Used to resume a Chat when passed in the - [resumed_chat_group_id](/reference/empathic-voice-interface-evi/chat/chat#request.query.resumed_chat_group_id) - query parameter of a subsequent connection request. This allows EVI to - continue the conversation from where it left off within the Chat - Group. - - - Learn more about [supporting chat - resumability](/docs/empathic-voice-interface-evi/faq#does-evi-support-chat-resumability) - from the EVI FAQ. - chat_id: - type: string - docs: >- - ID of the Chat session. Allows the Chat session to be tracked and - referenced. - request_id: - type: optional - docs: ID of the initiating request. 
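Editorial note: since `chat_metadata` is documented as the first message received and `audio_output` carries Base64-encoded audio, a receive loop usually branches on the `type` field. The sketch below is a hedged version of that dispatch; `play_audio` and the `messages` iterator stand in for your own playback layer and WebSocket client.

```python
import base64
import json

def handle_messages(messages, play_audio):
    """Dispatch incoming EVI messages by their `type` field (illustrative)."""
    chat_group_id = None
    for raw in messages:                               # text frames from the chat socket
        event = json.loads(raw)
        kind = event["type"]
        if kind == "chat_metadata":
            chat_group_id = event["chat_group_id"]     # keep for resumed_chat_group_id later
        elif kind == "audio_output":
            play_audio(base64.b64decode(event["data"]))
        elif kind == "assistant_message":
            print("EVI:", event["message"].get("content"))
        elif kind == "error":
            print("error:", event["code"], event["slug"], event["message"])
    return chat_group_id
```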
- source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - EmotionScores: - properties: - Admiration: double - Adoration: double - Aesthetic Appreciation: double - Amusement: double - Anger: double - Anxiety: double - Awe: double - Awkwardness: double - Boredom: double - Calmness: double - Concentration: double - Confusion: double - Contemplation: double - Contempt: double - Contentment: double - Craving: double - Desire: double - Determination: double - Disappointment: double - Disgust: double - Distress: double - Doubt: double - Ecstasy: double - Embarrassment: double - Empathic Pain: double - Entrancement: double - Envy: double - Excitement: double - Fear: double - Guilt: double - Horror: double - Interest: double - Joy: double - Love: double - Nostalgia: double - Pain: double - Pride: double - Realization: double - Relief: double - Romance: double - Sadness: double - Satisfaction: double - Shame: double - Surprise (negative): double - Surprise (positive): double - Sympathy: double - Tiredness: double - Triumph: double - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - WebSocketError: - docs: When provided, the output is an error message. - properties: - type: - type: literal<"error"> - docs: >- - The type of message sent through the socket; for a Web Socket Error - message, this must be `error`. - - - This message indicates a disruption in the WebSocket connection, such - as an unexpected disconnection, protocol error, or data transmission - issue. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - code: - type: string - docs: Error code. Identifies the type of error encountered. - slug: - type: string - docs: >- - Short, human-readable identifier and description for the error. See a - complete list of error slugs on the [Errors - page](/docs/resources/errors). - message: - type: string - docs: Detailed description of the error. - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - Inference: - properties: - prosody: - type: optional - docs: >- - Prosody model inference results. - - - EVI uses the prosody model to measure 48 emotions related to speech - and vocal characteristics within a given expression. - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - MillisecondInterval: - properties: - begin: - type: integer - docs: Start time of the interval in milliseconds. - end: - type: integer - docs: End time of the interval in milliseconds. - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - ProsodyInference: - properties: - scores: - type: EmotionScores - docs: >- - The confidence scores for 48 emotions within the detected expression - of an audio sample. - - - Scores typically range from 0 to 1, with higher values indicating a - stronger confidence level in the measured attribute. - - - See our guide on [interpreting expression measurement - results](/docs/expression-measurement/faq#how-do-i-interpret-my-results) - to learn more. - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - Role: - enum: - - assistant - - system - - user - - all - - tool - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - ToolCallMessage: - docs: When provided, the output is a tool call. - properties: - name: - type: string - docs: Name of the tool called. - parameters: - type: string - docs: >- - Parameters of the tool. 
- - - These parameters define the inputs needed for the tool’s execution, - including the expected data type and description for each input field. - Structured as a stringified JSON schema, this format ensures the tool - receives data in the expected format. - tool_call_id: - type: string - docs: >- - The unique identifier for a specific tool call instance. - - - This ID is used to track the request and response of a particular tool - invocation, ensuring that the correct response is linked to the - appropriate request. - type: - type: literal<"tool_call"> - docs: >- - The type of message sent through the socket; for a Tool Call message, - this must be `tool_call`. - - - This message indicates that the supplemental LLM has detected a need - to invoke the specified tool. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - tool_type: - type: optional - docs: >- - Type of tool called. Either `builtin` for natively implemented tools, - like web search, or `function` for user-defined tools. - response_required: - type: boolean - docs: >- - Indicates whether a response to the tool call is required from the - developer, either in the form of a [Tool Response - message](/reference/empathic-voice-interface-evi/chat/chat#send.Tool%20Response%20Message.type) - or a [Tool Error - message](/reference/empathic-voice-interface-evi/chat/chat#send.Tool%20Error%20Message.type). - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - UserInterruption: - docs: When provided, the output is an interruption. - properties: - type: - type: literal<"user_interruption"> - docs: >- - The type of message sent through the socket; for a User Interruption - message, this must be `user_interruption`. - - - This message indicates the user has interrupted the assistant’s - response. EVI detects the interruption in real-time and sends this - message to signal the interruption event. This message allows the - system to stop the current audio playback, clear the audio queue, and - prepare to handle new user input. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - time: - type: integer - docs: Unix timestamp of the detected user interruption. - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - UserMessage: - docs: When provided, the output is a user message. - properties: - type: - type: literal<"user_message"> - docs: >- - The type of message sent through the socket; for a User Message, this - must be `user_message`. - - - This message contains both a transcript of the user’s input and the - expression measurement predictions if the input was sent as an [Audio - Input - message](/reference/empathic-voice-interface-evi/chat/chat#send.Audio%20Input.type). - Expression measurement predictions are not provided for a [User Input - message](/reference/empathic-voice-interface-evi/chat/chat#send.User%20Input.type), - as the prosody model relies on audio input and cannot process text - alone. - custom_session_id: - type: optional - docs: >- - Used to manage conversational state, correlate frontend and backend - data, and persist conversations across EVI sessions. - message: - type: ChatMessage - docs: Transcript of the message. - models: - type: Inference - docs: Inference model results. 
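Editorial note: the User Interruption docs above spell out the expected client reaction: stop playback, clear any queued audio, and wait for the next user turn. Likewise, `user_message` carries prosody scores that can be inspected directly. A small hedged sketch; `playback_queue` and `stop_playback` are assumptions standing in for whatever audio layer you use.

```python
def on_user_interruption(event, playback_queue, stop_playback):
    """React to a user_interruption message: halt audio and flush the queue (sketch)."""
    stop_playback()
    playback_queue.clear()
    print("interrupted at", event["time"])           # Unix timestamp from the message

def top_emotions(user_message, n=3):
    """Return the n highest-scoring prosody emotions from a user_message, if present."""
    prosody = user_message["models"].get("prosody") or {}
    scores = prosody.get("scores", {})
    return sorted(scores.items(), key=lambda kv: kv[1], reverse=True)[:n]
```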
- time: - type: MillisecondInterval - docs: Start and End time of user message. - from_text: - type: boolean - docs: >- - Indicates if this message was inserted into the conversation as text - from a [User - Input](/reference/empathic-voice-interface-evi/chat/chat#send.User%20Input.text) - message. - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - JsonMessage: - discriminated: false - union: - - AssistantEnd - - AssistantMessage - - ChatMetadata - - WebSocketError - - UserInterruption - - UserMessage - - ToolCallMessage - - ToolResponseMessage - - ToolErrorMessage - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - TtsInput: - properties: - type: optional> - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - TextInput: - properties: - type: optional> - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - FunctionCallResponseInput: - properties: - type: optional> - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - ExtendedVoiceArgs: - properties: - text: string - voice_args: VoiceArgs - source: - openapi: ../empathic-voice-interface/assistant-openapi.json - HTTPValidationError: - properties: - detail: optional> - source: - openapi: ../empathic-voice-interface/assistant-openapi.json - ValidationErrorLocItem: - discriminated: false - union: - - string - - integer - source: - openapi: ../empathic-voice-interface/assistant-openapi.json - ValidationError: - properties: - loc: list - msg: string - type: string - source: - openapi: ../empathic-voice-interface/assistant-openapi.json - VoiceArgs: - properties: - voice: optional - baseline: - type: optional - default: false - reconstruct: - type: optional - default: false - source: - openapi: ../empathic-voice-interface/assistant-openapi.json - VoiceNameEnum: - enum: - - ITO - - KORA - - DACHER - source: - openapi: ../empathic-voice-interface/assistant-openapi.json + source: + openapi: assistant-asyncapi.json + ChatMetadata: + docs: When provided, the output is a chat metadata message. + properties: + type: + type: literal<"chat_metadata"> + docs: >- + The type of message sent through the socket; for a Chat Metadata + message, this must be `chat_metadata`. + + + The Chat Metadata message is the first message you receive after + establishing a connection with EVI and contains important identifiers + for the current Chat session. + custom_session_id: + type: optional + docs: >- + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. + chat_group_id: + type: string + docs: >- + ID of the Chat Group. + + + Used to resume a Chat when passed in the + [resumed_chat_group_id](/reference/empathic-voice-interface-evi/chat/chat#request.query.resumed_chat_group_id) + query parameter of a subsequent connection request. This allows EVI to + continue the conversation from where it left off within the Chat + Group. + + + Learn more about [supporting chat + resumability](/docs/empathic-voice-interface-evi/faq#does-evi-support-chat-resumability) + from the EVI FAQ. + chat_id: + type: string + docs: >- + ID of the Chat session. Allows the Chat session to be tracked and + referenced. + request_id: + type: optional + docs: ID of the initiating request. 
+ source: + openapi: assistant-asyncapi.json + EmotionScores: + properties: + Admiration: double + Adoration: double + Aesthetic Appreciation: double + Amusement: double + Anger: double + Anxiety: double + Awe: double + Awkwardness: double + Boredom: double + Calmness: double + Concentration: double + Confusion: double + Contemplation: double + Contempt: double + Contentment: double + Craving: double + Desire: double + Determination: double + Disappointment: double + Disgust: double + Distress: double + Doubt: double + Ecstasy: double + Embarrassment: double + Empathic Pain: double + Entrancement: double + Envy: double + Excitement: double + Fear: double + Guilt: double + Horror: double + Interest: double + Joy: double + Love: double + Nostalgia: double + Pain: double + Pride: double + Realization: double + Relief: double + Romance: double + Sadness: double + Satisfaction: double + Shame: double + Surprise (negative): double + Surprise (positive): double + Sympathy: double + Tiredness: double + Triumph: double + source: + openapi: assistant-asyncapi.json + WebSocketError: + docs: When provided, the output is an error message. + properties: + type: + type: literal<"error"> + docs: >- + The type of message sent through the socket; for a Web Socket Error + message, this must be `error`. + + + This message indicates a disruption in the WebSocket connection, such + as an unexpected disconnection, protocol error, or data transmission + issue. + custom_session_id: + type: optional + docs: >- + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. + code: + type: string + docs: Error code. Identifies the type of error encountered. + slug: + type: string + docs: >- + Short, human-readable identifier and description for the error. See a + complete list of error slugs on the [Errors + page](/docs/resources/errors). + message: + type: string + docs: Detailed description of the error. + source: + openapi: assistant-asyncapi.json + Inference: + properties: + prosody: + type: optional + docs: >- + Prosody model inference results. + + + EVI uses the prosody model to measure 48 emotions related to speech + and vocal characteristics within a given expression. + source: + openapi: assistant-asyncapi.json + MillisecondInterval: + properties: + begin: + type: integer + docs: Start time of the interval in milliseconds. + end: + type: integer + docs: End time of the interval in milliseconds. + source: + openapi: assistant-asyncapi.json + ProsodyInference: + properties: + scores: + type: EmotionScores + docs: >- + The confidence scores for 48 emotions within the detected expression + of an audio sample. + + + Scores typically range from 0 to 1, with higher values indicating a + stronger confidence level in the measured attribute. + + + See our guide on [interpreting expression measurement + results](/docs/expression-measurement/faq#how-do-i-interpret-my-results) + to learn more. + source: + openapi: assistant-asyncapi.json + Role: + enum: + - assistant + - system + - user + - all + - tool + source: + openapi: assistant-asyncapi.json + ToolCallMessage: + docs: When provided, the output is a tool call. + properties: + name: + type: string + docs: Name of the tool called. + parameters: + type: string + docs: >- + Parameters of the tool. + + + These parameters define the inputs needed for the tool’s execution, + including the expected data type and description for each input field. 
+ Structured as a stringified JSON schema, this format ensures the tool + receives data in the expected format. + tool_call_id: + type: string + docs: >- + The unique identifier for a specific tool call instance. + + + This ID is used to track the request and response of a particular tool + invocation, ensuring that the correct response is linked to the + appropriate request. + type: + type: literal<"tool_call"> + docs: >- + The type of message sent through the socket; for a Tool Call message, + this must be `tool_call`. + + + This message indicates that the supplemental LLM has detected a need + to invoke the specified tool. + custom_session_id: + type: optional + docs: >- + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. + tool_type: + type: optional + docs: >- + Type of tool called. Either `builtin` for natively implemented tools, + like web search, or `function` for user-defined tools. + response_required: + type: boolean + docs: >- + Indicates whether a response to the tool call is required from the + developer, either in the form of a [Tool Response + message](/reference/empathic-voice-interface-evi/chat/chat#send.Tool%20Response%20Message.type) + or a [Tool Error + message](/reference/empathic-voice-interface-evi/chat/chat#send.Tool%20Error%20Message.type). + source: + openapi: assistant-asyncapi.json + UserInterruption: + docs: When provided, the output is an interruption. + properties: + type: + type: literal<"user_interruption"> + docs: >- + The type of message sent through the socket; for a User Interruption + message, this must be `user_interruption`. + + + This message indicates the user has interrupted the assistant’s + response. EVI detects the interruption in real-time and sends this + message to signal the interruption event. This message allows the + system to stop the current audio playback, clear the audio queue, and + prepare to handle new user input. + custom_session_id: + type: optional + docs: >- + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. + time: + type: integer + docs: Unix timestamp of the detected user interruption. + source: + openapi: assistant-asyncapi.json + UserMessage: + docs: When provided, the output is a user message. + properties: + type: + type: literal<"user_message"> + docs: >- + The type of message sent through the socket; for a User Message, this + must be `user_message`. + + + This message contains both a transcript of the user’s input and the + expression measurement predictions if the input was sent as an [Audio + Input + message](/reference/empathic-voice-interface-evi/chat/chat#send.Audio%20Input.type). + Expression measurement predictions are not provided for a [User Input + message](/reference/empathic-voice-interface-evi/chat/chat#send.User%20Input.type), + as the prosody model relies on audio input and cannot process text + alone. + custom_session_id: + type: optional + docs: >- + Used to manage conversational state, correlate frontend and backend + data, and persist conversations across EVI sessions. + message: + type: ChatMessage + docs: Transcript of the message. + models: + type: Inference + docs: Inference model results. + time: + type: MillisecondInterval + docs: Start and End time of user message. 
+ from_text: + type: boolean + docs: >- + Indicates if this message was inserted into the conversation as text + from a [User + Input](/reference/empathic-voice-interface-evi/chat/chat#send.User%20Input.text) + message. + source: + openapi: assistant-asyncapi.json + JsonMessage: + discriminated: false + union: + - AssistantEnd + - AssistantMessage + - ChatMetadata + - WebSocketError + - UserInterruption + - UserMessage + - ToolCallMessage + - ToolResponseMessage + - ToolErrorMessage + source: + openapi: assistant-asyncapi.json + TtsInput: + properties: + type: optional> + source: + openapi: assistant-asyncapi.json + TextInput: + properties: + type: optional> + source: + openapi: assistant-asyncapi.json + FunctionCallResponseInput: + properties: + type: optional> + source: + openapi: assistant-asyncapi.json + ExtendedVoiceArgs: + properties: + text: string + voice_args: VoiceArgs + source: + openapi: assistant-openapi.json + HTTPValidationError: + properties: + detail: optional> + source: + openapi: assistant-openapi.json + ValidationErrorLocItem: + discriminated: false + union: + - string + - integer + source: + openapi: assistant-openapi.json + ValidationError: + properties: + loc: list + msg: string + type: string + source: + openapi: assistant-openapi.json + VoiceArgs: + properties: + voice: optional + baseline: + type: optional + default: false + reconstruct: + type: optional + default: false + source: + openapi: assistant-openapi.json + VoiceNameEnum: + enum: + - ITO + - KORA + - DACHER + source: + openapi: assistant-openapi.json diff --git a/.mock/definition/empathic-voice/chat-groups.yml b/.mock/definition/empathic-voice/chat-groups.yml deleted file mode 100644 index 61282431..00000000 --- a/.mock/definition/empathic-voice/chat-groups.yml +++ /dev/null @@ -1,444 +0,0 @@ -imports: - root: __package__.yml -service: - auth: false - base-path: "" - endpoints: - list-chat-groups: - path: /v0/evi/chat_groups - method: GET - auth: true - display-name: List chat_groups - request: - name: ChatGroupsListChatGroupsRequest - query-parameters: - page_number: - type: optional - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - ascending_order: - type: optional - docs: >- - Specifies the sorting order of the results based on their creation - date. Set to true for ascending order (chronological, with the - oldest records first) and false for descending order - (reverse-chronological, with the newest records first). Defaults - to true. - config_id: - type: optional - docs: >- - The unique identifier for an EVI configuration. - - - Filter Chat Groups to only include Chats that used this - `config_id` in their most recent Chat. 
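Editorial note: for reference while reading the deleted endpoint definition above, its query parameters map onto an ordinary paginated GET against `/v0/evi/chat_groups`. The sketch below walks pages until `total_pages` is exhausted using the `requests` library; the `X-Hume-Api-Key` header and host are assumptions here, and the parameter semantics (zero-based `page_number`, `page_size` between 1 and 100) follow the docs.

```python
import requests

def iter_chat_groups(api_key, config_id=None, page_size=10):
    """Yield chat groups from GET /v0/evi/chat_groups, page by page (illustrative)."""
    url = "https://api.hume.ai/v0/evi/chat_groups"
    headers = {"X-Hume-Api-Key": api_key}
    page_number = 0                                  # zero-based, per the docs above
    while True:
        params = {
            "page_number": page_number,
            "page_size": page_size,                  # must be between 1 and 100
            "ascending_order": "true",
        }
        if config_id:
            params["config_id"] = config_id          # filter by most recent Chat's config
        page = requests.get(url, headers=headers, params=params).json()
        yield from page["chat_groups_page"]
        page_number += 1
        if page_number >= page["total_pages"]:
            break
```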
- response: - docs: Success - type: root.ReturnPagedChatGroups - examples: - - query-parameters: - page_number: 0 - page_size: 1 - ascending_order: true - config_id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - response: - body: - page_number: 0 - page_size: 1 - total_pages: 1 - pagination_direction: ASC - chat_groups_page: - - id: 697056f0-6c7e-487d-9bd8-9c19df79f05f - first_start_timestamp: 1721844196397 - most_recent_start_timestamp: 1721861821717 - active: false - most_recent_chat_id: dfdbdd4d-0ddf-418b-8fc4-80a266579d36 - num_chats: 5 - list-chat-group-events: - path: /v0/evi/chat_groups/{id}/events - method: GET - auth: true - path-parameters: - id: - type: string - docs: Identifier for a Chat Group. Formatted as a UUID. - display-name: List chat events from a specific chat_group - request: - name: ChatGroupsListChatGroupEventsRequest - query-parameters: - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. - - - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - page_number: - type: optional - docs: >- - Specifies the page number to retrieve, enabling pagination. - - - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - ascending_order: - type: optional - docs: >- - Specifies the sorting order of the results based on their creation - date. Set to true for ascending order (chronological, with the - oldest records first) and false for descending order - (reverse-chronological, with the newest records first). Defaults - to true. - response: - docs: Success - type: root.ReturnChatGroupPagedEvents - examples: - - path-parameters: - id: 697056f0-6c7e-487d-9bd8-9c19df79f05f - query-parameters: - page_number: 0 - page_size: 3 - ascending_order: true - response: - body: - id: 697056f0-6c7e-487d-9bd8-9c19df79f05f - page_number: 0 - page_size: 3 - total_pages: 1 - pagination_direction: ASC - events_page: - - id: 5d44bdbb-49a3-40fb-871d-32bf7e76efe7 - chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - timestamp: 1716244940762 - role: SYSTEM - type: SYSTEM_PROMPT - message_text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - emotion_features: "" - metadata: "" - - id: 5976ddf6-d093-4bb9-ba60-8f6c25832dde - chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - timestamp: 1716244956278 - role: USER - type: USER_MESSAGE - message_text: Hello. 
- emotion_features: >- - {"Admiration": 0.09906005859375, "Adoration": - 0.12213134765625, "Aesthetic Appreciation": - 0.05035400390625, "Amusement": 0.16552734375, "Anger": - 0.0037384033203125, "Anxiety": 0.010101318359375, "Awe": - 0.058197021484375, "Awkwardness": 0.10552978515625, - "Boredom": 0.1141357421875, "Calmness": 0.115234375, - "Concentration": 0.00444793701171875, "Confusion": - 0.0343017578125, "Contemplation": 0.00812530517578125, - "Contempt": 0.009002685546875, "Contentment": - 0.087158203125, "Craving": 0.00818634033203125, "Desire": - 0.018310546875, "Determination": 0.003238677978515625, - "Disappointment": 0.024169921875, "Disgust": - 0.00702667236328125, "Distress": 0.00936126708984375, - "Doubt": 0.00632476806640625, "Ecstasy": 0.0293731689453125, - "Embarrassment": 0.01800537109375, "Empathic Pain": - 0.0088348388671875, "Entrancement": 0.013397216796875, - "Envy": 0.02557373046875, "Excitement": 0.12109375, "Fear": - 0.004413604736328125, "Guilt": 0.016571044921875, "Horror": - 0.00274658203125, "Interest": 0.2142333984375, "Joy": - 0.29638671875, "Love": 0.16015625, "Nostalgia": - 0.007843017578125, "Pain": 0.007160186767578125, "Pride": - 0.00508880615234375, "Realization": 0.054229736328125, - "Relief": 0.048736572265625, "Romance": 0.026397705078125, - "Sadness": 0.0265350341796875, "Satisfaction": - 0.051361083984375, "Shame": 0.00974273681640625, "Surprise - (negative)": 0.0218963623046875, "Surprise (positive)": - 0.216064453125, "Sympathy": 0.021728515625, "Tiredness": - 0.0173797607421875, "Triumph": 0.004520416259765625} - metadata: >- - {"segments": [{"content": "Hello.", "embedding": - [0.6181640625, 0.1763916015625, -30.921875, 1.2705078125, - 0.927734375, 0.63720703125, 2.865234375, 0.1080322265625, - 0.2978515625, 1.0107421875, 1.34375, 0.74560546875, - 0.416259765625, 0.99462890625, -0.333740234375, - 0.361083984375, -1.388671875, 1.0107421875, 1.3173828125, - 0.55615234375, 0.541015625, -0.1837158203125, 1.697265625, - 0.228515625, 2.087890625, -0.311767578125, - 0.053680419921875, 1.3349609375, 0.95068359375, - 0.00441741943359375, 0.705078125, 1.8916015625, - -0.939453125, 0.93701171875, -0.28955078125, 1.513671875, - 0.5595703125, 1.0126953125, -0.1624755859375, 1.4072265625, - -0.28857421875, -0.4560546875, -0.1500244140625, - -0.1102294921875, -0.222412109375, 0.8779296875, - 1.275390625, 1.6689453125, 0.80712890625, -0.34814453125, - -0.325439453125, 0.412841796875, 0.81689453125, - 0.55126953125, 1.671875, 0.6611328125, 0.7451171875, - 1.50390625, 1.0224609375, -1.671875, 0.7373046875, - 2.1328125, 2.166015625, 0.41015625, -0.127685546875, - 1.9345703125, -4.2734375, 0.332275390625, 0.26171875, - 0.76708984375, 0.2685546875, 0.468017578125, 1.208984375, - -1.517578125, 1.083984375, 0.84814453125, 1.0244140625, - -0.0072174072265625, 1.34375, 1.0712890625, 1.517578125, - -0.52001953125, 0.59228515625, 0.8154296875, -0.951171875, - -0.07757568359375, 1.3330078125, 1.125, 0.61181640625, - 1.494140625, 0.357421875, 1.1796875, 1.482421875, 0.8046875, - 0.1536865234375, 1.8076171875, 0.68115234375, -15.171875, - 1.2294921875, 0.319091796875, 0.499755859375, 1.5771484375, - 0.94677734375, -0.2490234375, 0.88525390625, 3.47265625, - 0.75927734375, 0.71044921875, 1.2333984375, 1.4169921875, - -0.56640625, -1.8095703125, 1.37109375, 0.428955078125, - 1.89453125, -0.39013671875, 0.1734619140625, 1.5595703125, - -1.2294921875, 2.552734375, 0.58349609375, 0.2156982421875, - -0.00984954833984375, -0.6865234375, -0.0272979736328125, - -0.2264404296875, 
2.853515625, 1.3896484375, 0.52978515625, - 0.783203125, 3.0390625, 0.75537109375, 0.219970703125, - 0.384521484375, 0.385986328125, 2.0546875, - -0.10443115234375, 1.5146484375, 1.4296875, 1.9716796875, - 1.1318359375, 0.31591796875, 0.338623046875, 1.654296875, - -0.88037109375, -0.21484375, 1.45703125, 1.0380859375, - -0.52294921875, -0.47802734375, 0.1650390625, 1.2392578125, - -1.138671875, 0.56787109375, 1.318359375, 0.4287109375, - 0.1981201171875, 2.4375, 0.281005859375, 0.89404296875, - -0.1552734375, 0.6474609375, -0.08331298828125, - 0.00740814208984375, -0.045501708984375, -0.578125, - 2.02734375, 0.59228515625, 0.35693359375, 1.2919921875, - 1.22265625, 1.0537109375, 0.145263671875, 1.05859375, - -0.369140625, 0.207275390625, 0.78857421875, 0.599609375, - 0.99072265625, 0.24462890625, 1.26953125, 0.08404541015625, - 1.349609375, 0.73291015625, 1.3212890625, 0.388916015625, - 1.0869140625, 0.9931640625, -1.5673828125, 0.0462646484375, - 0.650390625, 0.253662109375, 0.58251953125, 1.8134765625, - 0.8642578125, 2.591796875, 0.7314453125, 0.85986328125, - 0.5615234375, 0.9296875, 0.04144287109375, 1.66015625, - 1.99609375, 1.171875, 1.181640625, 1.5126953125, - 0.0224456787109375, 0.58349609375, -1.4931640625, - 0.81884765625, 0.732421875, -0.6455078125, -0.62451171875, - 1.7802734375, 0.01526641845703125, -0.423095703125, - 0.461669921875, 4.87890625, 1.2392578125, -0.6953125, - 0.6689453125, 0.62451171875, -1.521484375, 1.7685546875, - 0.810546875, 0.65478515625, 0.26123046875, 1.6396484375, - 0.87548828125, 1.7353515625, 2.046875, 1.5634765625, - 0.69384765625, 1.375, 0.8916015625, 1.0107421875, - 0.1304931640625, 2.009765625, 0.06402587890625, - -0.08428955078125, 0.04351806640625, -1.7529296875, - 2.02734375, 3.521484375, 0.404541015625, 1.6337890625, - -0.276611328125, 0.8837890625, -0.1287841796875, - 0.91064453125, 0.8193359375, 0.701171875, 0.036529541015625, - 1.26171875, 1.0478515625, -0.1422119140625, 1.0634765625, - 0.61083984375, 1.3505859375, 1.208984375, 0.57275390625, - 1.3623046875, 2.267578125, 0.484375, 0.9150390625, - 0.56787109375, -0.70068359375, 0.27587890625, - -0.70654296875, 0.8466796875, 0.57568359375, 1.6162109375, - 0.87939453125, 2.248046875, -0.5458984375, 1.7744140625, - 1.328125, 1.232421875, 0.6806640625, 0.9365234375, - 1.052734375, -1.08984375, 1.8330078125, -0.4033203125, - 1.0673828125, 0.297607421875, 1.5703125, 1.67578125, - 1.34765625, 2.8203125, 2.025390625, -0.48583984375, - 0.7626953125, 0.01007843017578125, 1.435546875, - 0.007205963134765625, 0.05157470703125, -0.9853515625, - 0.26708984375, 1.16796875, 1.2041015625, 1.99609375, - -0.07916259765625, 1.244140625, -0.32080078125, - 0.6748046875, 0.419921875, 1.3212890625, 1.291015625, - 0.599609375, 0.0550537109375, 0.9599609375, 0.93505859375, - 0.111083984375, 1.302734375, 0.0833740234375, 2.244140625, - 1.25390625, 1.6015625, 0.58349609375, 1.7568359375, - -0.263427734375, -0.019866943359375, -0.24658203125, - -0.1871337890625, 0.927734375, 0.62255859375, - 0.275146484375, 0.79541015625, 1.1796875, 1.1767578125, - -0.26123046875, -0.268310546875, 1.8994140625, 1.318359375, - 2.1875, 0.2469482421875, 1.41015625, 0.03973388671875, - 1.2685546875, 1.1025390625, 0.9560546875, 0.865234375, - -1.92578125, 1.154296875, 0.389892578125, 1.130859375, - 0.95947265625, 0.72314453125, 2.244140625, - 0.048553466796875, 0.626953125, 0.42919921875, - 0.82275390625, 0.311767578125, -0.320556640625, - 0.01041412353515625, 0.1483154296875, 0.10809326171875, - -0.3173828125, 1.1337890625, -0.8642578125, 
1.4033203125, - 0.048828125, 1.1787109375, 0.98779296875, 1.818359375, - 1.1552734375, 0.6015625, 1.2392578125, -1.2685546875, - 0.39208984375, 0.83251953125, 0.224365234375, - 0.0019989013671875, 0.87548828125, 1.6572265625, - 1.107421875, 0.434814453125, 1.8251953125, 0.442626953125, - 1.2587890625, 0.09320068359375, -0.896484375, 1.8017578125, - 1.451171875, -0.0755615234375, 0.6083984375, 2.06640625, - 0.673828125, -0.33740234375, 0.192138671875, 0.21435546875, - 0.80224609375, -1.490234375, 0.9501953125, 0.86083984375, - -0.40283203125, 4.109375, 2.533203125, 1.2529296875, - 0.8271484375, 0.225830078125, 1.0478515625, -1.9755859375, - 0.841796875, 0.392822265625, 0.525390625, 0.33935546875, - -0.79443359375, 0.71630859375, 0.97998046875, - -0.175537109375, 0.97705078125, 1.705078125, 0.29638671875, - 0.68359375, 0.54150390625, 0.435791015625, 0.99755859375, - -0.369140625, 1.009765625, -0.140380859375, 0.426513671875, - 0.189697265625, 1.8193359375, 1.1201171875, -0.5009765625, - -0.331298828125, 0.759765625, -0.09442138671875, 0.74609375, - -1.947265625, 1.3544921875, -3.935546875, 2.544921875, - 1.359375, 0.1363525390625, 0.79296875, 0.79931640625, - -0.3466796875, 1.1396484375, -0.33447265625, 2.0078125, - -0.241455078125, 0.6318359375, 0.365234375, 0.296142578125, - 0.830078125, 1.0458984375, 0.5830078125, 0.61572265625, - 14.0703125, -2.0078125, -0.381591796875, 1.228515625, - 0.08282470703125, -0.67822265625, -0.04339599609375, - 0.397216796875, 0.1656494140625, 0.137451171875, - 0.244873046875, 1.1611328125, -1.3818359375, 0.8447265625, - 1.171875, 0.36328125, 0.252685546875, 0.1197509765625, - 0.232177734375, -0.020172119140625, 0.64404296875, - -0.01100921630859375, -1.9267578125, 0.222412109375, - 0.56005859375, 1.3046875, 1.1630859375, 1.197265625, - 1.02734375, 1.6806640625, -0.043731689453125, 1.4697265625, - 0.81201171875, 1.5390625, 1.240234375, -0.7353515625, - 1.828125, 1.115234375, 1.931640625, -0.517578125, - 0.77880859375, 1.0546875, 0.95361328125, 3.42578125, - 0.0160369873046875, 0.875, 0.56005859375, 1.2421875, - 1.986328125, 1.4814453125, 0.0948486328125, 1.115234375, - 0.00665283203125, 2.09375, 0.3544921875, -0.52783203125, - 1.2099609375, 0.45068359375, 0.65625, 0.1112060546875, - 1.0751953125, -0.9521484375, -0.30029296875, 1.4462890625, - 2.046875, 3.212890625, 1.68359375, 1.07421875, - -0.5263671875, 0.74560546875, 1.37890625, 0.15283203125, - 0.2440185546875, 0.62646484375, -0.1280517578125, - 0.7646484375, -0.515625, -0.35693359375, 1.2958984375, - 0.96923828125, 0.58935546875, 1.3701171875, 1.0673828125, - 0.2337646484375, 0.93115234375, 0.66357421875, 6.0, - 1.1025390625, -0.51708984375, -0.38330078125, 0.7197265625, - 0.246826171875, -0.45166015625, 1.9521484375, 0.5546875, - 0.08807373046875, 0.18505859375, 0.8857421875, - -0.57177734375, 0.251708984375, 0.234375, 2.57421875, - 0.9599609375, 0.5029296875, 0.10382080078125, - 0.08331298828125, 0.66748046875, -0.349609375, 1.287109375, - 0.259765625, 2.015625, 2.828125, -0.3095703125, - -0.164306640625, -0.3408203125, 0.486572265625, - 0.8466796875, 1.9130859375, 0.09088134765625, 0.66552734375, - 0.00972747802734375, -0.83154296875, 1.755859375, - 0.654296875, 0.173828125, 0.27587890625, -0.47607421875, - -0.264404296875, 0.7529296875, 0.6533203125, 0.7275390625, - 0.499755859375, 0.833984375, -0.44775390625, -0.05078125, - -0.454833984375, 0.75439453125, 0.68505859375, - 0.210693359375, -0.283935546875, -0.53564453125, - 0.96826171875, 0.861328125, -3.33984375, -0.26171875, - 0.77734375, 
0.26513671875, -0.14111328125, -0.042236328125, - -0.84814453125, 0.2137451171875, 0.94921875, 0.65185546875, - -0.5380859375, 0.1529541015625, -0.360595703125, - -0.0333251953125, -0.69189453125, 0.8974609375, 0.7109375, - 0.81494140625, -0.259521484375, 1.1904296875, 0.62158203125, - 1.345703125, 0.89404296875, 0.70556640625, 1.0673828125, - 1.392578125, 0.5068359375, 0.962890625, 0.736328125, - 1.55078125, 0.50390625, -0.398681640625, 2.361328125, - 0.345947265625, -0.61962890625, 0.330078125, 0.75439453125, - -0.673828125, -0.2379150390625, 1.5673828125, 1.369140625, - 0.1119384765625, -0.1834716796875, 1.4599609375, - -0.77587890625, 0.5556640625, 0.09954833984375, - 0.0285186767578125, 0.58935546875, -0.501953125, - 0.212890625, 0.02679443359375, 0.1715087890625, - 0.03466796875, -0.564453125, 2.029296875, 2.45703125, - -0.72216796875, 2.138671875, 0.50830078125, - -0.09356689453125, 0.230224609375, 1.6943359375, - 1.5126953125, 0.39453125, 0.411376953125, 1.07421875, - -0.8046875, 0.51416015625, 0.2271728515625, -0.283447265625, - 0.38427734375, 0.73388671875, 0.6962890625, 1.4990234375, - 0.02813720703125, 0.40478515625, 1.2451171875, 1.1162109375, - -5.5703125, 0.76171875, 0.322021484375, 1.0361328125, - 1.197265625, 0.1163330078125, 0.2425537109375, 1.5595703125, - 1.5791015625, -0.0921630859375, 0.484619140625, - 1.9052734375, 5.31640625, 1.6337890625, 0.95947265625, - -0.1751708984375, 0.466552734375, 0.8330078125, 1.03125, - 0.2044677734375, 0.31298828125, -1.1220703125, 0.5517578125, - 0.93505859375, 0.45166015625, 1.951171875, 0.65478515625, - 1.30859375, 1.0859375, 0.56494140625, 2.322265625, - 0.242919921875, 1.81640625, -0.469970703125, -0.841796875, - 0.90869140625, 1.5361328125, 0.923828125, 1.0595703125, - 0.356689453125, -0.46142578125, 2.134765625, 1.3037109375, - -0.32373046875, -9.2265625, 0.4521484375, 0.88037109375, - -0.53955078125, 0.96484375, 0.7705078125, 0.84521484375, - 1.580078125, -0.1448974609375, 0.7607421875, 1.0166015625, - -0.086669921875, 1.611328125, 0.05938720703125, 0.5078125, - 0.8427734375, 2.431640625, 0.66357421875, 3.203125, - 0.132080078125, 0.461181640625, 0.779296875, 1.9482421875, - 1.8720703125, 0.845703125, -1.3837890625, -0.138916015625, - 0.35546875, 0.2457275390625, 0.75341796875, 1.828125, - 1.4169921875, 0.60791015625, 1.0068359375, 1.109375, - 0.484130859375, -0.302001953125, 0.4951171875, 0.802734375, - 1.9482421875, 0.916015625, 0.1646728515625, 2.599609375, - 1.7177734375, -0.2374267578125, 0.98046875, 0.39306640625, - -1.1396484375, 1.6533203125, 0.375244140625], "scores": - [0.09906005859375, 0.12213134765625, 0.05035400390625, - 0.16552734375, 0.0037384033203125, 0.010101318359375, - 0.058197021484375, 0.10552978515625, 0.1141357421875, - 0.115234375, 0.00444793701171875, 0.00812530517578125, - 0.0343017578125, 0.009002685546875, 0.087158203125, - 0.00818634033203125, 0.003238677978515625, 0.024169921875, - 0.00702667236328125, 0.00936126708984375, - 0.00632476806640625, 0.0293731689453125, 0.01800537109375, - 0.0088348388671875, 0.013397216796875, 0.02557373046875, - 0.12109375, 0.004413604736328125, 0.016571044921875, - 0.00274658203125, 0.2142333984375, 0.29638671875, - 0.16015625, 0.007843017578125, 0.007160186767578125, - 0.00508880615234375, 0.054229736328125, 0.048736572265625, - 0.026397705078125, 0.0265350341796875, 0.051361083984375, - 0.018310546875, 0.00974273681640625, 0.0218963623046875, - 0.216064453125, 0.021728515625, 0.0173797607421875, - 0.004520416259765625], "stoks": [52, 52, 52, 52, 52, 41, 41, - 374, 
303, 303, 303, 427], "time": {"begin_ms": 640, - "end_ms": 1140}}]} - - id: 7645a0d1-2e64-410d-83a8-b96040432e9a - chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - timestamp: 1716244957031 - role: AGENT - type: AGENT_MESSAGE - message_text: Hello! - emotion_features: >- - {"Admiration": 0.044921875, "Adoration": 0.0253753662109375, - "Aesthetic Appreciation": 0.03265380859375, "Amusement": - 0.118408203125, "Anger": 0.06719970703125, "Anxiety": - 0.0411376953125, "Awe": 0.03802490234375, "Awkwardness": - 0.056549072265625, "Boredom": 0.04217529296875, "Calmness": - 0.08709716796875, "Concentration": 0.070556640625, - "Confusion": 0.06964111328125, "Contemplation": - 0.0343017578125, "Contempt": 0.037689208984375, - "Contentment": 0.059417724609375, "Craving": - 0.01132965087890625, "Desire": 0.01406097412109375, - "Determination": 0.1143798828125, "Disappointment": - 0.051177978515625, "Disgust": 0.028594970703125, "Distress": - 0.054901123046875, "Doubt": 0.04638671875, "Ecstasy": - 0.0258026123046875, "Embarrassment": 0.0222015380859375, - "Empathic Pain": 0.015777587890625, "Entrancement": - 0.0160980224609375, "Envy": 0.0163421630859375, - "Excitement": 0.129638671875, "Fear": 0.03125, "Guilt": - 0.01483917236328125, "Horror": 0.0194549560546875, - "Interest": 0.1341552734375, "Joy": 0.0738525390625, "Love": - 0.0216522216796875, "Nostalgia": 0.0210418701171875, "Pain": - 0.020721435546875, "Pride": 0.05499267578125, "Realization": - 0.0728759765625, "Relief": 0.04052734375, "Romance": - 0.0129241943359375, "Sadness": 0.0254669189453125, - "Satisfaction": 0.07159423828125, "Shame": 0.01495361328125, - "Surprise (negative)": 0.05560302734375, "Surprise - (positive)": 0.07965087890625, "Sympathy": - 0.022247314453125, "Tiredness": 0.0194549560546875, - "Triumph": 0.04107666015625} - metadata: "" - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json diff --git a/.mock/definition/empathic-voice/chat.yml b/.mock/definition/empathic-voice/chat.yml index e5522394..c99729ae 100644 --- a/.mock/definition/empathic-voice/chat.yml +++ b/.mock/definition/empathic-voice/chat.yml @@ -1,146 +1,146 @@ channel: - path: /v0/evi/chat - auth: false - query-parameters: - config_id: - type: optional - docs: >- - The unique identifier for an EVI configuration. - - - Include this ID in your connection request to equip EVI with the Prompt, - Language Model, Voice, and Tools associated with the specified - configuration. If omitted, EVI will apply [default configuration - settings](/docs/empathic-voice-interface-evi/configuration#default-configuration). - - - For help obtaining this ID, see our [Configuration - Guide](/docs/empathic-voice-interface-evi/configuration). - config_version: - type: optional - docs: >- - The version number of the EVI configuration specified by the - `config_id`. - - - Configs, as well as Prompts and Tools, are versioned. This versioning - system supports iterative development, allowing you to progressively - refine configurations and revert to previous versions if needed. - - - Include this parameter to apply a specific version of an EVI - configuration. If omitted, the latest version will be applied. - resumed_chat_group_id: - type: optional - docs: >- - The unique identifier for a Chat Group. Use this field to preserve - context from a previous Chat session. - - - A Chat represents a single session from opening to closing a WebSocket - connection. 
In contrast, a Chat Group is a series of resumed Chats that - collectively represent a single conversation spanning multiple sessions. - Each Chat includes a Chat Group ID, which is used to preserve the - context of previous Chat sessions when starting a new one. - - - Including the Chat Group ID in the `resumed_chat_group_id` query - parameter is useful for seamlessly resuming a Chat after unexpected - network disconnections and for picking up conversations exactly where - you left off at a later time. This ensures preserved context across - multiple sessions. - - - There are three ways to obtain the Chat Group ID: - - - - [Chat - Metadata](/reference/empathic-voice-interface-evi/chat/chat#receive.Chat%20Metadata.type): - Upon establishing a WebSocket connection with EVI, the user receives a - Chat Metadata message. This message contains a `chat_group_id`, which - can be used to resume conversations within this chat group in future - sessions. - - - - [List Chats - endpoint](/reference/empathic-voice-interface-evi/chats/list-chats): Use - the GET `/v0/evi/chats` endpoint to obtain the Chat Group ID of - individual Chat sessions. This endpoint lists all available Chat - sessions and their associated Chat Group ID. - - - - [List Chat Groups - endpoint](/reference/empathic-voice-interface-evi/chat-groups/list-chat-groups): - Use the GET `/v0/evi/chat_groups` endpoint to obtain the Chat Group IDs - of all Chat Groups associated with an API key. This endpoint returns a - list of all available chat groups. - access_token: - type: optional - docs: >- - Access token used for authenticating the client. If not provided, an - `api_key` must be provided to authenticate. - - - The access token is generated using both an API key and a Secret key, - which provides an additional layer of security compared to using just an - API key. - - - For more details, refer to the [Authentication Strategies - Guide](/docs/introduction/api-key#authentication-strategies). - api_key: - type: optional - docs: >- - API key used for authenticating the client. If not provided, an - `access_token` must be provided to authenticate. - - - For more details, refer to the [Authentication Strategies - Guide](/docs/introduction/api-key#authentication-strategies). - messages: - subscribe: - origin: server - body: SubscribeEvent - publish: - origin: client - body: PublishEvent - examples: - - messages: - - type: publish - body: - type: audio_input - data: data - - type: subscribe - body: - type: assistant_end + path: /v0/evi/chat + auth: false + query-parameters: + config_id: + type: optional + docs: >- + The unique identifier for an EVI configuration. + + + Include this ID in your connection request to equip EVI with the Prompt, + Language Model, Voice, and Tools associated with the specified + configuration. If omitted, EVI will apply [default configuration + settings](/docs/empathic-voice-interface-evi/configuration#default-configuration). + + + For help obtaining this ID, see our [Configuration + Guide](/docs/empathic-voice-interface-evi/configuration). + config_version: + type: optional + docs: >- + The version number of the EVI configuration specified by the + `config_id`. + + + Configs, as well as Prompts and Tools, are versioned. This versioning + system supports iterative development, allowing you to progressively + refine configurations and revert to previous versions if needed. + + + Include this parameter to apply a specific version of an EVI + configuration. If omitted, the latest version will be applied. 
+ resumed_chat_group_id: + type: optional + docs: >- + The unique identifier for a Chat Group. Use this field to preserve + context from a previous Chat session. + + + A Chat represents a single session from opening to closing a WebSocket + connection. In contrast, a Chat Group is a series of resumed Chats that + collectively represent a single conversation spanning multiple sessions. + Each Chat includes a Chat Group ID, which is used to preserve the + context of previous Chat sessions when starting a new one. + + + Including the Chat Group ID in the `resumed_chat_group_id` query + parameter is useful for seamlessly resuming a Chat after unexpected + network disconnections and for picking up conversations exactly where + you left off at a later time. This ensures preserved context across + multiple sessions. + + + There are three ways to obtain the Chat Group ID: + + + - [Chat + Metadata](/reference/empathic-voice-interface-evi/chat/chat#receive.Chat%20Metadata.type): + Upon establishing a WebSocket connection with EVI, the user receives a + Chat Metadata message. This message contains a `chat_group_id`, which + can be used to resume conversations within this chat group in future + sessions. + + + - [List Chats + endpoint](/reference/empathic-voice-interface-evi/chats/list-chats): Use + the GET `/v0/evi/chats` endpoint to obtain the Chat Group ID of + individual Chat sessions. This endpoint lists all available Chat + sessions and their associated Chat Group ID. + + + - [List Chat Groups + endpoint](/reference/empathic-voice-interface-evi/chat-groups/list-chat-groups): + Use the GET `/v0/evi/chat_groups` endpoint to obtain the Chat Group IDs + of all Chat Groups associated with an API key. This endpoint returns a + list of all available chat groups. + access_token: + type: optional + docs: >- + Access token used for authenticating the client. If not provided, an + `api_key` must be provided to authenticate. + + + The access token is generated using both an API key and a Secret key, + which provides an additional layer of security compared to using just an + API key. + + + For more details, refer to the [Authentication Strategies + Guide](/docs/introduction/api-key#authentication-strategies). + api_key: + type: optional + docs: >- + API key used for authenticating the client. If not provided, an + `access_token` must be provided to authenticate. + + + For more details, refer to the [Authentication Strategies + Guide](/docs/introduction/api-key#authentication-strategies). 
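The query parameters above are everything a client needs to open, authenticate, and resume a session on the `/v0/evi/chat` channel. As a rough, non-normative sketch of how they fit together (the `wss://api.hume.ai` host, the plain `websockets` client, and the fake audio payload are assumptions, not part of this definition; the path, query parameters, and the `audio_input`/`assistant_end` message types come from the channel definition and examples below):

```python
# Minimal sketch of connecting to the EVI chat channel defined above.
# Assumptions: wss://api.hume.ai as the host, API-key auth via the
# `api_key` query parameter, and a base64-encoded audio chunk to send.
import asyncio
import base64
import json
from urllib.parse import urlencode

import websockets  # pip install websockets


async def chat(api_key: str, config_id: str, resumed_chat_group_id: str | None = None) -> None:
    params = {"api_key": api_key, "config_id": config_id}
    if resumed_chat_group_id:
        # Reuse a previous Chat Group to preserve conversational context.
        params["resumed_chat_group_id"] = resumed_chat_group_id
    url = f"wss://api.hume.ai/v0/evi/chat?{urlencode(params)}"

    async with websockets.connect(url) as ws:
        # Publish an `audio_input` message, mirroring the channel example below.
        chunk = base64.b64encode(b"...raw audio bytes...").decode()
        await ws.send(json.dumps({"type": "audio_input", "data": chunk}))

        # Subscribe-side messages arrive as JSON; stop once the assistant finishes.
        async for raw in ws:
            event = json.loads(raw)
            print(event["type"])
            if event["type"] == "assistant_end":
                break


# asyncio.run(chat("MY_API_KEY", "1b60e1a0-cc59-424a-8d2c-189d354db3f3"))
```

The published and received message shapes correspond to the `PublishEvent` and `SubscribeEvent` unions declared for this channel further down.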
+ messages: + subscribe: + origin: server + body: SubscribeEvent + publish: + origin: client + body: PublishEvent + examples: + - messages: + - type: publish + body: + type: audio_input + data: data + - type: subscribe + body: + type: assistant_end imports: - root: __package__.yml + root: __package__.yml types: - SubscribeEvent: - discriminated: false - union: - - root.AssistantEnd - - root.AssistantMessage - - root.AudioOutput - - root.ChatMetadata - - root.WebSocketError - - root.UserInterruption - - root.UserMessage - - root.ToolCallMessage - - root.ToolResponseMessage - - root.ToolErrorMessage - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json - PublishEvent: - discriminated: false - union: - - root.AudioInput - - root.SessionSettings - - root.UserInput - - root.AssistantInput - - root.ToolResponseMessage - - root.ToolErrorMessage - - root.PauseAssistantMessage - - root.ResumeAssistantMessage - source: - openapi: ../empathic-voice-interface/assistant-asyncapi.json + SubscribeEvent: + discriminated: false + union: + - root.AssistantEnd + - root.AssistantMessage + - root.AudioOutput + - root.ChatMetadata + - root.WebSocketError + - root.UserInterruption + - root.UserMessage + - root.ToolCallMessage + - root.ToolResponseMessage + - root.ToolErrorMessage + source: + openapi: assistant-asyncapi.json + PublishEvent: + discriminated: false + union: + - root.AudioInput + - root.SessionSettings + - root.UserInput + - root.AssistantInput + - root.ToolResponseMessage + - root.ToolErrorMessage + - root.PauseAssistantMessage + - root.ResumeAssistantMessage + source: + openapi: assistant-asyncapi.json diff --git a/.mock/definition/empathic-voice/chatGroups.yml b/.mock/definition/empathic-voice/chatGroups.yml new file mode 100644 index 00000000..7d15a6e3 --- /dev/null +++ b/.mock/definition/empathic-voice/chatGroups.yml @@ -0,0 +1,448 @@ +imports: + root: __package__.yml +service: + auth: false + base-path: '' + endpoints: + list-chat-groups: + path: /v0/evi/chat_groups + method: GET + auth: true + display-name: List chat_groups + request: + name: ChatGroupsListChatGroupsRequest + query-parameters: + page_number: + type: optional + docs: >- + Specifies the page number to retrieve, enabling pagination. + + + This parameter uses zero-based indexing. For example, setting + `page_number` to 0 retrieves the first page of results (items 0-9 + if `page_size` is 10), setting `page_number` to 1 retrieves the + second page (items 10-19), and so on. Defaults to 0, which + retrieves the first page. + page_size: + type: optional + docs: >- + Specifies the maximum number of results to include per page, + enabling pagination. The value must be between 1 and 100, + inclusive. + + + For example, if `page_size` is set to 10, each page will include + up to 10 items. Defaults to 10. + ascending_order: + type: optional + docs: >- + Specifies the sorting order of the results based on their creation + date. Set to true for ascending order (chronological, with the + oldest records first) and false for descending order + (reverse-chronological, with the newest records first). Defaults + to true. + config_id: + type: optional + docs: >- + The unique identifier for an EVI configuration. + + + Filter Chat Groups to only include Chats that used this + `config_id` in their most recent Chat. 
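The list endpoints in this file share the same zero-based, offset-style pagination: `page_number` and `page_size` (1 to 100 inclusive) in the request, `total_pages` and a `*_page` array in the response. A hedged illustration of walking every page of `GET /v0/evi/chat_groups` follows; the `https://api.hume.ai` host, the `X-Hume-Api-Key` header name, and the `requests` client are assumptions, while the path, query parameters, and `total_pages`/`chat_groups_page` fields come from the definition and example response here.

```python
# Sketch: page through every Chat Group, assuming https://api.hume.ai and
# an `X-Hume-Api-Key` header for the `auth: true` endpoints in this file.
# The same loop works for GET /v0/evi/chats, which paginates identically.
import requests


def list_all_chat_groups(api_key: str, page_size: int = 10) -> list[dict]:
    groups: list[dict] = []
    page_number = 0
    while True:
        resp = requests.get(
            "https://api.hume.ai/v0/evi/chat_groups",
            headers={"X-Hume-Api-Key": api_key},
            params={
                "page_number": page_number,   # zero-based, per the docs above
                "page_size": page_size,       # 1-100 inclusive
                "ascending_order": True,      # oldest Chat Groups first
            },
        )
        resp.raise_for_status()
        body = resp.json()
        groups.extend(body["chat_groups_page"])
        page_number += 1
        if page_number >= body["total_pages"]:
            break
    return groups
```

Filtering to Chat Groups whose most recent Chat used a particular configuration is just one more entry in `params` (the `config_id` query parameter documented above).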
+ response: + docs: Success + type: root.ReturnPagedChatGroups + errors: + - root.BadRequestError + examples: + - query-parameters: + page_number: 0 + page_size: 1 + ascending_order: true + config_id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + response: + body: + page_number: 0 + page_size: 1 + total_pages: 1 + pagination_direction: ASC + chat_groups_page: + - id: 697056f0-6c7e-487d-9bd8-9c19df79f05f + first_start_timestamp: 1721844196397 + most_recent_start_timestamp: 1721861821717 + active: false + most_recent_chat_id: dfdbdd4d-0ddf-418b-8fc4-80a266579d36 + num_chats: 5 + list-chat-group-events: + path: /v0/evi/chat_groups/{id}/events + method: GET + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Chat Group. Formatted as a UUID. + display-name: List chat events from a specific chat_group + request: + name: ChatGroupsListChatGroupEventsRequest + query-parameters: + page_size: + type: optional + docs: >- + Specifies the maximum number of results to include per page, + enabling pagination. The value must be between 1 and 100, + inclusive. + + + For example, if `page_size` is set to 10, each page will include + up to 10 items. Defaults to 10. + page_number: + type: optional + docs: >- + Specifies the page number to retrieve, enabling pagination. + + + This parameter uses zero-based indexing. For example, setting + `page_number` to 0 retrieves the first page of results (items 0-9 + if `page_size` is 10), setting `page_number` to 1 retrieves the + second page (items 10-19), and so on. Defaults to 0, which + retrieves the first page. + ascending_order: + type: optional + docs: >- + Specifies the sorting order of the results based on their creation + date. Set to true for ascending order (chronological, with the + oldest records first) and false for descending order + (reverse-chronological, with the newest records first). Defaults + to true. + response: + docs: Success + type: root.ReturnChatGroupPagedEvents + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 697056f0-6c7e-487d-9bd8-9c19df79f05f + query-parameters: + page_number: 0 + page_size: 3 + ascending_order: true + response: + body: + id: 697056f0-6c7e-487d-9bd8-9c19df79f05f + page_number: 0 + page_size: 3 + total_pages: 1 + pagination_direction: ASC + events_page: + - id: 5d44bdbb-49a3-40fb-871d-32bf7e76efe7 + chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 + timestamp: 1716244940762 + role: SYSTEM + type: SYSTEM_PROMPT + message_text: >- + You are an AI weather assistant providing users with + accurate and up-to-date weather information. Respond to user + queries concisely and clearly. Use simple language and avoid + technical jargon. Provide temperature, precipitation, wind + conditions, and any weather alerts. Include helpful tips if + severe weather is expected. + emotion_features: '' + metadata: '' + - id: 5976ddf6-d093-4bb9-ba60-8f6c25832dde + chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 + timestamp: 1716244956278 + role: USER + type: USER_MESSAGE + message_text: Hello. 
+ emotion_features: >- + {"Admiration": 0.09906005859375, "Adoration": + 0.12213134765625, "Aesthetic Appreciation": + 0.05035400390625, "Amusement": 0.16552734375, "Anger": + 0.0037384033203125, "Anxiety": 0.010101318359375, "Awe": + 0.058197021484375, "Awkwardness": 0.10552978515625, + "Boredom": 0.1141357421875, "Calmness": 0.115234375, + "Concentration": 0.00444793701171875, "Confusion": + 0.0343017578125, "Contemplation": 0.00812530517578125, + "Contempt": 0.009002685546875, "Contentment": + 0.087158203125, "Craving": 0.00818634033203125, "Desire": + 0.018310546875, "Determination": 0.003238677978515625, + "Disappointment": 0.024169921875, "Disgust": + 0.00702667236328125, "Distress": 0.00936126708984375, + "Doubt": 0.00632476806640625, "Ecstasy": 0.0293731689453125, + "Embarrassment": 0.01800537109375, "Empathic Pain": + 0.0088348388671875, "Entrancement": 0.013397216796875, + "Envy": 0.02557373046875, "Excitement": 0.12109375, "Fear": + 0.004413604736328125, "Guilt": 0.016571044921875, "Horror": + 0.00274658203125, "Interest": 0.2142333984375, "Joy": + 0.29638671875, "Love": 0.16015625, "Nostalgia": + 0.007843017578125, "Pain": 0.007160186767578125, "Pride": + 0.00508880615234375, "Realization": 0.054229736328125, + "Relief": 0.048736572265625, "Romance": 0.026397705078125, + "Sadness": 0.0265350341796875, "Satisfaction": + 0.051361083984375, "Shame": 0.00974273681640625, "Surprise + (negative)": 0.0218963623046875, "Surprise (positive)": + 0.216064453125, "Sympathy": 0.021728515625, "Tiredness": + 0.0173797607421875, "Triumph": 0.004520416259765625} + metadata: >- + {"segments": [{"content": "Hello.", "embedding": + [0.6181640625, 0.1763916015625, -30.921875, 1.2705078125, + 0.927734375, 0.63720703125, 2.865234375, 0.1080322265625, + 0.2978515625, 1.0107421875, 1.34375, 0.74560546875, + 0.416259765625, 0.99462890625, -0.333740234375, + 0.361083984375, -1.388671875, 1.0107421875, 1.3173828125, + 0.55615234375, 0.541015625, -0.1837158203125, 1.697265625, + 0.228515625, 2.087890625, -0.311767578125, + 0.053680419921875, 1.3349609375, 0.95068359375, + 0.00441741943359375, 0.705078125, 1.8916015625, + -0.939453125, 0.93701171875, -0.28955078125, 1.513671875, + 0.5595703125, 1.0126953125, -0.1624755859375, 1.4072265625, + -0.28857421875, -0.4560546875, -0.1500244140625, + -0.1102294921875, -0.222412109375, 0.8779296875, + 1.275390625, 1.6689453125, 0.80712890625, -0.34814453125, + -0.325439453125, 0.412841796875, 0.81689453125, + 0.55126953125, 1.671875, 0.6611328125, 0.7451171875, + 1.50390625, 1.0224609375, -1.671875, 0.7373046875, + 2.1328125, 2.166015625, 0.41015625, -0.127685546875, + 1.9345703125, -4.2734375, 0.332275390625, 0.26171875, + 0.76708984375, 0.2685546875, 0.468017578125, 1.208984375, + -1.517578125, 1.083984375, 0.84814453125, 1.0244140625, + -0.0072174072265625, 1.34375, 1.0712890625, 1.517578125, + -0.52001953125, 0.59228515625, 0.8154296875, -0.951171875, + -0.07757568359375, 1.3330078125, 1.125, 0.61181640625, + 1.494140625, 0.357421875, 1.1796875, 1.482421875, 0.8046875, + 0.1536865234375, 1.8076171875, 0.68115234375, -15.171875, + 1.2294921875, 0.319091796875, 0.499755859375, 1.5771484375, + 0.94677734375, -0.2490234375, 0.88525390625, 3.47265625, + 0.75927734375, 0.71044921875, 1.2333984375, 1.4169921875, + -0.56640625, -1.8095703125, 1.37109375, 0.428955078125, + 1.89453125, -0.39013671875, 0.1734619140625, 1.5595703125, + -1.2294921875, 2.552734375, 0.58349609375, 0.2156982421875, + -0.00984954833984375, -0.6865234375, -0.0272979736328125, + -0.2264404296875, 
2.853515625, 1.3896484375, 0.52978515625, + 0.783203125, 3.0390625, 0.75537109375, 0.219970703125, + 0.384521484375, 0.385986328125, 2.0546875, + -0.10443115234375, 1.5146484375, 1.4296875, 1.9716796875, + 1.1318359375, 0.31591796875, 0.338623046875, 1.654296875, + -0.88037109375, -0.21484375, 1.45703125, 1.0380859375, + -0.52294921875, -0.47802734375, 0.1650390625, 1.2392578125, + -1.138671875, 0.56787109375, 1.318359375, 0.4287109375, + 0.1981201171875, 2.4375, 0.281005859375, 0.89404296875, + -0.1552734375, 0.6474609375, -0.08331298828125, + 0.00740814208984375, -0.045501708984375, -0.578125, + 2.02734375, 0.59228515625, 0.35693359375, 1.2919921875, + 1.22265625, 1.0537109375, 0.145263671875, 1.05859375, + -0.369140625, 0.207275390625, 0.78857421875, 0.599609375, + 0.99072265625, 0.24462890625, 1.26953125, 0.08404541015625, + 1.349609375, 0.73291015625, 1.3212890625, 0.388916015625, + 1.0869140625, 0.9931640625, -1.5673828125, 0.0462646484375, + 0.650390625, 0.253662109375, 0.58251953125, 1.8134765625, + 0.8642578125, 2.591796875, 0.7314453125, 0.85986328125, + 0.5615234375, 0.9296875, 0.04144287109375, 1.66015625, + 1.99609375, 1.171875, 1.181640625, 1.5126953125, + 0.0224456787109375, 0.58349609375, -1.4931640625, + 0.81884765625, 0.732421875, -0.6455078125, -0.62451171875, + 1.7802734375, 0.01526641845703125, -0.423095703125, + 0.461669921875, 4.87890625, 1.2392578125, -0.6953125, + 0.6689453125, 0.62451171875, -1.521484375, 1.7685546875, + 0.810546875, 0.65478515625, 0.26123046875, 1.6396484375, + 0.87548828125, 1.7353515625, 2.046875, 1.5634765625, + 0.69384765625, 1.375, 0.8916015625, 1.0107421875, + 0.1304931640625, 2.009765625, 0.06402587890625, + -0.08428955078125, 0.04351806640625, -1.7529296875, + 2.02734375, 3.521484375, 0.404541015625, 1.6337890625, + -0.276611328125, 0.8837890625, -0.1287841796875, + 0.91064453125, 0.8193359375, 0.701171875, 0.036529541015625, + 1.26171875, 1.0478515625, -0.1422119140625, 1.0634765625, + 0.61083984375, 1.3505859375, 1.208984375, 0.57275390625, + 1.3623046875, 2.267578125, 0.484375, 0.9150390625, + 0.56787109375, -0.70068359375, 0.27587890625, + -0.70654296875, 0.8466796875, 0.57568359375, 1.6162109375, + 0.87939453125, 2.248046875, -0.5458984375, 1.7744140625, + 1.328125, 1.232421875, 0.6806640625, 0.9365234375, + 1.052734375, -1.08984375, 1.8330078125, -0.4033203125, + 1.0673828125, 0.297607421875, 1.5703125, 1.67578125, + 1.34765625, 2.8203125, 2.025390625, -0.48583984375, + 0.7626953125, 0.01007843017578125, 1.435546875, + 0.007205963134765625, 0.05157470703125, -0.9853515625, + 0.26708984375, 1.16796875, 1.2041015625, 1.99609375, + -0.07916259765625, 1.244140625, -0.32080078125, + 0.6748046875, 0.419921875, 1.3212890625, 1.291015625, + 0.599609375, 0.0550537109375, 0.9599609375, 0.93505859375, + 0.111083984375, 1.302734375, 0.0833740234375, 2.244140625, + 1.25390625, 1.6015625, 0.58349609375, 1.7568359375, + -0.263427734375, -0.019866943359375, -0.24658203125, + -0.1871337890625, 0.927734375, 0.62255859375, + 0.275146484375, 0.79541015625, 1.1796875, 1.1767578125, + -0.26123046875, -0.268310546875, 1.8994140625, 1.318359375, + 2.1875, 0.2469482421875, 1.41015625, 0.03973388671875, + 1.2685546875, 1.1025390625, 0.9560546875, 0.865234375, + -1.92578125, 1.154296875, 0.389892578125, 1.130859375, + 0.95947265625, 0.72314453125, 2.244140625, + 0.048553466796875, 0.626953125, 0.42919921875, + 0.82275390625, 0.311767578125, -0.320556640625, + 0.01041412353515625, 0.1483154296875, 0.10809326171875, + -0.3173828125, 1.1337890625, -0.8642578125, 
1.4033203125, + 0.048828125, 1.1787109375, 0.98779296875, 1.818359375, + 1.1552734375, 0.6015625, 1.2392578125, -1.2685546875, + 0.39208984375, 0.83251953125, 0.224365234375, + 0.0019989013671875, 0.87548828125, 1.6572265625, + 1.107421875, 0.434814453125, 1.8251953125, 0.442626953125, + 1.2587890625, 0.09320068359375, -0.896484375, 1.8017578125, + 1.451171875, -0.0755615234375, 0.6083984375, 2.06640625, + 0.673828125, -0.33740234375, 0.192138671875, 0.21435546875, + 0.80224609375, -1.490234375, 0.9501953125, 0.86083984375, + -0.40283203125, 4.109375, 2.533203125, 1.2529296875, + 0.8271484375, 0.225830078125, 1.0478515625, -1.9755859375, + 0.841796875, 0.392822265625, 0.525390625, 0.33935546875, + -0.79443359375, 0.71630859375, 0.97998046875, + -0.175537109375, 0.97705078125, 1.705078125, 0.29638671875, + 0.68359375, 0.54150390625, 0.435791015625, 0.99755859375, + -0.369140625, 1.009765625, -0.140380859375, 0.426513671875, + 0.189697265625, 1.8193359375, 1.1201171875, -0.5009765625, + -0.331298828125, 0.759765625, -0.09442138671875, 0.74609375, + -1.947265625, 1.3544921875, -3.935546875, 2.544921875, + 1.359375, 0.1363525390625, 0.79296875, 0.79931640625, + -0.3466796875, 1.1396484375, -0.33447265625, 2.0078125, + -0.241455078125, 0.6318359375, 0.365234375, 0.296142578125, + 0.830078125, 1.0458984375, 0.5830078125, 0.61572265625, + 14.0703125, -2.0078125, -0.381591796875, 1.228515625, + 0.08282470703125, -0.67822265625, -0.04339599609375, + 0.397216796875, 0.1656494140625, 0.137451171875, + 0.244873046875, 1.1611328125, -1.3818359375, 0.8447265625, + 1.171875, 0.36328125, 0.252685546875, 0.1197509765625, + 0.232177734375, -0.020172119140625, 0.64404296875, + -0.01100921630859375, -1.9267578125, 0.222412109375, + 0.56005859375, 1.3046875, 1.1630859375, 1.197265625, + 1.02734375, 1.6806640625, -0.043731689453125, 1.4697265625, + 0.81201171875, 1.5390625, 1.240234375, -0.7353515625, + 1.828125, 1.115234375, 1.931640625, -0.517578125, + 0.77880859375, 1.0546875, 0.95361328125, 3.42578125, + 0.0160369873046875, 0.875, 0.56005859375, 1.2421875, + 1.986328125, 1.4814453125, 0.0948486328125, 1.115234375, + 0.00665283203125, 2.09375, 0.3544921875, -0.52783203125, + 1.2099609375, 0.45068359375, 0.65625, 0.1112060546875, + 1.0751953125, -0.9521484375, -0.30029296875, 1.4462890625, + 2.046875, 3.212890625, 1.68359375, 1.07421875, + -0.5263671875, 0.74560546875, 1.37890625, 0.15283203125, + 0.2440185546875, 0.62646484375, -0.1280517578125, + 0.7646484375, -0.515625, -0.35693359375, 1.2958984375, + 0.96923828125, 0.58935546875, 1.3701171875, 1.0673828125, + 0.2337646484375, 0.93115234375, 0.66357421875, 6.0, + 1.1025390625, -0.51708984375, -0.38330078125, 0.7197265625, + 0.246826171875, -0.45166015625, 1.9521484375, 0.5546875, + 0.08807373046875, 0.18505859375, 0.8857421875, + -0.57177734375, 0.251708984375, 0.234375, 2.57421875, + 0.9599609375, 0.5029296875, 0.10382080078125, + 0.08331298828125, 0.66748046875, -0.349609375, 1.287109375, + 0.259765625, 2.015625, 2.828125, -0.3095703125, + -0.164306640625, -0.3408203125, 0.486572265625, + 0.8466796875, 1.9130859375, 0.09088134765625, 0.66552734375, + 0.00972747802734375, -0.83154296875, 1.755859375, + 0.654296875, 0.173828125, 0.27587890625, -0.47607421875, + -0.264404296875, 0.7529296875, 0.6533203125, 0.7275390625, + 0.499755859375, 0.833984375, -0.44775390625, -0.05078125, + -0.454833984375, 0.75439453125, 0.68505859375, + 0.210693359375, -0.283935546875, -0.53564453125, + 0.96826171875, 0.861328125, -3.33984375, -0.26171875, + 0.77734375, 
0.26513671875, -0.14111328125, -0.042236328125, + -0.84814453125, 0.2137451171875, 0.94921875, 0.65185546875, + -0.5380859375, 0.1529541015625, -0.360595703125, + -0.0333251953125, -0.69189453125, 0.8974609375, 0.7109375, + 0.81494140625, -0.259521484375, 1.1904296875, 0.62158203125, + 1.345703125, 0.89404296875, 0.70556640625, 1.0673828125, + 1.392578125, 0.5068359375, 0.962890625, 0.736328125, + 1.55078125, 0.50390625, -0.398681640625, 2.361328125, + 0.345947265625, -0.61962890625, 0.330078125, 0.75439453125, + -0.673828125, -0.2379150390625, 1.5673828125, 1.369140625, + 0.1119384765625, -0.1834716796875, 1.4599609375, + -0.77587890625, 0.5556640625, 0.09954833984375, + 0.0285186767578125, 0.58935546875, -0.501953125, + 0.212890625, 0.02679443359375, 0.1715087890625, + 0.03466796875, -0.564453125, 2.029296875, 2.45703125, + -0.72216796875, 2.138671875, 0.50830078125, + -0.09356689453125, 0.230224609375, 1.6943359375, + 1.5126953125, 0.39453125, 0.411376953125, 1.07421875, + -0.8046875, 0.51416015625, 0.2271728515625, -0.283447265625, + 0.38427734375, 0.73388671875, 0.6962890625, 1.4990234375, + 0.02813720703125, 0.40478515625, 1.2451171875, 1.1162109375, + -5.5703125, 0.76171875, 0.322021484375, 1.0361328125, + 1.197265625, 0.1163330078125, 0.2425537109375, 1.5595703125, + 1.5791015625, -0.0921630859375, 0.484619140625, + 1.9052734375, 5.31640625, 1.6337890625, 0.95947265625, + -0.1751708984375, 0.466552734375, 0.8330078125, 1.03125, + 0.2044677734375, 0.31298828125, -1.1220703125, 0.5517578125, + 0.93505859375, 0.45166015625, 1.951171875, 0.65478515625, + 1.30859375, 1.0859375, 0.56494140625, 2.322265625, + 0.242919921875, 1.81640625, -0.469970703125, -0.841796875, + 0.90869140625, 1.5361328125, 0.923828125, 1.0595703125, + 0.356689453125, -0.46142578125, 2.134765625, 1.3037109375, + -0.32373046875, -9.2265625, 0.4521484375, 0.88037109375, + -0.53955078125, 0.96484375, 0.7705078125, 0.84521484375, + 1.580078125, -0.1448974609375, 0.7607421875, 1.0166015625, + -0.086669921875, 1.611328125, 0.05938720703125, 0.5078125, + 0.8427734375, 2.431640625, 0.66357421875, 3.203125, + 0.132080078125, 0.461181640625, 0.779296875, 1.9482421875, + 1.8720703125, 0.845703125, -1.3837890625, -0.138916015625, + 0.35546875, 0.2457275390625, 0.75341796875, 1.828125, + 1.4169921875, 0.60791015625, 1.0068359375, 1.109375, + 0.484130859375, -0.302001953125, 0.4951171875, 0.802734375, + 1.9482421875, 0.916015625, 0.1646728515625, 2.599609375, + 1.7177734375, -0.2374267578125, 0.98046875, 0.39306640625, + -1.1396484375, 1.6533203125, 0.375244140625], "scores": + [0.09906005859375, 0.12213134765625, 0.05035400390625, + 0.16552734375, 0.0037384033203125, 0.010101318359375, + 0.058197021484375, 0.10552978515625, 0.1141357421875, + 0.115234375, 0.00444793701171875, 0.00812530517578125, + 0.0343017578125, 0.009002685546875, 0.087158203125, + 0.00818634033203125, 0.003238677978515625, 0.024169921875, + 0.00702667236328125, 0.00936126708984375, + 0.00632476806640625, 0.0293731689453125, 0.01800537109375, + 0.0088348388671875, 0.013397216796875, 0.02557373046875, + 0.12109375, 0.004413604736328125, 0.016571044921875, + 0.00274658203125, 0.2142333984375, 0.29638671875, + 0.16015625, 0.007843017578125, 0.007160186767578125, + 0.00508880615234375, 0.054229736328125, 0.048736572265625, + 0.026397705078125, 0.0265350341796875, 0.051361083984375, + 0.018310546875, 0.00974273681640625, 0.0218963623046875, + 0.216064453125, 0.021728515625, 0.0173797607421875, + 0.004520416259765625], "stoks": [52, 52, 52, 52, 52, 41, 41, + 374, 
303, 303, 303, 427], "time": {"begin_ms": 640, + "end_ms": 1140}}]} + - id: 7645a0d1-2e64-410d-83a8-b96040432e9a + chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 + timestamp: 1716244957031 + role: AGENT + type: AGENT_MESSAGE + message_text: Hello! + emotion_features: >- + {"Admiration": 0.044921875, "Adoration": 0.0253753662109375, + "Aesthetic Appreciation": 0.03265380859375, "Amusement": + 0.118408203125, "Anger": 0.06719970703125, "Anxiety": + 0.0411376953125, "Awe": 0.03802490234375, "Awkwardness": + 0.056549072265625, "Boredom": 0.04217529296875, "Calmness": + 0.08709716796875, "Concentration": 0.070556640625, + "Confusion": 0.06964111328125, "Contemplation": + 0.0343017578125, "Contempt": 0.037689208984375, + "Contentment": 0.059417724609375, "Craving": + 0.01132965087890625, "Desire": 0.01406097412109375, + "Determination": 0.1143798828125, "Disappointment": + 0.051177978515625, "Disgust": 0.028594970703125, "Distress": + 0.054901123046875, "Doubt": 0.04638671875, "Ecstasy": + 0.0258026123046875, "Embarrassment": 0.0222015380859375, + "Empathic Pain": 0.015777587890625, "Entrancement": + 0.0160980224609375, "Envy": 0.0163421630859375, + "Excitement": 0.129638671875, "Fear": 0.03125, "Guilt": + 0.01483917236328125, "Horror": 0.0194549560546875, + "Interest": 0.1341552734375, "Joy": 0.0738525390625, "Love": + 0.0216522216796875, "Nostalgia": 0.0210418701171875, "Pain": + 0.020721435546875, "Pride": 0.05499267578125, "Realization": + 0.0728759765625, "Relief": 0.04052734375, "Romance": + 0.0129241943359375, "Sadness": 0.0254669189453125, + "Satisfaction": 0.07159423828125, "Shame": 0.01495361328125, + "Surprise (negative)": 0.05560302734375, "Surprise + (positive)": 0.07965087890625, "Sympathy": + 0.022247314453125, "Tiredness": 0.0194549560546875, + "Triumph": 0.04107666015625} + metadata: '' + source: + openapi: stenographer-openapi.json diff --git a/.mock/definition/empathic-voice/chats.yml b/.mock/definition/empathic-voice/chats.yml index fd5041f3..f484d9c9 100644 --- a/.mock/definition/empathic-voice/chats.yml +++ b/.mock/definition/empathic-voice/chats.yml @@ -1,453 +1,457 @@ imports: - root: __package__.yml + root: __package__.yml service: - auth: false - base-path: "" - endpoints: - list-chats: - path: /v0/evi/chats - method: GET - auth: true - pagination: - offset: $request.page_number - results: $response.chats_page - display-name: List chats - request: - name: ChatsListChatsRequest - query-parameters: - page_number: - type: optional - docs: >- - Specifies the page number to retrieve, enabling pagination. + auth: false + base-path: '' + endpoints: + list-chats: + path: /v0/evi/chats + method: GET + auth: true + pagination: + offset: $request.page_number + results: $response.chats_page + display-name: List chats + request: + name: ChatsListChatsRequest + query-parameters: + page_number: + type: optional + docs: >- + Specifies the page number to retrieve, enabling pagination. - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. + This parameter uses zero-based indexing. 
For example, setting + `page_number` to 0 retrieves the first page of results (items 0-9 + if `page_size` is 10), setting `page_number` to 1 retrieves the + second page (items 10-19), and so on. Defaults to 0, which + retrieves the first page. + page_size: + type: optional + docs: >- + Specifies the maximum number of results to include per page, + enabling pagination. The value must be between 1 and 100, + inclusive. - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - ascending_order: - type: optional - docs: >- - Specifies the sorting order of the results based on their creation - date. Set to true for ascending order (chronological, with the - oldest records first) and false for descending order - (reverse-chronological, with the newest records first). Defaults - to true. - response: - docs: Success - type: root.ReturnPagedChats - examples: - - query-parameters: - page_number: 0 - page_size: 1 - ascending_order: true - response: - body: - page_number: 0 - page_size: 1 - total_pages: 1 - pagination_direction: ASC - chats_page: - - id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - chat_group_id: 9fc18597-3567-42d5-94d6-935bde84bf2f - status: USER_ENDED - start_timestamp: 1716244940648 - end_timestamp: 1716244958546 - event_count: 3 - metadata: "" - config: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 0 - list-chat-events: - path: /v0/evi/chats/{id} - method: GET - auth: true - pagination: - offset: $request.page_number - results: $response.events_page - path-parameters: - id: - type: string - docs: Identifier for a Chat. Formatted as a UUID. - display-name: List chat events - request: - name: ChatsListChatEventsRequest - query-parameters: - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. + For example, if `page_size` is set to 10, each page will include + up to 10 items. Defaults to 10. + ascending_order: + type: optional + docs: >- + Specifies the sorting order of the results based on their creation + date. Set to true for ascending order (chronological, with the + oldest records first) and false for descending order + (reverse-chronological, with the newest records first). Defaults + to true. + response: + docs: Success + type: root.ReturnPagedChats + errors: + - root.BadRequestError + examples: + - query-parameters: + page_number: 0 + page_size: 1 + ascending_order: true + response: + body: + page_number: 0 + page_size: 1 + total_pages: 1 + pagination_direction: ASC + chats_page: + - id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 + chat_group_id: 9fc18597-3567-42d5-94d6-935bde84bf2f + status: USER_ENDED + start_timestamp: 1716244940648 + end_timestamp: 1716244958546 + event_count: 3 + metadata: '' + config: + id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + version: 0 + list-chat-events: + path: /v0/evi/chats/{id} + method: GET + auth: true + pagination: + offset: $request.page_number + results: $response.events_page + path-parameters: + id: + type: string + docs: Identifier for a Chat. Formatted as a UUID. + display-name: List chat events + request: + name: ChatsListChatEventsRequest + query-parameters: + page_size: + type: optional + docs: >- + Specifies the maximum number of results to include per page, + enabling pagination. The value must be between 1 and 100, + inclusive. - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. 
- page_number: - type: optional - docs: >- - Specifies the page number to retrieve, enabling pagination. + For example, if `page_size` is set to 10, each page will include + up to 10 items. Defaults to 10. + page_number: + type: optional + docs: >- + Specifies the page number to retrieve, enabling pagination. - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - ascending_order: - type: optional - docs: >- - Specifies the sorting order of the results based on their creation - date. Set to true for ascending order (chronological, with the - oldest records first) and false for descending order - (reverse-chronological, with the newest records first). Defaults - to true. - response: - docs: Success - type: root.ReturnChatPagedEvents - examples: - - path-parameters: - id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - query-parameters: - page_number: 0 - page_size: 3 - ascending_order: true - response: - body: - id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - chat_group_id: 9fc18597-3567-42d5-94d6-935bde84bf2f - status: USER_ENDED - start_timestamp: 1716244940648 - pagination_direction: ASC - events_page: - - id: 5d44bdbb-49a3-40fb-871d-32bf7e76efe7 - chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - timestamp: 1716244940762 - role: SYSTEM - type: SYSTEM_PROMPT - message_text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - emotion_features: "" - metadata: "" - - id: 5976ddf6-d093-4bb9-ba60-8f6c25832dde - chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - timestamp: 1716244956278 - role: USER - type: USER_MESSAGE - message_text: Hello. 
- emotion_features: >- - {"Admiration": 0.09906005859375, "Adoration": - 0.12213134765625, "Aesthetic Appreciation": - 0.05035400390625, "Amusement": 0.16552734375, "Anger": - 0.0037384033203125, "Anxiety": 0.010101318359375, "Awe": - 0.058197021484375, "Awkwardness": 0.10552978515625, - "Boredom": 0.1141357421875, "Calmness": 0.115234375, - "Concentration": 0.00444793701171875, "Confusion": - 0.0343017578125, "Contemplation": 0.00812530517578125, - "Contempt": 0.009002685546875, "Contentment": - 0.087158203125, "Craving": 0.00818634033203125, "Desire": - 0.018310546875, "Determination": 0.003238677978515625, - "Disappointment": 0.024169921875, "Disgust": - 0.00702667236328125, "Distress": 0.00936126708984375, - "Doubt": 0.00632476806640625, "Ecstasy": 0.0293731689453125, - "Embarrassment": 0.01800537109375, "Empathic Pain": - 0.0088348388671875, "Entrancement": 0.013397216796875, - "Envy": 0.02557373046875, "Excitement": 0.12109375, "Fear": - 0.004413604736328125, "Guilt": 0.016571044921875, "Horror": - 0.00274658203125, "Interest": 0.2142333984375, "Joy": - 0.29638671875, "Love": 0.16015625, "Nostalgia": - 0.007843017578125, "Pain": 0.007160186767578125, "Pride": - 0.00508880615234375, "Realization": 0.054229736328125, - "Relief": 0.048736572265625, "Romance": 0.026397705078125, - "Sadness": 0.0265350341796875, "Satisfaction": - 0.051361083984375, "Shame": 0.00974273681640625, "Surprise - (negative)": 0.0218963623046875, "Surprise (positive)": - 0.216064453125, "Sympathy": 0.021728515625, "Tiredness": - 0.0173797607421875, "Triumph": 0.004520416259765625} - metadata: >- - {"segments": [{"content": "Hello.", "embedding": - [0.6181640625, 0.1763916015625, -30.921875, 1.2705078125, - 0.927734375, 0.63720703125, 2.865234375, 0.1080322265625, - 0.2978515625, 1.0107421875, 1.34375, 0.74560546875, - 0.416259765625, 0.99462890625, -0.333740234375, - 0.361083984375, -1.388671875, 1.0107421875, 1.3173828125, - 0.55615234375, 0.541015625, -0.1837158203125, 1.697265625, - 0.228515625, 2.087890625, -0.311767578125, - 0.053680419921875, 1.3349609375, 0.95068359375, - 0.00441741943359375, 0.705078125, 1.8916015625, - -0.939453125, 0.93701171875, -0.28955078125, 1.513671875, - 0.5595703125, 1.0126953125, -0.1624755859375, 1.4072265625, - -0.28857421875, -0.4560546875, -0.1500244140625, - -0.1102294921875, -0.222412109375, 0.8779296875, - 1.275390625, 1.6689453125, 0.80712890625, -0.34814453125, - -0.325439453125, 0.412841796875, 0.81689453125, - 0.55126953125, 1.671875, 0.6611328125, 0.7451171875, - 1.50390625, 1.0224609375, -1.671875, 0.7373046875, - 2.1328125, 2.166015625, 0.41015625, -0.127685546875, - 1.9345703125, -4.2734375, 0.332275390625, 0.26171875, - 0.76708984375, 0.2685546875, 0.468017578125, 1.208984375, - -1.517578125, 1.083984375, 0.84814453125, 1.0244140625, - -0.0072174072265625, 1.34375, 1.0712890625, 1.517578125, - -0.52001953125, 0.59228515625, 0.8154296875, -0.951171875, - -0.07757568359375, 1.3330078125, 1.125, 0.61181640625, - 1.494140625, 0.357421875, 1.1796875, 1.482421875, 0.8046875, - 0.1536865234375, 1.8076171875, 0.68115234375, -15.171875, - 1.2294921875, 0.319091796875, 0.499755859375, 1.5771484375, - 0.94677734375, -0.2490234375, 0.88525390625, 3.47265625, - 0.75927734375, 0.71044921875, 1.2333984375, 1.4169921875, - -0.56640625, -1.8095703125, 1.37109375, 0.428955078125, - 1.89453125, -0.39013671875, 0.1734619140625, 1.5595703125, - -1.2294921875, 2.552734375, 0.58349609375, 0.2156982421875, - -0.00984954833984375, -0.6865234375, -0.0272979736328125, - -0.2264404296875, 
2.853515625, 1.3896484375, 0.52978515625, - 0.783203125, 3.0390625, 0.75537109375, 0.219970703125, - 0.384521484375, 0.385986328125, 2.0546875, - -0.10443115234375, 1.5146484375, 1.4296875, 1.9716796875, - 1.1318359375, 0.31591796875, 0.338623046875, 1.654296875, - -0.88037109375, -0.21484375, 1.45703125, 1.0380859375, - -0.52294921875, -0.47802734375, 0.1650390625, 1.2392578125, - -1.138671875, 0.56787109375, 1.318359375, 0.4287109375, - 0.1981201171875, 2.4375, 0.281005859375, 0.89404296875, - -0.1552734375, 0.6474609375, -0.08331298828125, - 0.00740814208984375, -0.045501708984375, -0.578125, - 2.02734375, 0.59228515625, 0.35693359375, 1.2919921875, - 1.22265625, 1.0537109375, 0.145263671875, 1.05859375, - -0.369140625, 0.207275390625, 0.78857421875, 0.599609375, - 0.99072265625, 0.24462890625, 1.26953125, 0.08404541015625, - 1.349609375, 0.73291015625, 1.3212890625, 0.388916015625, - 1.0869140625, 0.9931640625, -1.5673828125, 0.0462646484375, - 0.650390625, 0.253662109375, 0.58251953125, 1.8134765625, - 0.8642578125, 2.591796875, 0.7314453125, 0.85986328125, - 0.5615234375, 0.9296875, 0.04144287109375, 1.66015625, - 1.99609375, 1.171875, 1.181640625, 1.5126953125, - 0.0224456787109375, 0.58349609375, -1.4931640625, - 0.81884765625, 0.732421875, -0.6455078125, -0.62451171875, - 1.7802734375, 0.01526641845703125, -0.423095703125, - 0.461669921875, 4.87890625, 1.2392578125, -0.6953125, - 0.6689453125, 0.62451171875, -1.521484375, 1.7685546875, - 0.810546875, 0.65478515625, 0.26123046875, 1.6396484375, - 0.87548828125, 1.7353515625, 2.046875, 1.5634765625, - 0.69384765625, 1.375, 0.8916015625, 1.0107421875, - 0.1304931640625, 2.009765625, 0.06402587890625, - -0.08428955078125, 0.04351806640625, -1.7529296875, - 2.02734375, 3.521484375, 0.404541015625, 1.6337890625, - -0.276611328125, 0.8837890625, -0.1287841796875, - 0.91064453125, 0.8193359375, 0.701171875, 0.036529541015625, - 1.26171875, 1.0478515625, -0.1422119140625, 1.0634765625, - 0.61083984375, 1.3505859375, 1.208984375, 0.57275390625, - 1.3623046875, 2.267578125, 0.484375, 0.9150390625, - 0.56787109375, -0.70068359375, 0.27587890625, - -0.70654296875, 0.8466796875, 0.57568359375, 1.6162109375, - 0.87939453125, 2.248046875, -0.5458984375, 1.7744140625, - 1.328125, 1.232421875, 0.6806640625, 0.9365234375, - 1.052734375, -1.08984375, 1.8330078125, -0.4033203125, - 1.0673828125, 0.297607421875, 1.5703125, 1.67578125, - 1.34765625, 2.8203125, 2.025390625, -0.48583984375, - 0.7626953125, 0.01007843017578125, 1.435546875, - 0.007205963134765625, 0.05157470703125, -0.9853515625, - 0.26708984375, 1.16796875, 1.2041015625, 1.99609375, - -0.07916259765625, 1.244140625, -0.32080078125, - 0.6748046875, 0.419921875, 1.3212890625, 1.291015625, - 0.599609375, 0.0550537109375, 0.9599609375, 0.93505859375, - 0.111083984375, 1.302734375, 0.0833740234375, 2.244140625, - 1.25390625, 1.6015625, 0.58349609375, 1.7568359375, - -0.263427734375, -0.019866943359375, -0.24658203125, - -0.1871337890625, 0.927734375, 0.62255859375, - 0.275146484375, 0.79541015625, 1.1796875, 1.1767578125, - -0.26123046875, -0.268310546875, 1.8994140625, 1.318359375, - 2.1875, 0.2469482421875, 1.41015625, 0.03973388671875, - 1.2685546875, 1.1025390625, 0.9560546875, 0.865234375, - -1.92578125, 1.154296875, 0.389892578125, 1.130859375, - 0.95947265625, 0.72314453125, 2.244140625, - 0.048553466796875, 0.626953125, 0.42919921875, - 0.82275390625, 0.311767578125, -0.320556640625, - 0.01041412353515625, 0.1483154296875, 0.10809326171875, - -0.3173828125, 1.1337890625, -0.8642578125, 
1.4033203125, - 0.048828125, 1.1787109375, 0.98779296875, 1.818359375, - 1.1552734375, 0.6015625, 1.2392578125, -1.2685546875, - 0.39208984375, 0.83251953125, 0.224365234375, - 0.0019989013671875, 0.87548828125, 1.6572265625, - 1.107421875, 0.434814453125, 1.8251953125, 0.442626953125, - 1.2587890625, 0.09320068359375, -0.896484375, 1.8017578125, - 1.451171875, -0.0755615234375, 0.6083984375, 2.06640625, - 0.673828125, -0.33740234375, 0.192138671875, 0.21435546875, - 0.80224609375, -1.490234375, 0.9501953125, 0.86083984375, - -0.40283203125, 4.109375, 2.533203125, 1.2529296875, - 0.8271484375, 0.225830078125, 1.0478515625, -1.9755859375, - 0.841796875, 0.392822265625, 0.525390625, 0.33935546875, - -0.79443359375, 0.71630859375, 0.97998046875, - -0.175537109375, 0.97705078125, 1.705078125, 0.29638671875, - 0.68359375, 0.54150390625, 0.435791015625, 0.99755859375, - -0.369140625, 1.009765625, -0.140380859375, 0.426513671875, - 0.189697265625, 1.8193359375, 1.1201171875, -0.5009765625, - -0.331298828125, 0.759765625, -0.09442138671875, 0.74609375, - -1.947265625, 1.3544921875, -3.935546875, 2.544921875, - 1.359375, 0.1363525390625, 0.79296875, 0.79931640625, - -0.3466796875, 1.1396484375, -0.33447265625, 2.0078125, - -0.241455078125, 0.6318359375, 0.365234375, 0.296142578125, - 0.830078125, 1.0458984375, 0.5830078125, 0.61572265625, - 14.0703125, -2.0078125, -0.381591796875, 1.228515625, - 0.08282470703125, -0.67822265625, -0.04339599609375, - 0.397216796875, 0.1656494140625, 0.137451171875, - 0.244873046875, 1.1611328125, -1.3818359375, 0.8447265625, - 1.171875, 0.36328125, 0.252685546875, 0.1197509765625, - 0.232177734375, -0.020172119140625, 0.64404296875, - -0.01100921630859375, -1.9267578125, 0.222412109375, - 0.56005859375, 1.3046875, 1.1630859375, 1.197265625, - 1.02734375, 1.6806640625, -0.043731689453125, 1.4697265625, - 0.81201171875, 1.5390625, 1.240234375, -0.7353515625, - 1.828125, 1.115234375, 1.931640625, -0.517578125, - 0.77880859375, 1.0546875, 0.95361328125, 3.42578125, - 0.0160369873046875, 0.875, 0.56005859375, 1.2421875, - 1.986328125, 1.4814453125, 0.0948486328125, 1.115234375, - 0.00665283203125, 2.09375, 0.3544921875, -0.52783203125, - 1.2099609375, 0.45068359375, 0.65625, 0.1112060546875, - 1.0751953125, -0.9521484375, -0.30029296875, 1.4462890625, - 2.046875, 3.212890625, 1.68359375, 1.07421875, - -0.5263671875, 0.74560546875, 1.37890625, 0.15283203125, - 0.2440185546875, 0.62646484375, -0.1280517578125, - 0.7646484375, -0.515625, -0.35693359375, 1.2958984375, - 0.96923828125, 0.58935546875, 1.3701171875, 1.0673828125, - 0.2337646484375, 0.93115234375, 0.66357421875, 6.0, - 1.1025390625, -0.51708984375, -0.38330078125, 0.7197265625, - 0.246826171875, -0.45166015625, 1.9521484375, 0.5546875, - 0.08807373046875, 0.18505859375, 0.8857421875, - -0.57177734375, 0.251708984375, 0.234375, 2.57421875, - 0.9599609375, 0.5029296875, 0.10382080078125, - 0.08331298828125, 0.66748046875, -0.349609375, 1.287109375, - 0.259765625, 2.015625, 2.828125, -0.3095703125, - -0.164306640625, -0.3408203125, 0.486572265625, - 0.8466796875, 1.9130859375, 0.09088134765625, 0.66552734375, - 0.00972747802734375, -0.83154296875, 1.755859375, - 0.654296875, 0.173828125, 0.27587890625, -0.47607421875, - -0.264404296875, 0.7529296875, 0.6533203125, 0.7275390625, - 0.499755859375, 0.833984375, -0.44775390625, -0.05078125, - -0.454833984375, 0.75439453125, 0.68505859375, - 0.210693359375, -0.283935546875, -0.53564453125, - 0.96826171875, 0.861328125, -3.33984375, -0.26171875, - 0.77734375, 
0.26513671875, -0.14111328125, -0.042236328125, - -0.84814453125, 0.2137451171875, 0.94921875, 0.65185546875, - -0.5380859375, 0.1529541015625, -0.360595703125, - -0.0333251953125, -0.69189453125, 0.8974609375, 0.7109375, - 0.81494140625, -0.259521484375, 1.1904296875, 0.62158203125, - 1.345703125, 0.89404296875, 0.70556640625, 1.0673828125, - 1.392578125, 0.5068359375, 0.962890625, 0.736328125, - 1.55078125, 0.50390625, -0.398681640625, 2.361328125, - 0.345947265625, -0.61962890625, 0.330078125, 0.75439453125, - -0.673828125, -0.2379150390625, 1.5673828125, 1.369140625, - 0.1119384765625, -0.1834716796875, 1.4599609375, - -0.77587890625, 0.5556640625, 0.09954833984375, - 0.0285186767578125, 0.58935546875, -0.501953125, - 0.212890625, 0.02679443359375, 0.1715087890625, - 0.03466796875, -0.564453125, 2.029296875, 2.45703125, - -0.72216796875, 2.138671875, 0.50830078125, - -0.09356689453125, 0.230224609375, 1.6943359375, - 1.5126953125, 0.39453125, 0.411376953125, 1.07421875, - -0.8046875, 0.51416015625, 0.2271728515625, -0.283447265625, - 0.38427734375, 0.73388671875, 0.6962890625, 1.4990234375, - 0.02813720703125, 0.40478515625, 1.2451171875, 1.1162109375, - -5.5703125, 0.76171875, 0.322021484375, 1.0361328125, - 1.197265625, 0.1163330078125, 0.2425537109375, 1.5595703125, - 1.5791015625, -0.0921630859375, 0.484619140625, - 1.9052734375, 5.31640625, 1.6337890625, 0.95947265625, - -0.1751708984375, 0.466552734375, 0.8330078125, 1.03125, - 0.2044677734375, 0.31298828125, -1.1220703125, 0.5517578125, - 0.93505859375, 0.45166015625, 1.951171875, 0.65478515625, - 1.30859375, 1.0859375, 0.56494140625, 2.322265625, - 0.242919921875, 1.81640625, -0.469970703125, -0.841796875, - 0.90869140625, 1.5361328125, 0.923828125, 1.0595703125, - 0.356689453125, -0.46142578125, 2.134765625, 1.3037109375, - -0.32373046875, -9.2265625, 0.4521484375, 0.88037109375, - -0.53955078125, 0.96484375, 0.7705078125, 0.84521484375, - 1.580078125, -0.1448974609375, 0.7607421875, 1.0166015625, - -0.086669921875, 1.611328125, 0.05938720703125, 0.5078125, - 0.8427734375, 2.431640625, 0.66357421875, 3.203125, - 0.132080078125, 0.461181640625, 0.779296875, 1.9482421875, - 1.8720703125, 0.845703125, -1.3837890625, -0.138916015625, - 0.35546875, 0.2457275390625, 0.75341796875, 1.828125, - 1.4169921875, 0.60791015625, 1.0068359375, 1.109375, - 0.484130859375, -0.302001953125, 0.4951171875, 0.802734375, - 1.9482421875, 0.916015625, 0.1646728515625, 2.599609375, - 1.7177734375, -0.2374267578125, 0.98046875, 0.39306640625, - -1.1396484375, 1.6533203125, 0.375244140625], "scores": - [0.09906005859375, 0.12213134765625, 0.05035400390625, - 0.16552734375, 0.0037384033203125, 0.010101318359375, - 0.058197021484375, 0.10552978515625, 0.1141357421875, - 0.115234375, 0.00444793701171875, 0.00812530517578125, - 0.0343017578125, 0.009002685546875, 0.087158203125, - 0.00818634033203125, 0.003238677978515625, 0.024169921875, - 0.00702667236328125, 0.00936126708984375, - 0.00632476806640625, 0.0293731689453125, 0.01800537109375, - 0.0088348388671875, 0.013397216796875, 0.02557373046875, - 0.12109375, 0.004413604736328125, 0.016571044921875, - 0.00274658203125, 0.2142333984375, 0.29638671875, - 0.16015625, 0.007843017578125, 0.007160186767578125, - 0.00508880615234375, 0.054229736328125, 0.048736572265625, - 0.026397705078125, 0.0265350341796875, 0.051361083984375, - 0.018310546875, 0.00974273681640625, 0.0218963623046875, - 0.216064453125, 0.021728515625, 0.0173797607421875, - 0.004520416259765625], "stoks": [52, 52, 52, 52, 52, 41, 41, - 374, 
303, 303, 303, 427], "time": {"begin_ms": 640, - "end_ms": 1140}}]} - - id: 7645a0d1-2e64-410d-83a8-b96040432e9a - chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 - timestamp: 1716244957031 - role: AGENT - type: AGENT_MESSAGE - message_text: Hello! - emotion_features: >- - {"Admiration": 0.044921875, "Adoration": 0.0253753662109375, - "Aesthetic Appreciation": 0.03265380859375, "Amusement": - 0.118408203125, "Anger": 0.06719970703125, "Anxiety": - 0.0411376953125, "Awe": 0.03802490234375, "Awkwardness": - 0.056549072265625, "Boredom": 0.04217529296875, "Calmness": - 0.08709716796875, "Concentration": 0.070556640625, - "Confusion": 0.06964111328125, "Contemplation": - 0.0343017578125, "Contempt": 0.037689208984375, - "Contentment": 0.059417724609375, "Craving": - 0.01132965087890625, "Desire": 0.01406097412109375, - "Determination": 0.1143798828125, "Disappointment": - 0.051177978515625, "Disgust": 0.028594970703125, "Distress": - 0.054901123046875, "Doubt": 0.04638671875, "Ecstasy": - 0.0258026123046875, "Embarrassment": 0.0222015380859375, - "Empathic Pain": 0.015777587890625, "Entrancement": - 0.0160980224609375, "Envy": 0.0163421630859375, - "Excitement": 0.129638671875, "Fear": 0.03125, "Guilt": - 0.01483917236328125, "Horror": 0.0194549560546875, - "Interest": 0.1341552734375, "Joy": 0.0738525390625, "Love": - 0.0216522216796875, "Nostalgia": 0.0210418701171875, "Pain": - 0.020721435546875, "Pride": 0.05499267578125, "Realization": - 0.0728759765625, "Relief": 0.04052734375, "Romance": - 0.0129241943359375, "Sadness": 0.0254669189453125, - "Satisfaction": 0.07159423828125, "Shame": 0.01495361328125, - "Surprise (negative)": 0.05560302734375, "Surprise - (positive)": 0.07965087890625, "Sympathy": - 0.022247314453125, "Tiredness": 0.0194549560546875, - "Triumph": 0.04107666015625} - metadata: "" - page_number: 0 - page_size: 3 - total_pages: 1 - end_timestamp: 1716244958546 - metadata: "" - config: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 0 - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + This parameter uses zero-based indexing. For example, setting + `page_number` to 0 retrieves the first page of results (items 0-9 + if `page_size` is 10), setting `page_number` to 1 retrieves the + second page (items 10-19), and so on. Defaults to 0, which + retrieves the first page. + ascending_order: + type: optional + docs: >- + Specifies the sorting order of the results based on their creation + date. Set to true for ascending order (chronological, with the + oldest records first) and false for descending order + (reverse-chronological, with the newest records first). Defaults + to true. + response: + docs: Success + type: root.ReturnChatPagedEvents + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 + query-parameters: + page_number: 0 + page_size: 3 + ascending_order: true + response: + body: + id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 + chat_group_id: 9fc18597-3567-42d5-94d6-935bde84bf2f + status: USER_ENDED + start_timestamp: 1716244940648 + pagination_direction: ASC + events_page: + - id: 5d44bdbb-49a3-40fb-871d-32bf7e76efe7 + chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 + timestamp: 1716244940762 + role: SYSTEM + type: SYSTEM_PROMPT + message_text: >- + You are an AI weather assistant providing users with + accurate and up-to-date weather information. Respond to user + queries concisely and clearly. Use simple language and avoid + technical jargon. 
Provide temperature, precipitation, wind + conditions, and any weather alerts. Include helpful tips if + severe weather is expected. + emotion_features: '' + metadata: '' + - id: 5976ddf6-d093-4bb9-ba60-8f6c25832dde + chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 + timestamp: 1716244956278 + role: USER + type: USER_MESSAGE + message_text: Hello. + emotion_features: >- + {"Admiration": 0.09906005859375, "Adoration": + 0.12213134765625, "Aesthetic Appreciation": + 0.05035400390625, "Amusement": 0.16552734375, "Anger": + 0.0037384033203125, "Anxiety": 0.010101318359375, "Awe": + 0.058197021484375, "Awkwardness": 0.10552978515625, + "Boredom": 0.1141357421875, "Calmness": 0.115234375, + "Concentration": 0.00444793701171875, "Confusion": + 0.0343017578125, "Contemplation": 0.00812530517578125, + "Contempt": 0.009002685546875, "Contentment": + 0.087158203125, "Craving": 0.00818634033203125, "Desire": + 0.018310546875, "Determination": 0.003238677978515625, + "Disappointment": 0.024169921875, "Disgust": + 0.00702667236328125, "Distress": 0.00936126708984375, + "Doubt": 0.00632476806640625, "Ecstasy": 0.0293731689453125, + "Embarrassment": 0.01800537109375, "Empathic Pain": + 0.0088348388671875, "Entrancement": 0.013397216796875, + "Envy": 0.02557373046875, "Excitement": 0.12109375, "Fear": + 0.004413604736328125, "Guilt": 0.016571044921875, "Horror": + 0.00274658203125, "Interest": 0.2142333984375, "Joy": + 0.29638671875, "Love": 0.16015625, "Nostalgia": + 0.007843017578125, "Pain": 0.007160186767578125, "Pride": + 0.00508880615234375, "Realization": 0.054229736328125, + "Relief": 0.048736572265625, "Romance": 0.026397705078125, + "Sadness": 0.0265350341796875, "Satisfaction": + 0.051361083984375, "Shame": 0.00974273681640625, "Surprise + (negative)": 0.0218963623046875, "Surprise (positive)": + 0.216064453125, "Sympathy": 0.021728515625, "Tiredness": + 0.0173797607421875, "Triumph": 0.004520416259765625} + metadata: >- + {"segments": [{"content": "Hello.", "embedding": + [0.6181640625, 0.1763916015625, -30.921875, 1.2705078125, + 0.927734375, 0.63720703125, 2.865234375, 0.1080322265625, + 0.2978515625, 1.0107421875, 1.34375, 0.74560546875, + 0.416259765625, 0.99462890625, -0.333740234375, + 0.361083984375, -1.388671875, 1.0107421875, 1.3173828125, + 0.55615234375, 0.541015625, -0.1837158203125, 1.697265625, + 0.228515625, 2.087890625, -0.311767578125, + 0.053680419921875, 1.3349609375, 0.95068359375, + 0.00441741943359375, 0.705078125, 1.8916015625, + -0.939453125, 0.93701171875, -0.28955078125, 1.513671875, + 0.5595703125, 1.0126953125, -0.1624755859375, 1.4072265625, + -0.28857421875, -0.4560546875, -0.1500244140625, + -0.1102294921875, -0.222412109375, 0.8779296875, + 1.275390625, 1.6689453125, 0.80712890625, -0.34814453125, + -0.325439453125, 0.412841796875, 0.81689453125, + 0.55126953125, 1.671875, 0.6611328125, 0.7451171875, + 1.50390625, 1.0224609375, -1.671875, 0.7373046875, + 2.1328125, 2.166015625, 0.41015625, -0.127685546875, + 1.9345703125, -4.2734375, 0.332275390625, 0.26171875, + 0.76708984375, 0.2685546875, 0.468017578125, 1.208984375, + -1.517578125, 1.083984375, 0.84814453125, 1.0244140625, + -0.0072174072265625, 1.34375, 1.0712890625, 1.517578125, + -0.52001953125, 0.59228515625, 0.8154296875, -0.951171875, + -0.07757568359375, 1.3330078125, 1.125, 0.61181640625, + 1.494140625, 0.357421875, 1.1796875, 1.482421875, 0.8046875, + 0.1536865234375, 1.8076171875, 0.68115234375, -15.171875, + 1.2294921875, 0.319091796875, 0.499755859375, 1.5771484375, + 0.94677734375, -0.2490234375, 
0.88525390625, 3.47265625, + 0.75927734375, 0.71044921875, 1.2333984375, 1.4169921875, + -0.56640625, -1.8095703125, 1.37109375, 0.428955078125, + 1.89453125, -0.39013671875, 0.1734619140625, 1.5595703125, + -1.2294921875, 2.552734375, 0.58349609375, 0.2156982421875, + -0.00984954833984375, -0.6865234375, -0.0272979736328125, + -0.2264404296875, 2.853515625, 1.3896484375, 0.52978515625, + 0.783203125, 3.0390625, 0.75537109375, 0.219970703125, + 0.384521484375, 0.385986328125, 2.0546875, + -0.10443115234375, 1.5146484375, 1.4296875, 1.9716796875, + 1.1318359375, 0.31591796875, 0.338623046875, 1.654296875, + -0.88037109375, -0.21484375, 1.45703125, 1.0380859375, + -0.52294921875, -0.47802734375, 0.1650390625, 1.2392578125, + -1.138671875, 0.56787109375, 1.318359375, 0.4287109375, + 0.1981201171875, 2.4375, 0.281005859375, 0.89404296875, + -0.1552734375, 0.6474609375, -0.08331298828125, + 0.00740814208984375, -0.045501708984375, -0.578125, + 2.02734375, 0.59228515625, 0.35693359375, 1.2919921875, + 1.22265625, 1.0537109375, 0.145263671875, 1.05859375, + -0.369140625, 0.207275390625, 0.78857421875, 0.599609375, + 0.99072265625, 0.24462890625, 1.26953125, 0.08404541015625, + 1.349609375, 0.73291015625, 1.3212890625, 0.388916015625, + 1.0869140625, 0.9931640625, -1.5673828125, 0.0462646484375, + 0.650390625, 0.253662109375, 0.58251953125, 1.8134765625, + 0.8642578125, 2.591796875, 0.7314453125, 0.85986328125, + 0.5615234375, 0.9296875, 0.04144287109375, 1.66015625, + 1.99609375, 1.171875, 1.181640625, 1.5126953125, + 0.0224456787109375, 0.58349609375, -1.4931640625, + 0.81884765625, 0.732421875, -0.6455078125, -0.62451171875, + 1.7802734375, 0.01526641845703125, -0.423095703125, + 0.461669921875, 4.87890625, 1.2392578125, -0.6953125, + 0.6689453125, 0.62451171875, -1.521484375, 1.7685546875, + 0.810546875, 0.65478515625, 0.26123046875, 1.6396484375, + 0.87548828125, 1.7353515625, 2.046875, 1.5634765625, + 0.69384765625, 1.375, 0.8916015625, 1.0107421875, + 0.1304931640625, 2.009765625, 0.06402587890625, + -0.08428955078125, 0.04351806640625, -1.7529296875, + 2.02734375, 3.521484375, 0.404541015625, 1.6337890625, + -0.276611328125, 0.8837890625, -0.1287841796875, + 0.91064453125, 0.8193359375, 0.701171875, 0.036529541015625, + 1.26171875, 1.0478515625, -0.1422119140625, 1.0634765625, + 0.61083984375, 1.3505859375, 1.208984375, 0.57275390625, + 1.3623046875, 2.267578125, 0.484375, 0.9150390625, + 0.56787109375, -0.70068359375, 0.27587890625, + -0.70654296875, 0.8466796875, 0.57568359375, 1.6162109375, + 0.87939453125, 2.248046875, -0.5458984375, 1.7744140625, + 1.328125, 1.232421875, 0.6806640625, 0.9365234375, + 1.052734375, -1.08984375, 1.8330078125, -0.4033203125, + 1.0673828125, 0.297607421875, 1.5703125, 1.67578125, + 1.34765625, 2.8203125, 2.025390625, -0.48583984375, + 0.7626953125, 0.01007843017578125, 1.435546875, + 0.007205963134765625, 0.05157470703125, -0.9853515625, + 0.26708984375, 1.16796875, 1.2041015625, 1.99609375, + -0.07916259765625, 1.244140625, -0.32080078125, + 0.6748046875, 0.419921875, 1.3212890625, 1.291015625, + 0.599609375, 0.0550537109375, 0.9599609375, 0.93505859375, + 0.111083984375, 1.302734375, 0.0833740234375, 2.244140625, + 1.25390625, 1.6015625, 0.58349609375, 1.7568359375, + -0.263427734375, -0.019866943359375, -0.24658203125, + -0.1871337890625, 0.927734375, 0.62255859375, + 0.275146484375, 0.79541015625, 1.1796875, 1.1767578125, + -0.26123046875, -0.268310546875, 1.8994140625, 1.318359375, + 2.1875, 0.2469482421875, 1.41015625, 0.03973388671875, + 
1.2685546875, 1.1025390625, 0.9560546875, 0.865234375, + -1.92578125, 1.154296875, 0.389892578125, 1.130859375, + 0.95947265625, 0.72314453125, 2.244140625, + 0.048553466796875, 0.626953125, 0.42919921875, + 0.82275390625, 0.311767578125, -0.320556640625, + 0.01041412353515625, 0.1483154296875, 0.10809326171875, + -0.3173828125, 1.1337890625, -0.8642578125, 1.4033203125, + 0.048828125, 1.1787109375, 0.98779296875, 1.818359375, + 1.1552734375, 0.6015625, 1.2392578125, -1.2685546875, + 0.39208984375, 0.83251953125, 0.224365234375, + 0.0019989013671875, 0.87548828125, 1.6572265625, + 1.107421875, 0.434814453125, 1.8251953125, 0.442626953125, + 1.2587890625, 0.09320068359375, -0.896484375, 1.8017578125, + 1.451171875, -0.0755615234375, 0.6083984375, 2.06640625, + 0.673828125, -0.33740234375, 0.192138671875, 0.21435546875, + 0.80224609375, -1.490234375, 0.9501953125, 0.86083984375, + -0.40283203125, 4.109375, 2.533203125, 1.2529296875, + 0.8271484375, 0.225830078125, 1.0478515625, -1.9755859375, + 0.841796875, 0.392822265625, 0.525390625, 0.33935546875, + -0.79443359375, 0.71630859375, 0.97998046875, + -0.175537109375, 0.97705078125, 1.705078125, 0.29638671875, + 0.68359375, 0.54150390625, 0.435791015625, 0.99755859375, + -0.369140625, 1.009765625, -0.140380859375, 0.426513671875, + 0.189697265625, 1.8193359375, 1.1201171875, -0.5009765625, + -0.331298828125, 0.759765625, -0.09442138671875, 0.74609375, + -1.947265625, 1.3544921875, -3.935546875, 2.544921875, + 1.359375, 0.1363525390625, 0.79296875, 0.79931640625, + -0.3466796875, 1.1396484375, -0.33447265625, 2.0078125, + -0.241455078125, 0.6318359375, 0.365234375, 0.296142578125, + 0.830078125, 1.0458984375, 0.5830078125, 0.61572265625, + 14.0703125, -2.0078125, -0.381591796875, 1.228515625, + 0.08282470703125, -0.67822265625, -0.04339599609375, + 0.397216796875, 0.1656494140625, 0.137451171875, + 0.244873046875, 1.1611328125, -1.3818359375, 0.8447265625, + 1.171875, 0.36328125, 0.252685546875, 0.1197509765625, + 0.232177734375, -0.020172119140625, 0.64404296875, + -0.01100921630859375, -1.9267578125, 0.222412109375, + 0.56005859375, 1.3046875, 1.1630859375, 1.197265625, + 1.02734375, 1.6806640625, -0.043731689453125, 1.4697265625, + 0.81201171875, 1.5390625, 1.240234375, -0.7353515625, + 1.828125, 1.115234375, 1.931640625, -0.517578125, + 0.77880859375, 1.0546875, 0.95361328125, 3.42578125, + 0.0160369873046875, 0.875, 0.56005859375, 1.2421875, + 1.986328125, 1.4814453125, 0.0948486328125, 1.115234375, + 0.00665283203125, 2.09375, 0.3544921875, -0.52783203125, + 1.2099609375, 0.45068359375, 0.65625, 0.1112060546875, + 1.0751953125, -0.9521484375, -0.30029296875, 1.4462890625, + 2.046875, 3.212890625, 1.68359375, 1.07421875, + -0.5263671875, 0.74560546875, 1.37890625, 0.15283203125, + 0.2440185546875, 0.62646484375, -0.1280517578125, + 0.7646484375, -0.515625, -0.35693359375, 1.2958984375, + 0.96923828125, 0.58935546875, 1.3701171875, 1.0673828125, + 0.2337646484375, 0.93115234375, 0.66357421875, 6.0, + 1.1025390625, -0.51708984375, -0.38330078125, 0.7197265625, + 0.246826171875, -0.45166015625, 1.9521484375, 0.5546875, + 0.08807373046875, 0.18505859375, 0.8857421875, + -0.57177734375, 0.251708984375, 0.234375, 2.57421875, + 0.9599609375, 0.5029296875, 0.10382080078125, + 0.08331298828125, 0.66748046875, -0.349609375, 1.287109375, + 0.259765625, 2.015625, 2.828125, -0.3095703125, + -0.164306640625, -0.3408203125, 0.486572265625, + 0.8466796875, 1.9130859375, 0.09088134765625, 0.66552734375, + 0.00972747802734375, -0.83154296875, 1.755859375, + 
0.654296875, 0.173828125, 0.27587890625, -0.47607421875, + -0.264404296875, 0.7529296875, 0.6533203125, 0.7275390625, + 0.499755859375, 0.833984375, -0.44775390625, -0.05078125, + -0.454833984375, 0.75439453125, 0.68505859375, + 0.210693359375, -0.283935546875, -0.53564453125, + 0.96826171875, 0.861328125, -3.33984375, -0.26171875, + 0.77734375, 0.26513671875, -0.14111328125, -0.042236328125, + -0.84814453125, 0.2137451171875, 0.94921875, 0.65185546875, + -0.5380859375, 0.1529541015625, -0.360595703125, + -0.0333251953125, -0.69189453125, 0.8974609375, 0.7109375, + 0.81494140625, -0.259521484375, 1.1904296875, 0.62158203125, + 1.345703125, 0.89404296875, 0.70556640625, 1.0673828125, + 1.392578125, 0.5068359375, 0.962890625, 0.736328125, + 1.55078125, 0.50390625, -0.398681640625, 2.361328125, + 0.345947265625, -0.61962890625, 0.330078125, 0.75439453125, + -0.673828125, -0.2379150390625, 1.5673828125, 1.369140625, + 0.1119384765625, -0.1834716796875, 1.4599609375, + -0.77587890625, 0.5556640625, 0.09954833984375, + 0.0285186767578125, 0.58935546875, -0.501953125, + 0.212890625, 0.02679443359375, 0.1715087890625, + 0.03466796875, -0.564453125, 2.029296875, 2.45703125, + -0.72216796875, 2.138671875, 0.50830078125, + -0.09356689453125, 0.230224609375, 1.6943359375, + 1.5126953125, 0.39453125, 0.411376953125, 1.07421875, + -0.8046875, 0.51416015625, 0.2271728515625, -0.283447265625, + 0.38427734375, 0.73388671875, 0.6962890625, 1.4990234375, + 0.02813720703125, 0.40478515625, 1.2451171875, 1.1162109375, + -5.5703125, 0.76171875, 0.322021484375, 1.0361328125, + 1.197265625, 0.1163330078125, 0.2425537109375, 1.5595703125, + 1.5791015625, -0.0921630859375, 0.484619140625, + 1.9052734375, 5.31640625, 1.6337890625, 0.95947265625, + -0.1751708984375, 0.466552734375, 0.8330078125, 1.03125, + 0.2044677734375, 0.31298828125, -1.1220703125, 0.5517578125, + 0.93505859375, 0.45166015625, 1.951171875, 0.65478515625, + 1.30859375, 1.0859375, 0.56494140625, 2.322265625, + 0.242919921875, 1.81640625, -0.469970703125, -0.841796875, + 0.90869140625, 1.5361328125, 0.923828125, 1.0595703125, + 0.356689453125, -0.46142578125, 2.134765625, 1.3037109375, + -0.32373046875, -9.2265625, 0.4521484375, 0.88037109375, + -0.53955078125, 0.96484375, 0.7705078125, 0.84521484375, + 1.580078125, -0.1448974609375, 0.7607421875, 1.0166015625, + -0.086669921875, 1.611328125, 0.05938720703125, 0.5078125, + 0.8427734375, 2.431640625, 0.66357421875, 3.203125, + 0.132080078125, 0.461181640625, 0.779296875, 1.9482421875, + 1.8720703125, 0.845703125, -1.3837890625, -0.138916015625, + 0.35546875, 0.2457275390625, 0.75341796875, 1.828125, + 1.4169921875, 0.60791015625, 1.0068359375, 1.109375, + 0.484130859375, -0.302001953125, 0.4951171875, 0.802734375, + 1.9482421875, 0.916015625, 0.1646728515625, 2.599609375, + 1.7177734375, -0.2374267578125, 0.98046875, 0.39306640625, + -1.1396484375, 1.6533203125, 0.375244140625], "scores": + [0.09906005859375, 0.12213134765625, 0.05035400390625, + 0.16552734375, 0.0037384033203125, 0.010101318359375, + 0.058197021484375, 0.10552978515625, 0.1141357421875, + 0.115234375, 0.00444793701171875, 0.00812530517578125, + 0.0343017578125, 0.009002685546875, 0.087158203125, + 0.00818634033203125, 0.003238677978515625, 0.024169921875, + 0.00702667236328125, 0.00936126708984375, + 0.00632476806640625, 0.0293731689453125, 0.01800537109375, + 0.0088348388671875, 0.013397216796875, 0.02557373046875, + 0.12109375, 0.004413604736328125, 0.016571044921875, + 0.00274658203125, 0.2142333984375, 0.29638671875, + 
0.16015625, 0.007843017578125, 0.007160186767578125, + 0.00508880615234375, 0.054229736328125, 0.048736572265625, + 0.026397705078125, 0.0265350341796875, 0.051361083984375, + 0.018310546875, 0.00974273681640625, 0.0218963623046875, + 0.216064453125, 0.021728515625, 0.0173797607421875, + 0.004520416259765625], "stoks": [52, 52, 52, 52, 52, 41, 41, + 374, 303, 303, 303, 427], "time": {"begin_ms": 640, + "end_ms": 1140}}]} + - id: 7645a0d1-2e64-410d-83a8-b96040432e9a + chat_id: 470a49f6-1dec-4afe-8b61-035d3b2d63b0 + timestamp: 1716244957031 + role: AGENT + type: AGENT_MESSAGE + message_text: Hello! + emotion_features: >- + {"Admiration": 0.044921875, "Adoration": 0.0253753662109375, + "Aesthetic Appreciation": 0.03265380859375, "Amusement": + 0.118408203125, "Anger": 0.06719970703125, "Anxiety": + 0.0411376953125, "Awe": 0.03802490234375, "Awkwardness": + 0.056549072265625, "Boredom": 0.04217529296875, "Calmness": + 0.08709716796875, "Concentration": 0.070556640625, + "Confusion": 0.06964111328125, "Contemplation": + 0.0343017578125, "Contempt": 0.037689208984375, + "Contentment": 0.059417724609375, "Craving": + 0.01132965087890625, "Desire": 0.01406097412109375, + "Determination": 0.1143798828125, "Disappointment": + 0.051177978515625, "Disgust": 0.028594970703125, "Distress": + 0.054901123046875, "Doubt": 0.04638671875, "Ecstasy": + 0.0258026123046875, "Embarrassment": 0.0222015380859375, + "Empathic Pain": 0.015777587890625, "Entrancement": + 0.0160980224609375, "Envy": 0.0163421630859375, + "Excitement": 0.129638671875, "Fear": 0.03125, "Guilt": + 0.01483917236328125, "Horror": 0.0194549560546875, + "Interest": 0.1341552734375, "Joy": 0.0738525390625, "Love": + 0.0216522216796875, "Nostalgia": 0.0210418701171875, "Pain": + 0.020721435546875, "Pride": 0.05499267578125, "Realization": + 0.0728759765625, "Relief": 0.04052734375, "Romance": + 0.0129241943359375, "Sadness": 0.0254669189453125, + "Satisfaction": 0.07159423828125, "Shame": 0.01495361328125, + "Surprise (negative)": 0.05560302734375, "Surprise + (positive)": 0.07965087890625, "Sympathy": + 0.022247314453125, "Tiredness": 0.0194549560546875, + "Triumph": 0.04107666015625} + metadata: '' + page_number: 0 + page_size: 3 + total_pages: 1 + end_timestamp: 1716244958546 + metadata: '' + config: + id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + version: 0 + source: + openapi: stenographer-openapi.json diff --git a/.mock/definition/empathic-voice/configs.yml b/.mock/definition/empathic-voice/configs.yml index 38d59cb0..95a6c081 100644 --- a/.mock/definition/empathic-voice/configs.yml +++ b/.mock/definition/empathic-voice/configs.yml @@ -1,699 +1,813 @@ imports: - root: __package__.yml + root: __package__.yml service: - auth: false - base-path: "" - endpoints: - list-configs: - path: /v0/evi/configs - method: GET - auth: true - display-name: List configs - request: - name: ConfigsListConfigsRequest - query-parameters: - page_number: - type: optional - docs: >- - Specifies the page number to retrieve, enabling pagination. + auth: false + base-path: '' + endpoints: + list-configs: + path: /v0/evi/configs + method: GET + auth: true + display-name: List configs + request: + name: ConfigsListConfigsRequest + query-parameters: + page_number: + type: optional + docs: >- + Specifies the page number to retrieve, enabling pagination. - This parameter uses zero-based indexing. 
For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. + This parameter uses zero-based indexing. For example, setting + `page_number` to 0 retrieves the first page of results (items 0-9 + if `page_size` is 10), setting `page_number` to 1 retrieves the + second page (items 10-19), and so on. Defaults to 0, which + retrieves the first page. + page_size: + type: optional + docs: >- + Specifies the maximum number of results to include per page, + enabling pagination. The value must be between 1 and 100, + inclusive. - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - restrict_to_most_recent: - type: optional - docs: >- - By default, `restrict_to_most_recent` is set to true, returning - only the latest version of each config. To include all versions of - each config in the list, set `restrict_to_most_recent` to false. - name: - type: optional - docs: Filter to only include configs with this name. - response: - docs: Success - type: root.ReturnPagedConfigs - examples: - - query-parameters: - page_number: 0 - page_size: 1 - response: - body: - page_number: 0 - page_size: 1 - total_pages: 1 - configs_page: - - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 0 - version_description: "" - name: Weather Assistant Config - created_on: 1715267200693 - modified_on: 1715267200693 - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: "" - name: Weather Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to - user queries concisely and clearly. Use simple language - and avoid technical jargon. Provide temperature, - precipitation, wind conditions, and any weather alerts. - Include helpful tips if severe weather is expected. - voice: - provider: HUME_AI - name: KORA - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-5-sonnet-20240620 - temperature: 1 - ellm_model: - allow_short_responses: false - tools: [] - builtin_tools: [] - event_messages: - on_new_chat: - enabled: false - text: "" - on_inactivity_timeout: - enabled: false - text: "" - on_max_duration_timeout: - enabled: false - text: "" - timeouts: - inactivity: - enabled: true - duration_secs: 600 - max_duration: - enabled: true - duration_secs: 1800 - create-config: - path: /v0/evi/configs - method: POST - auth: true - display-name: Create config - request: - name: PostedConfig - body: - properties: - name: - type: string - docs: Name applied to all versions of a particular Config. - version_description: - type: optional - docs: An optional description of the Config version. - prompt: optional - voice: - type: optional - docs: A voice specification associated with this Config. - language_model: - type: optional - docs: >- - The supplemental language model associated with this Config. + For example, if `page_size` is set to 10, each page will include + up to 10 items. Defaults to 10. 
+ restrict_to_most_recent: + type: optional + docs: >- + By default, `restrict_to_most_recent` is set to true, returning + only the latest version of each config. To include all versions of + each config in the list, set `restrict_to_most_recent` to false. + name: + type: optional + docs: Filter to only include configs with this name. + response: + docs: Success + type: root.ReturnPagedConfigs + errors: + - root.BadRequestError + examples: + - query-parameters: + page_number: 0 + page_size: 1 + response: + body: + page_number: 0 + page_size: 1 + total_pages: 1 + configs_page: + - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + version: 0 + version_description: '' + name: Weather Assistant Config + created_on: 1715267200693 + modified_on: 1715267200693 + evi_version: '2' + prompt: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 0 + version_type: FIXED + version_description: '' + name: Weather Assistant Prompt + created_on: 1715267200693 + modified_on: 1715267200693 + text: >- + You are an AI weather assistant providing users with + accurate and up-to-date weather information. Respond to + user queries concisely and clearly. Use simple language + and avoid technical jargon. Provide temperature, + precipitation, wind conditions, and any weather alerts. + Include helpful tips if severe weather is expected. + voice: + provider: HUME_AI + name: SAMPLE VOICE + custom_voice: + id: 00aa8ee9-c50e-4ea1-9af0-7b08ad451704 + version: 1 + name: SAMPLE VOICE + created_on: 1724704587367 + modified_on: 1725489961583 + base_voice: KORA + parameter_model: 20240715-4parameter + parameters: + gender: -7 + huskiness: -2 + nasality: -8 + pitch: -9 + language_model: + model_provider: ANTHROPIC + model_resource: claude-3-5-sonnet-20240620 + temperature: 1 + ellm_model: + allow_short_responses: false + tools: [] + builtin_tools: [] + event_messages: + on_new_chat: + enabled: false + text: '' + on_inactivity_timeout: + enabled: false + text: '' + on_max_duration_timeout: + enabled: false + text: '' + timeouts: + inactivity: + enabled: true + duration_secs: 600 + max_duration: + enabled: true + duration_secs: 1800 + create-config: + path: /v0/evi/configs + method: POST + auth: true + display-name: Create config + request: + name: PostedConfig + body: + properties: + evi_version: + type: string + docs: >- + Specifies the EVI version to use. Use `"1"` for version 1, or + `"2"` for the latest enhanced version. For a detailed comparison + of the two versions, refer to our + [guide](/docs/empathic-voice-interface-evi/evi-2). + name: + type: string + docs: Name applied to all versions of a particular Config. + version_description: + type: optional + docs: An optional description of the Config version. + prompt: optional + voice: + type: optional + docs: A voice specification associated with this Config. + language_model: + type: optional + docs: >- + The supplemental language model associated with this Config. - This model is used to generate longer, more detailed responses - from EVI. Choosing an appropriate supplemental language model - for your use case is crucial for generating fast, high-quality - responses from EVI. - ellm_model: - type: optional - docs: >- - The eLLM setup associated with this Config. + This model is used to generate longer, more detailed responses + from EVI. Choosing an appropriate supplemental language model + for your use case is crucial for generating fast, high-quality + responses from EVI. + ellm_model: + type: optional + docs: >- + The eLLM setup associated with this Config. 
- Hume's eLLM (empathic Large Language Model) is a multimodal - language model that takes into account both expression measures - and language. The eLLM generates short, empathic language - responses and guides text-to-speech (TTS) prosody. - tools: - type: optional>> - docs: List of user-defined tools associated with this Config. - builtin_tools: - type: optional>> - docs: List of built-in tools associated with this Config. - event_messages: optional - timeouts: optional - response: - docs: Created - type: root.ReturnConfig - examples: - - request: - name: Weather Assistant Config - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - voice: - provider: HUME_AI - name: KORA - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-5-sonnet-20240620 - temperature: 1 - event_messages: - on_new_chat: - enabled: false - text: "" - on_inactivity_timeout: - enabled: false - text: "" - on_max_duration_timeout: - enabled: false - text: "" - response: - body: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 0 - version_description: "" - name: Weather Assistant Config - created_on: 1715275452390 - modified_on: 1715275452390 - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: "" - name: Weather Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - voice: - provider: HUME_AI - name: KORA - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-5-sonnet-20240620 - temperature: 1 - ellm_model: - allow_short_responses: false - tools: [] - builtin_tools: [] - event_messages: - on_new_chat: - enabled: false - text: "" - on_inactivity_timeout: - enabled: false - text: "" - on_max_duration_timeout: - enabled: false - text: "" - timeouts: - inactivity: - enabled: true - duration_secs: 600 - max_duration: - enabled: true - duration_secs: 1800 - list-config-versions: - path: /v0/evi/configs/{id} - method: GET - auth: true - path-parameters: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - display-name: List config versions - request: - name: ConfigsListConfigVersionsRequest - query-parameters: - page_number: - type: optional - docs: >- - Specifies the page number to retrieve, enabling pagination. + Hume's eLLM (empathic Large Language Model) is a multimodal + language model that takes into account both expression measures + and language. The eLLM generates short, empathic language + responses and guides text-to-speech (TTS) prosody. + tools: + type: optional>> + docs: List of user-defined tools associated with this Config. + builtin_tools: + type: optional>> + docs: List of built-in tools associated with this Config. 
+ event_messages: optional + timeouts: optional + response: + docs: Created + type: root.ReturnConfig + errors: + - root.BadRequestError + examples: + - request: + name: Weather Assistant Config + prompt: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 0 + evi_version: '2' + voice: + provider: HUME_AI + name: SAMPLE VOICE + language_model: + model_provider: ANTHROPIC + model_resource: claude-3-5-sonnet-20240620 + temperature: 1 + event_messages: + on_new_chat: + enabled: false + text: '' + on_inactivity_timeout: + enabled: false + text: '' + on_max_duration_timeout: + enabled: false + text: '' + response: + body: + id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + version: 0 + version_description: '' + name: Weather Assistant Config + created_on: 1715275452390 + modified_on: 1715275452390 + evi_version: '2' + prompt: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 0 + version_type: FIXED + version_description: '' + name: Weather Assistant Prompt + created_on: 1715267200693 + modified_on: 1715267200693 + text: >- + You are an AI weather assistant providing users with + accurate and up-to-date weather information. Respond to user + queries concisely and clearly. Use simple language and avoid + technical jargon. Provide temperature, precipitation, wind + conditions, and any weather alerts. Include helpful tips if + severe weather is expected. + voice: + provider: HUME_AI + name: SAMPLE VOICE + custom_voice: + id: 00aa8ee9-c50e-4ea1-9af0-7b08ad451704 + version: 1 + name: SAMPLE VOICE + created_on: 1724704587367 + modified_on: 1725489961583 + base_voice: KORA + parameter_model: 20240715-4parameter + parameters: + gender: -7 + huskiness: -2 + nasality: -8 + pitch: -9 + language_model: + model_provider: ANTHROPIC + model_resource: claude-3-5-sonnet-20240620 + temperature: 1 + ellm_model: + allow_short_responses: false + tools: [] + builtin_tools: [] + event_messages: + on_new_chat: + enabled: false + text: '' + on_inactivity_timeout: + enabled: false + text: '' + on_max_duration_timeout: + enabled: false + text: '' + timeouts: + inactivity: + enabled: true + duration_secs: 600 + max_duration: + enabled: true + duration_secs: 1800 + list-config-versions: + path: /v0/evi/configs/{id} + method: GET + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Config. Formatted as a UUID. + display-name: List config versions + request: + name: ConfigsListConfigVersionsRequest + query-parameters: + page_number: + type: optional + docs: >- + Specifies the page number to retrieve, enabling pagination. - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. + This parameter uses zero-based indexing. For example, setting + `page_number` to 0 retrieves the first page of results (items 0-9 + if `page_size` is 10), setting `page_number` to 1 retrieves the + second page (items 10-19), and so on. Defaults to 0, which + retrieves the first page. + page_size: + type: optional + docs: >- + Specifies the maximum number of results to include per page, + enabling pagination. The value must be between 1 and 100, + inclusive. 
- For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - restrict_to_most_recent: - type: optional - docs: >- - By default, `restrict_to_most_recent` is set to true, returning - only the latest version of each config. To include all versions of - each config in the list, set `restrict_to_most_recent` to false. - response: - docs: Success - type: root.ReturnPagedConfigs - examples: - - path-parameters: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - response: - body: - page_number: 0 - page_size: 10 - total_pages: 1 - configs_page: - - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 0 - version_description: "" - name: Weather Assistant Config - created_on: 1715275452390 - modified_on: 1715275452390 - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: "" - name: Weather Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to - user queries concisely and clearly. Use simple language - and avoid technical jargon. Provide temperature, - precipitation, wind conditions, and any weather alerts. - Include helpful tips if severe weather is expected. - voice: - provider: HUME_AI - name: KORA - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-5-sonnet-20240620 - temperature: 1 - ellm_model: - allow_short_responses: false - tools: [] - builtin_tools: [] - event_messages: - on_new_chat: - enabled: false - text: "" - on_inactivity_timeout: - enabled: false - text: "" - on_max_duration_timeout: - enabled: false - text: "" - timeouts: - inactivity: - enabled: true - duration_secs: 600 - max_duration: - enabled: true - duration_secs: 1800 - create-config-version: - path: /v0/evi/configs/{id} - method: POST - auth: true - path-parameters: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - display-name: Create config version - request: - name: PostedConfigVersion - body: - properties: - version_description: - type: optional - docs: An optional description of the Config version. - prompt: optional - voice: - type: optional - docs: A voice specification associated with this Config version. - language_model: - type: optional - docs: >- - The supplemental language model associated with this Config - version. + For example, if `page_size` is set to 10, each page will include + up to 10 items. Defaults to 10. + restrict_to_most_recent: + type: optional + docs: >- + By default, `restrict_to_most_recent` is set to true, returning + only the latest version of each config. To include all versions of + each config in the list, set `restrict_to_most_recent` to false. + response: + docs: Success + type: root.ReturnPagedConfigs + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + response: + body: + page_number: 0 + page_size: 10 + total_pages: 1 + configs_page: + - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + version: 0 + version_description: '' + name: Weather Assistant Config + created_on: 1715275452390 + modified_on: 1715275452390 + evi_version: '2' + prompt: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 0 + version_type: FIXED + version_description: '' + name: Weather Assistant Prompt + created_on: 1715267200693 + modified_on: 1715267200693 + text: >- + You are an AI weather assistant providing users with + accurate and up-to-date weather information. 
Respond to + user queries concisely and clearly. Use simple language + and avoid technical jargon. Provide temperature, + precipitation, wind conditions, and any weather alerts. + Include helpful tips if severe weather is expected. + voice: + provider: HUME_AI + name: SAMPLE VOICE + custom_voice: + id: 00aa8ee9-c50e-4ea1-9af0-7b08ad451704 + version: 1 + name: SAMPLE VOICE + created_on: 1724704587367 + modified_on: 1725489961583 + base_voice: KORA + parameter_model: 20240715-4parameter + parameters: + gender: -7 + huskiness: -2 + nasality: -8 + pitch: -9 + language_model: + model_provider: ANTHROPIC + model_resource: claude-3-5-sonnet-20240620 + temperature: 1 + ellm_model: + allow_short_responses: false + tools: [] + builtin_tools: [] + event_messages: + on_new_chat: + enabled: false + text: '' + on_inactivity_timeout: + enabled: false + text: '' + on_max_duration_timeout: + enabled: false + text: '' + timeouts: + inactivity: + enabled: true + duration_secs: 600 + max_duration: + enabled: true + duration_secs: 1800 + create-config-version: + path: /v0/evi/configs/{id} + method: POST + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Config. Formatted as a UUID. + display-name: Create config version + request: + name: PostedConfigVersion + body: + properties: + evi_version: + type: string + docs: The version of the EVI used with this config. + version_description: + type: optional + docs: An optional description of the Config version. + prompt: optional + voice: + type: optional + docs: A voice specification associated with this Config version. + language_model: + type: optional + docs: >- + The supplemental language model associated with this Config + version. - This model is used to generate longer, more detailed responses - from EVI. Choosing an appropriate supplemental language model - for your use case is crucial for generating fast, high-quality - responses from EVI. - ellm_model: - type: optional - docs: >- - The eLLM setup associated with this Config version. + This model is used to generate longer, more detailed responses + from EVI. Choosing an appropriate supplemental language model + for your use case is crucial for generating fast, high-quality + responses from EVI. + ellm_model: + type: optional + docs: >- + The eLLM setup associated with this Config version. - Hume's eLLM (empathic Large Language Model) is a multimodal - language model that takes into account both expression measures - and language. The eLLM generates short, empathic language - responses and guides text-to-speech (TTS) prosody. - tools: - type: optional>> - docs: List of user-defined tools associated with this Config version. - builtin_tools: - type: optional>> - docs: List of built-in tools associated with this Config version. - event_messages: optional - timeouts: optional - response: - docs: Created - type: root.ReturnConfig - examples: - - path-parameters: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - request: - version_description: This is an updated version of the Weather Assistant Config. 
- prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - voice: - provider: HUME_AI - name: ITO - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-5-sonnet-20240620 - temperature: 1 - ellm_model: - allow_short_responses: true - event_messages: - on_new_chat: - enabled: false - text: "" - on_inactivity_timeout: - enabled: false - text: "" - on_max_duration_timeout: - enabled: false - text: "" - response: - body: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 1 - version_description: This is an updated version of the Weather Assistant Config. - name: Weather Assistant Config - created_on: 1715275452390 - modified_on: 1722642242998 - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: "" - name: Weather Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - voice: - provider: HUME_AI - name: ITO - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-5-sonnet-20240620 - temperature: 1 - ellm_model: - allow_short_responses: true - tools: [] - builtin_tools: [] - event_messages: - on_new_chat: - enabled: false - text: "" - on_inactivity_timeout: - enabled: false - text: "" - on_max_duration_timeout: - enabled: false - text: "" - timeouts: - inactivity: - enabled: true - duration_secs: 600 - max_duration: - enabled: true - duration_secs: 1800 - delete-config: - path: /v0/evi/configs/{id} - method: DELETE - auth: true - path-parameters: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - display-name: Delete config - examples: - - path-parameters: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - update-config-name: - path: /v0/evi/configs/{id} - method: PATCH - auth: true - path-parameters: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - display-name: Update config name - request: - name: PostedConfigName - body: - properties: - name: - type: string - docs: Name applied to all versions of a particular Config. - response: - docs: Success - type: text - examples: - - path-parameters: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - request: - name: Updated Weather Assistant Config Name - get-config-version: - path: /v0/evi/configs/{id}/version/{version} - method: GET - auth: true - path-parameters: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Config. + Hume's eLLM (empathic Large Language Model) is a multimodal + language model that takes into account both expression measures + and language. The eLLM generates short, empathic language + responses and guides text-to-speech (TTS) prosody. + tools: + type: optional>> + docs: List of user-defined tools associated with this Config version. + builtin_tools: + type: optional>> + docs: List of built-in tools associated with this Config version. + event_messages: optional + timeouts: optional + response: + docs: Created + type: root.ReturnConfig + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + request: + version_description: This is an updated version of the Weather Assistant Config. 
+ evi_version: '2' + prompt: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 0 + voice: + provider: HUME_AI + name: ITO + language_model: + model_provider: ANTHROPIC + model_resource: claude-3-5-sonnet-20240620 + temperature: 1 + ellm_model: + allow_short_responses: true + event_messages: + on_new_chat: + enabled: false + text: '' + on_inactivity_timeout: + enabled: false + text: '' + on_max_duration_timeout: + enabled: false + text: '' + response: + body: + id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + version: 1 + version_description: This is an updated version of the Weather Assistant Config. + name: Weather Assistant Config + created_on: 1715275452390 + modified_on: 1722642242998 + evi_version: '2' + prompt: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 0 + version_type: FIXED + version_description: '' + name: Weather Assistant Prompt + created_on: 1715267200693 + modified_on: 1715267200693 + text: >- + You are an AI weather assistant providing users with + accurate and up-to-date weather information. Respond to user + queries concisely and clearly. Use simple language and avoid + technical jargon. Provide temperature, precipitation, wind + conditions, and any weather alerts. Include helpful tips if + severe weather is expected. + voice: + provider: HUME_AI + name: ITO + custom_voice: + id: 00aa8ee9-c50e-4ea1-9af0-7b08ad451704 + version: 1 + name: SAMPLE VOICE + created_on: 1724704587367 + modified_on: 1725489961583 + base_voice: KORA + parameter_model: 20240715-4parameter + parameters: + gender: -7 + huskiness: -2 + nasality: -8 + pitch: -9 + language_model: + model_provider: ANTHROPIC + model_resource: claude-3-5-sonnet-20240620 + temperature: 1 + ellm_model: + allow_short_responses: true + tools: [] + builtin_tools: [] + event_messages: + on_new_chat: + enabled: false + text: '' + on_inactivity_timeout: + enabled: false + text: '' + on_max_duration_timeout: + enabled: false + text: '' + timeouts: + inactivity: + enabled: true + duration_secs: 600 + max_duration: + enabled: true + duration_secs: 1800 + delete-config: + path: /v0/evi/configs/{id} + method: DELETE + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Config. Formatted as a UUID. + display-name: Delete config + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + update-config-name: + path: /v0/evi/configs/{id} + method: PATCH + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Config. Formatted as a UUID. + display-name: Update config name + request: + name: PostedConfigName + body: + properties: + name: + type: string + docs: Name applied to all versions of a particular Config. + response: + docs: Success + type: text + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + request: + name: Updated Weather Assistant Config Name + get-config-version: + path: /v0/evi/configs/{id}/version/{version} + method: GET + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Config. Formatted as a UUID. + version: + type: integer + docs: >- + Version number for a Config. - Configs, as well as Prompts and Tools, are versioned. This - versioning system supports iterative development, allowing you to - progressively refine configurations and revert to previous versions - if needed. + Configs, Prompts, Custom Voices, and Tools are versioned. 
This + versioning system supports iterative development, allowing you to + progressively refine configurations and revert to previous versions + if needed. - Version numbers are integer values representing different iterations - of the Config. Each update to the Config increments its version - number. - display-name: Get config version - response: - docs: Success - type: root.ReturnConfig - examples: - - path-parameters: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 1 - response: - body: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 1 - version_description: "" - name: Weather Assistant Config - created_on: 1715275452390 - modified_on: 1715275452390 - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: "" - name: Weather Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - voice: - provider: HUME_AI - name: KORA - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-5-sonnet-20240620 - temperature: 1 - ellm_model: - allow_short_responses: false - tools: [] - builtin_tools: [] - event_messages: - on_new_chat: - enabled: false - text: "" - on_inactivity_timeout: - enabled: false - text: "" - on_max_duration_timeout: - enabled: false - text: "" - timeouts: - inactivity: - enabled: true - duration_secs: 600 - max_duration: - enabled: true - duration_secs: 1800 - delete-config-version: - path: /v0/evi/configs/{id}/version/{version} - method: DELETE - auth: true - path-parameters: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Config. + Version numbers are integer values representing different iterations + of the Config. Each update to the Config increments its version + number. + display-name: Get config version + response: + docs: Success + type: root.ReturnConfig + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + version: 1 + response: + body: + id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + version: 1 + version_description: '' + name: Weather Assistant Config + created_on: 1715275452390 + modified_on: 1715275452390 + evi_version: '2' + prompt: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 0 + version_type: FIXED + version_description: '' + name: Weather Assistant Prompt + created_on: 1715267200693 + modified_on: 1715267200693 + text: >- + You are an AI weather assistant providing users with + accurate and up-to-date weather information. Respond to user + queries concisely and clearly. Use simple language and avoid + technical jargon. Provide temperature, precipitation, wind + conditions, and any weather alerts. Include helpful tips if + severe weather is expected. 
+ voice: + provider: HUME_AI + name: SAMPLE VOICE + custom_voice: + id: 00aa8ee9-c50e-4ea1-9af0-7b08ad451704 + version: 1 + name: SAMPLE VOICE + created_on: 1724704587367 + modified_on: 1725489961583 + base_voice: KORA + parameter_model: 20240715-4parameter + parameters: + gender: -7 + huskiness: -2 + nasality: -8 + pitch: -9 + language_model: + model_provider: ANTHROPIC + model_resource: claude-3-5-sonnet-20240620 + temperature: 1 + ellm_model: + allow_short_responses: false + tools: [] + builtin_tools: [] + event_messages: + on_new_chat: + enabled: false + text: '' + on_inactivity_timeout: + enabled: false + text: '' + on_max_duration_timeout: + enabled: false + text: '' + timeouts: + inactivity: + enabled: true + duration_secs: 600 + max_duration: + enabled: true + duration_secs: 1800 + delete-config-version: + path: /v0/evi/configs/{id}/version/{version} + method: DELETE + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Config. Formatted as a UUID. + version: + type: integer + docs: >- + Version number for a Config. - Configs, as well as Prompts and Tools, are versioned. This - versioning system supports iterative development, allowing you to - progressively refine configurations and revert to previous versions - if needed. + Configs, Prompts, Custom Voices, and Tools are versioned. This + versioning system supports iterative development, allowing you to + progressively refine configurations and revert to previous versions + if needed. - Version numbers are integer values representing different iterations - of the Config. Each update to the Config increments its version - number. - display-name: Delete config version - examples: - - path-parameters: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 1 - update-config-description: - path: /v0/evi/configs/{id}/version/{version} - method: PATCH - auth: true - path-parameters: - id: - type: string - docs: Identifier for a Config. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Config. + Version numbers are integer values representing different iterations + of the Config. Each update to the Config increments its version + number. + display-name: Delete config version + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + version: 1 + update-config-description: + path: /v0/evi/configs/{id}/version/{version} + method: PATCH + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Config. Formatted as a UUID. + version: + type: integer + docs: >- + Version number for a Config. - Configs, as well as Prompts and Tools, are versioned. This - versioning system supports iterative development, allowing you to - progressively refine configurations and revert to previous versions - if needed. + Configs, Prompts, Custom Voices, and Tools are versioned. This + versioning system supports iterative development, allowing you to + progressively refine configurations and revert to previous versions + if needed. - Version numbers are integer values representing different iterations - of the Config. Each update to the Config increments its version - number. - display-name: Update config description - request: - name: PostedConfigVersionDescription - body: - properties: - version_description: - type: optional - docs: An optional description of the Config version. 
- response: - docs: Success - type: root.ReturnConfig - examples: - - path-parameters: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 1 - request: - version_description: This is an updated version_description. - response: - body: - id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 - version: 1 - version_description: This is an updated version_description. - name: Weather Assistant Config - created_on: 1715275452390 - modified_on: 1715275452390 - prompt: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: "" - name: Weather Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - voice: - provider: HUME_AI - name: KORA - language_model: - model_provider: ANTHROPIC - model_resource: claude-3-5-sonnet-20240620 - temperature: 1 - ellm_model: - allow_short_responses: false - tools: [] - builtin_tools: [] - event_messages: - on_new_chat: - enabled: false - text: "" - on_inactivity_timeout: - enabled: false - text: "" - on_max_duration_timeout: - enabled: false - text: "" - timeouts: - inactivity: - enabled: true - duration_secs: 600 - max_duration: - enabled: true - duration_secs: 1800 - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + Version numbers are integer values representing different iterations + of the Config. Each update to the Config increments its version + number. + display-name: Update config description + request: + name: PostedConfigVersionDescription + body: + properties: + version_description: + type: optional + docs: An optional description of the Config version. + response: + docs: Success + type: root.ReturnConfig + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + version: 1 + request: + version_description: This is an updated version_description. + response: + body: + id: 1b60e1a0-cc59-424a-8d2c-189d354db3f3 + version: 1 + version_description: This is an updated version_description. + name: Weather Assistant Config + created_on: 1715275452390 + modified_on: 1715275452390 + evi_version: '2' + prompt: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 0 + version_type: FIXED + version_description: '' + name: Weather Assistant Prompt + created_on: 1715267200693 + modified_on: 1715267200693 + text: >- + You are an AI weather assistant providing users with + accurate and up-to-date weather information. Respond to user + queries concisely and clearly. Use simple language and avoid + technical jargon. Provide temperature, precipitation, wind + conditions, and any weather alerts. Include helpful tips if + severe weather is expected. 
+ voice: + provider: HUME_AI + name: SAMPLE VOICE + custom_voice: + id: 00aa8ee9-c50e-4ea1-9af0-7b08ad451704 + version: 1 + name: SAMPLE VOICE + created_on: 1724704587367 + modified_on: 1725489961583 + base_voice: KORA + parameter_model: 20240715-4parameter + parameters: + gender: -7 + huskiness: -2 + nasality: -8 + pitch: -9 + language_model: + model_provider: ANTHROPIC + model_resource: claude-3-5-sonnet-20240620 + temperature: 1 + ellm_model: + allow_short_responses: false + tools: [] + builtin_tools: [] + event_messages: + on_new_chat: + enabled: false + text: '' + on_inactivity_timeout: + enabled: false + text: '' + on_max_duration_timeout: + enabled: false + text: '' + timeouts: + inactivity: + enabled: true + duration_secs: 600 + max_duration: + enabled: true + duration_secs: 1800 + source: + openapi: stenographer-openapi.json diff --git a/.mock/definition/empathic-voice/customVoices.yml b/.mock/definition/empathic-voice/customVoices.yml new file mode 100644 index 00000000..6fa448ad --- /dev/null +++ b/.mock/definition/empathic-voice/customVoices.yml @@ -0,0 +1,197 @@ +imports: + root: __package__.yml +service: + auth: false + base-path: '' + endpoints: + getReturnCustomVoicesForUser: + path: /v0/evi/custom_voices + method: GET + auth: true + display-name: List custom voices + request: + name: GetReturnCustomVoicesForUserRequest + query-parameters: + page_number: + type: optional + docs: >- + Specifies the page number to retrieve, enabling pagination. + + + This parameter uses zero-based indexing. For example, setting + `page_number` to 0 retrieves the first page of results (items 0-9 + if `page_size` is 10), setting `page_number` to 1 retrieves the + second page (items 10-19), and so on. Defaults to 0, which + retrieves the first page. + page_size: + type: optional + docs: >- + Specifies the maximum number of results to include per page, + enabling pagination. The value must be between 1 and 100, + inclusive. + + + For example, if `page_size` is set to 10, each page will include + up to 10 items. Defaults to 10. + name: + type: optional + docs: Filter to only include custom voices with this name. + response: + docs: Success + type: root.ReturnPagedCustomVoices + errors: + - root.BadRequestError + examples: + - response: + body: + page_number: 1 + page_size: 1 + total_pages: 1 + custom_voices_page: + - id: id + version: 1 + name: name + created_on: 1000000 + modified_on: 1000000 + base_voice: ITO + parameter_model: 20240715-4parameter + parameters: {} + createNewCustomVoice: + path: /v0/evi/custom_voices + method: POST + auth: true + display-name: Create custom voice + request: + body: root.PostedCustomVoice + response: + docs: Created + type: root.ReturnCustomVoice + errors: + - root.BadRequestError + examples: + - request: + name: name + base_voice: ITO + parameter_model: 20240715-4parameter + response: + body: + id: id + version: 1 + name: name + created_on: 1000000 + modified_on: 1000000 + base_voice: ITO + parameter_model: 20240715-4parameter + parameters: + gender: 1 + huskiness: 1 + nasality: 1 + pitch: 1 + getReturnCustomVoiceByCustomVoiceId: + path: /v0/evi/custom_voices/{id} + method: GET + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Custom Voice. Formatted as a UUID. 
+ display-name: Get specific custom voice by ID + response: + docs: Success + type: root.ReturnCustomVoice + errors: + - root.BadRequestError + examples: + - path-parameters: + id: id + response: + body: + id: id + version: 1 + name: name + created_on: 1000000 + modified_on: 1000000 + base_voice: ITO + parameter_model: 20240715-4parameter + parameters: + gender: 1 + huskiness: 1 + nasality: 1 + pitch: 1 + addNewCustomVoiceVersion: + path: /v0/evi/custom_voices/{id} + method: POST + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Custom Voice. Formatted as a UUID. + display-name: Create new version of existing custom voice + request: + body: root.PostedCustomVoice + response: + docs: Created + type: root.ReturnCustomVoice + errors: + - root.BadRequestError + examples: + - path-parameters: + id: id + request: + name: name + base_voice: ITO + parameter_model: 20240715-4parameter + response: + body: + id: id + version: 1 + name: name + created_on: 1000000 + modified_on: 1000000 + base_voice: ITO + parameter_model: 20240715-4parameter + parameters: + gender: 1 + huskiness: 1 + nasality: 1 + pitch: 1 + deleteCustomVoice: + path: /v0/evi/custom_voices/{id} + method: DELETE + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Custom Voice. Formatted as a UUID. + display-name: Delete a custom voice + errors: + - root.BadRequestError + examples: + - path-parameters: + id: id + updateCustomVoiceName: + path: /v0/evi/custom_voices/{id} + method: PATCH + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Custom Voice. Formatted as a UUID. + display-name: Update custom voice name + request: + name: PostedCustomVoiceName + body: + properties: + name: + type: string + docs: >- + The name of the Custom Voice. Maximum length of 75 characters. + Will be converted to all-uppercase. (e.g., "sample voice" + becomes "SAMPLE VOICE") + response: + docs: Success + type: text + errors: + - root.BadRequestError + source: + openapi: stenographer-openapi.json diff --git a/.mock/definition/empathic-voice/prompts.yml b/.mock/definition/empathic-voice/prompts.yml index 77a74c10..3eb701ea 100644 --- a/.mock/definition/empathic-voice/prompts.yml +++ b/.mock/definition/empathic-voice/prompts.yml @@ -1,447 +1,465 @@ imports: - root: __package__.yml + root: __package__.yml service: - auth: false - base-path: "" - endpoints: - list-prompts: - path: /v0/evi/prompts - method: GET - auth: true - pagination: - offset: $request.page_number - results: $response.prompts_page - display-name: List prompts - request: - name: PromptsListPromptsRequest - query-parameters: - page_number: - type: optional - docs: >- - Specifies the page number to retrieve, enabling pagination. + auth: false + base-path: '' + endpoints: + list-prompts: + path: /v0/evi/prompts + method: GET + auth: true + pagination: + offset: $request.page_number + results: $response.prompts_page + display-name: List prompts + request: + name: PromptsListPromptsRequest + query-parameters: + page_number: + type: optional + docs: >- + Specifies the page number to retrieve, enabling pagination. - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. 
The value must be between 1 and 100, - inclusive. + This parameter uses zero-based indexing. For example, setting + `page_number` to 0 retrieves the first page of results (items 0-9 + if `page_size` is 10), setting `page_number` to 1 retrieves the + second page (items 10-19), and so on. Defaults to 0, which + retrieves the first page. + page_size: + type: optional + docs: >- + Specifies the maximum number of results to include per page, + enabling pagination. The value must be between 1 and 100, + inclusive. - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - restrict_to_most_recent: - type: optional - docs: >- - By default, `restrict_to_most_recent` is set to true, returning - only the latest version of each prompt. To include all versions of - each prompt in the list, set `restrict_to_most_recent` to false. - name: - type: optional - docs: Filter to only include prompts with this name. - response: - docs: Success - type: root.ReturnPagedPrompts - examples: - - query-parameters: - page_number: 0 - page_size: 2 - response: - body: - page_number: 0 - page_size: 2 - total_pages: 1 - prompts_page: - - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: "" - name: Weather Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - - id: 616b2b4c-a096-4445-9c23-64058b564fc2 - version: 0 - version_type: FIXED - version_description: "" - name: Web Search Assistant Prompt - created_on: 1715267200693 - modified_on: 1715267200693 - text: >- - You are an AI web search assistant designed to help - users find accurate and relevant information on the web. - Respond to user queries promptly, using the built-in web - search tool to retrieve up-to-date results. Present - information clearly and concisely, summarizing key points - where necessary. Use simple language and avoid technical - jargon. If needed, provide helpful tips for refining search - queries to obtain better results. - create-prompt: - path: /v0/evi/prompts - method: POST - auth: true - display-name: Create prompt - request: - name: PostedPrompt - body: - properties: - name: - type: string - docs: Name applied to all versions of a particular Prompt. - version_description: - type: optional - docs: An optional description of the Prompt version. - text: - type: string - docs: >- - Instructions used to shape EVI’s behavior, responses, and style. + For example, if `page_size` is set to 10, each page will include + up to 10 items. Defaults to 10. + restrict_to_most_recent: + type: optional + docs: >- + By default, `restrict_to_most_recent` is set to true, returning + only the latest version of each prompt. To include all versions of + each prompt in the list, set `restrict_to_most_recent` to false. + name: + type: optional + docs: Filter to only include prompts with this name. 
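The pagination parameters defined above are shared by the list endpoints in these definitions: page_number is zero-based and page_size is capped at 100 items per page. As a minimal client-side sketch only, the snippet below pages through /v0/evi/prompts; it assumes the requests library, a HUME_API_KEY environment variable, and the X-Hume-Api-Key header used by this API's auth scheme, and the helper name list_all_prompts is illustrative rather than part of any SDK.

# Minimal sketch: page through the list-prompts endpoint defined above.
# Assumes HUME_API_KEY is set in the environment; page_number is zero-based.
import os
import requests

BASE_URL = "https://api.hume.ai"
HEADERS = {"X-Hume-Api-Key": os.environ["HUME_API_KEY"]}

def list_all_prompts(page_size: int = 10):
    page_number = 0
    while True:
        resp = requests.get(
            f"{BASE_URL}/v0/evi/prompts",
            headers=HEADERS,
            params={"page_number": page_number, "page_size": page_size},
        )
        resp.raise_for_status()
        body = resp.json()
        # prompts_page / total_pages match the ReturnPagedPrompts shape shown below.
        yield from body["prompts_page"]
        page_number += 1
        if page_number >= body["total_pages"]:
            break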
+ response: + docs: Success + type: root.ReturnPagedPrompts + errors: + - root.BadRequestError + examples: + - query-parameters: + page_number: 0 + page_size: 2 + response: + body: + page_number: 0 + page_size: 2 + total_pages: 1 + prompts_page: + - id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 0 + version_type: FIXED + version_description: '' + name: Weather Assistant Prompt + created_on: 1715267200693 + modified_on: 1715267200693 + text: >- + You are an AI weather assistant providing users with + accurate and up-to-date weather information. Respond to user + queries concisely and clearly. Use simple language and avoid + technical jargon. Provide temperature, precipitation, wind + conditions, and any weather alerts. Include helpful tips if + severe weather is expected. + - id: 616b2b4c-a096-4445-9c23-64058b564fc2 + version: 0 + version_type: FIXED + version_description: '' + name: Web Search Assistant Prompt + created_on: 1715267200693 + modified_on: 1715267200693 + text: >- + You are an AI web search assistant designed to help + users find accurate and relevant information on the web. + Respond to user queries promptly, using the built-in web + search tool to retrieve up-to-date results. Present + information clearly and concisely, summarizing key points + where necessary. Use simple language and avoid technical + jargon. If needed, provide helpful tips for refining search + queries to obtain better results. + create-prompt: + path: /v0/evi/prompts + method: POST + auth: true + display-name: Create prompt + request: + name: PostedPrompt + body: + properties: + name: + type: string + docs: Name applied to all versions of a particular Prompt. + version_description: + type: optional + docs: An optional description of the Prompt version. + text: + type: string + docs: >- + Instructions used to shape EVI’s behavior, responses, and style. - You can use the Prompt to define a specific goal or role for - EVI, specifying how it should act or what it should focus on - during the conversation. For example, EVI can be instructed to - act as a customer support representative, a fitness coach, or a - travel advisor, each with its own set of behaviors and response - styles. + You can use the Prompt to define a specific goal or role for + EVI, specifying how it should act or what it should focus on + during the conversation. For example, EVI can be instructed to + act as a customer support representative, a fitness coach, or a + travel advisor, each with its own set of behaviors and response + styles. - For help writing a system prompt, see our [Prompting - Guide](/docs/empathic-voice-interface-evi/prompting). - response: - docs: Created - type: optional - examples: - - request: - name: Weather Assistant Prompt - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if severe - weather is expected. - response: - body: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - name: Weather Assistant Prompt - created_on: 1722633247488 - modified_on: 1722633247488 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. 
Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - list-prompt-versions: - path: /v0/evi/prompts/{id} - method: GET - auth: true - path-parameters: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - display-name: List prompt versions - request: - name: PromptsListPromptVersionsRequest - query-parameters: - page_number: - type: optional - docs: >- - Specifies the page number to retrieve, enabling pagination. + For help writing a system prompt, see our [Prompting + Guide](/docs/empathic-voice-interface-evi/prompting). + response: + docs: Created + type: optional + errors: + - root.BadRequestError + examples: + - request: + name: Weather Assistant Prompt + text: >- + You are an AI weather assistant providing users with + accurate and up-to-date weather information. Respond to user + queries concisely and clearly. Use simple language and avoid + technical jargon. Provide temperature, precipitation, wind + conditions, and any weather alerts. Include helpful tips if severe + weather is expected. + response: + body: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 0 + version_type: FIXED + name: Weather Assistant Prompt + created_on: 1722633247488 + modified_on: 1722633247488 + text: >- + You are an AI weather assistant providing users with + accurate and up-to-date weather information. Respond to user + queries concisely and clearly. Use simple language and avoid + technical jargon. Provide temperature, precipitation, wind + conditions, and any weather alerts. Include helpful tips if + severe weather is expected. + list-prompt-versions: + path: /v0/evi/prompts/{id} + method: GET + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Prompt. Formatted as a UUID. + display-name: List prompt versions + request: + name: PromptsListPromptVersionsRequest + query-parameters: + page_number: + type: optional + docs: >- + Specifies the page number to retrieve, enabling pagination. - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. + This parameter uses zero-based indexing. For example, setting + `page_number` to 0 retrieves the first page of results (items 0-9 + if `page_size` is 10), setting `page_number` to 1 retrieves the + second page (items 10-19), and so on. Defaults to 0, which + retrieves the first page. + page_size: + type: optional + docs: >- + Specifies the maximum number of results to include per page, + enabling pagination. The value must be between 1 and 100, + inclusive. - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - restrict_to_most_recent: - type: optional - docs: >- - By default, `restrict_to_most_recent` is set to true, returning - only the latest version of each prompt. To include all versions of - each prompt in the list, set `restrict_to_most_recent` to false. 
- response: - docs: Success - type: root.ReturnPagedPrompts - examples: - - path-parameters: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - response: - body: - page_number: 0 - page_size: 10 - total_pages: 1 - prompts_page: - - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: "" - name: Weather Assistant Prompt - created_on: 1722633247488 - modified_on: 1722633247488 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - create-prompt-verison: - path: /v0/evi/prompts/{id} - method: POST - auth: true - path-parameters: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - display-name: Create prompt version - request: - name: PostedPromptVersion - body: - properties: - version_description: - type: optional - docs: An optional description of the Prompt version. - text: - type: string - docs: >- - Instructions used to shape EVI’s behavior, responses, and style - for this version of the Prompt. + For example, if `page_size` is set to 10, each page will include + up to 10 items. Defaults to 10. + restrict_to_most_recent: + type: optional + docs: >- + By default, `restrict_to_most_recent` is set to true, returning + only the latest version of each prompt. To include all versions of + each prompt in the list, set `restrict_to_most_recent` to false. + response: + docs: Success + type: root.ReturnPagedPrompts + errors: + - root.BadRequestError + examples: + - path-parameters: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + response: + body: + page_number: 0 + page_size: 10 + total_pages: 1 + prompts_page: + - id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 0 + version_type: FIXED + version_description: '' + name: Weather Assistant Prompt + created_on: 1722633247488 + modified_on: 1722633247488 + text: >- + You are an AI weather assistant providing users with + accurate and up-to-date weather information. Respond to user + queries concisely and clearly. Use simple language and avoid + technical jargon. Provide temperature, precipitation, wind + conditions, and any weather alerts. Include helpful tips if + severe weather is expected. + create-prompt-verison: + path: /v0/evi/prompts/{id} + method: POST + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Prompt. Formatted as a UUID. + display-name: Create prompt version + request: + name: PostedPromptVersion + body: + properties: + version_description: + type: optional + docs: An optional description of the Prompt version. + text: + type: string + docs: >- + Instructions used to shape EVI’s behavior, responses, and style + for this version of the Prompt. - You can use the Prompt to define a specific goal or role for - EVI, specifying how it should act or what it should focus on - during the conversation. For example, EVI can be instructed to - act as a customer support representative, a fitness coach, or a - travel advisor, each with its own set of behaviors and response - styles. + You can use the Prompt to define a specific goal or role for + EVI, specifying how it should act or what it should focus on + during the conversation. 
For example, EVI can be instructed to + act as a customer support representative, a fitness coach, or a + travel advisor, each with its own set of behaviors and response + styles. - For help writing a system prompt, see our [Prompting - Guide](/docs/empathic-voice-interface-evi/prompting). - response: - docs: Created - type: optional - examples: - - path-parameters: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - request: - text: >- - You are an updated version of an AI weather assistant - providing users with accurate and up-to-date weather information. - Respond to user queries concisely and clearly. Use simple language - and avoid technical jargon. Provide temperature, precipitation, - wind conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - version_description: This is an updated version of the Weather Assistant Prompt. - response: - body: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 1 - version_type: FIXED - version_description: This is an updated version of the Weather Assistant Prompt. - name: Weather Assistant Prompt - created_on: 1722633247488 - modified_on: 1722635140150 - text: >- - You are an updated version of an AI weather assistant - providing users with accurate and up-to-date weather - information. Respond to user queries concisely and clearly. Use - simple language and avoid technical jargon. Provide temperature, - precipitation, wind conditions, and any weather alerts. Include - helpful tips if severe weather is expected. - delete-prompt: - path: /v0/evi/prompts/{id} - method: DELETE - auth: true - path-parameters: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - display-name: Delete prompt - examples: - - path-parameters: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - update-prompt-name: - path: /v0/evi/prompts/{id} - method: PATCH - auth: true - path-parameters: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - display-name: Update prompt name - request: - name: PostedPromptName - body: - properties: - name: - type: string - docs: Name applied to all versions of a particular Prompt. - response: - docs: Success - type: text - examples: - - path-parameters: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - request: - name: Updated Weather Assistant Prompt Name - get-prompt-version: - path: /v0/evi/prompts/{id}/version/{version} - method: GET - auth: true - path-parameters: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Prompt. + For help writing a system prompt, see our [Prompting + Guide](/docs/empathic-voice-interface-evi/prompting). + response: + docs: Created + type: optional + errors: + - root.BadRequestError + examples: + - path-parameters: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + request: + text: >- + You are an updated version of an AI weather assistant + providing users with accurate and up-to-date weather information. + Respond to user queries concisely and clearly. Use simple language + and avoid technical jargon. Provide temperature, precipitation, + wind conditions, and any weather alerts. Include helpful tips if + severe weather is expected. + version_description: This is an updated version of the Weather Assistant Prompt. + response: + body: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 1 + version_type: FIXED + version_description: This is an updated version of the Weather Assistant Prompt. 
+ name: Weather Assistant Prompt + created_on: 1722633247488 + modified_on: 1722635140150 + text: >- + You are an updated version of an AI weather assistant + providing users with accurate and up-to-date weather + information. Respond to user queries concisely and clearly. Use + simple language and avoid technical jargon. Provide temperature, + precipitation, wind conditions, and any weather alerts. Include + helpful tips if severe weather is expected. + delete-prompt: + path: /v0/evi/prompts/{id} + method: DELETE + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Prompt. Formatted as a UUID. + display-name: Delete prompt + errors: + - root.BadRequestError + examples: + - path-parameters: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + update-prompt-name: + path: /v0/evi/prompts/{id} + method: PATCH + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Prompt. Formatted as a UUID. + display-name: Update prompt name + request: + name: PostedPromptName + body: + properties: + name: + type: string + docs: Name applied to all versions of a particular Prompt. + response: + docs: Success + type: text + errors: + - root.BadRequestError + examples: + - path-parameters: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + request: + name: Updated Weather Assistant Prompt Name + get-prompt-version: + path: /v0/evi/prompts/{id}/version/{version} + method: GET + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Prompt. Formatted as a UUID. + version: + type: integer + docs: >- + Version number for a Prompt. - Prompts, as well as Configs and Tools, are versioned. This - versioning system supports iterative development, allowing you to - progressively refine prompts and revert to previous versions if - needed. + Prompts, Configs, Custom Voices, and Tools are versioned. This + versioning system supports iterative development, allowing you to + progressively refine prompts and revert to previous versions if + needed. - Version numbers are integer values representing different iterations - of the Prompt. Each update to the Prompt increments its version - number. - display-name: Get prompt version - response: - docs: Success - type: optional - examples: - - path-parameters: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - response: - body: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 0 - version_type: FIXED - version_description: "" - name: Weather Assistant Prompt - created_on: 1722633247488 - modified_on: 1722633247488 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - delete-prompt-version: - path: /v0/evi/prompts/{id}/version/{version} - method: DELETE - auth: true - path-parameters: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Prompt. + Version numbers are integer values representing different iterations + of the Prompt. Each update to the Prompt increments its version + number. 
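A specific Prompt iteration is addressed directly through the id and version path parameters described above. The sketch below, assuming the requests library and a HUME_API_KEY environment variable, fetches one FIXED version of a Prompt; get_prompt_version is a hypothetical helper name used only for illustration.

# Sketch only: fetch a single FIXED version of a Prompt
# via GET /v0/evi/prompts/{id}/version/{version}.
import os
import requests

def get_prompt_version(prompt_id: str, version: int) -> dict:
    resp = requests.get(
        f"https://api.hume.ai/v0/evi/prompts/{prompt_id}/version/{version}",
        headers={"X-Hume-Api-Key": os.environ["HUME_API_KEY"]},
    )
    # A 400 response corresponds to BadRequestError in this definition.
    resp.raise_for_status()
    return resp.json()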
+ display-name: Get prompt version + response: + docs: Success + type: optional + errors: + - root.BadRequestError + examples: + - path-parameters: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 0 + response: + body: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 0 + version_type: FIXED + version_description: '' + name: Weather Assistant Prompt + created_on: 1722633247488 + modified_on: 1722633247488 + text: >- + You are an AI weather assistant providing users with + accurate and up-to-date weather information. Respond to user + queries concisely and clearly. Use simple language and avoid + technical jargon. Provide temperature, precipitation, wind + conditions, and any weather alerts. Include helpful tips if + severe weather is expected. + delete-prompt-version: + path: /v0/evi/prompts/{id}/version/{version} + method: DELETE + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Prompt. Formatted as a UUID. + version: + type: integer + docs: >- + Version number for a Prompt. - Prompts, as well as Configs and Tools, are versioned. This - versioning system supports iterative development, allowing you to - progressively refine prompts and revert to previous versions if - needed. + Prompts, Configs, Custom Voices, and Tools are versioned. This + versioning system supports iterative development, allowing you to + progressively refine prompts and revert to previous versions if + needed. - Version numbers are integer values representing different iterations - of the Prompt. Each update to the Prompt increments its version - number. - display-name: Delete prompt version - examples: - - path-parameters: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 1 - update-prompt-description: - path: /v0/evi/prompts/{id}/version/{version} - method: PATCH - auth: true - path-parameters: - id: - type: string - docs: Identifier for a Prompt. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Prompt. + Version numbers are integer values representing different iterations + of the Prompt. Each update to the Prompt increments its version + number. + display-name: Delete prompt version + errors: + - root.BadRequestError + examples: + - path-parameters: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 1 + update-prompt-description: + path: /v0/evi/prompts/{id}/version/{version} + method: PATCH + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Prompt. Formatted as a UUID. + version: + type: integer + docs: >- + Version number for a Prompt. - Prompts, as well as Configs and Tools, are versioned. This - versioning system supports iterative development, allowing you to - progressively refine prompts and revert to previous versions if - needed. + Prompts, Configs, Custom Voices, and Tools are versioned. This + versioning system supports iterative development, allowing you to + progressively refine prompts and revert to previous versions if + needed. - Version numbers are integer values representing different iterations - of the Prompt. Each update to the Prompt increments its version - number. - display-name: Update prompt description - request: - name: PostedPromptVersionDescription - body: - properties: - version_description: - type: optional - docs: An optional description of the Prompt version. - response: - docs: Success - type: optional - examples: - - path-parameters: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 1 - request: - version_description: This is an updated version_description. 
- response: - body: - id: af699d45-2985-42cc-91b9-af9e5da3bac5 - version: 1 - version_type: FIXED - version_description: This is an updated version_description. - name: string - created_on: 1722633247488 - modified_on: 1722634770585 - text: >- - You are an AI weather assistant providing users with - accurate and up-to-date weather information. Respond to user - queries concisely and clearly. Use simple language and avoid - technical jargon. Provide temperature, precipitation, wind - conditions, and any weather alerts. Include helpful tips if - severe weather is expected. - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + Version numbers are integer values representing different iterations + of the Prompt. Each update to the Prompt increments its version + number. + display-name: Update prompt description + request: + name: PostedPromptVersionDescription + body: + properties: + version_description: + type: optional + docs: An optional description of the Prompt version. + response: + docs: Success + type: optional + errors: + - root.BadRequestError + examples: + - path-parameters: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 1 + request: + version_description: This is an updated version_description. + response: + body: + id: af699d45-2985-42cc-91b9-af9e5da3bac5 + version: 1 + version_type: FIXED + version_description: This is an updated version_description. + name: string + created_on: 1722633247488 + modified_on: 1722634770585 + text: >- + You are an AI weather assistant providing users with + accurate and up-to-date weather information. Respond to user + queries concisely and clearly. Use simple language and avoid + technical jargon. Provide temperature, precipitation, wind + conditions, and any weather alerts. Include helpful tips if + severe weather is expected. + source: + openapi: stenographer-openapi.json diff --git a/.mock/definition/empathic-voice/tools.yml b/.mock/definition/empathic-voice/tools.yml index 75e6c87d..a7fd95aa 100644 --- a/.mock/definition/empathic-voice/tools.yml +++ b/.mock/definition/empathic-voice/tools.yml @@ -1,501 +1,519 @@ imports: - root: __package__.yml + root: __package__.yml service: - auth: false - base-path: "" - endpoints: - list-tools: - path: /v0/evi/tools - method: GET - auth: true - pagination: - offset: $request.page_number - results: $response.tools_page - display-name: List tools - request: - name: ToolsListToolsRequest - query-parameters: - page_number: - type: optional - docs: >- - Specifies the page number to retrieve, enabling pagination. + auth: false + base-path: '' + endpoints: + list-tools: + path: /v0/evi/tools + method: GET + auth: true + pagination: + offset: $request.page_number + results: $response.tools_page + display-name: List tools + request: + name: ToolsListToolsRequest + query-parameters: + page_number: + type: optional + docs: >- + Specifies the page number to retrieve, enabling pagination. - This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. + This parameter uses zero-based indexing. 
For example, setting + `page_number` to 0 retrieves the first page of results (items 0-9 + if `page_size` is 10), setting `page_number` to 1 retrieves the + second page (items 10-19), and so on. Defaults to 0, which + retrieves the first page. + page_size: + type: optional + docs: >- + Specifies the maximum number of results to include per page, + enabling pagination. The value must be between 1 and 100, + inclusive. - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - restrict_to_most_recent: - type: optional - docs: >- - By default, `restrict_to_most_recent` is set to true, returning - only the latest version of each tool. To include all versions of - each tool in the list, set `restrict_to_most_recent` to false. - name: - type: optional - docs: Filter to only include tools with this name. - response: - docs: Success - type: root.ReturnPagedUserDefinedTools - examples: - - query-parameters: - page_number: 0 - page_size: 2 - response: - body: - page_number: 0 - page_size: 2 - total_pages: 1 - tools_page: - - tool_type: FUNCTION - id: d20827af-5d8d-4f66-b6b9-ce2e3e1ea2b2 - version: 0 - version_type: FIXED - version_description: Fetches user's current location. - name: get_current_location - created_on: 1715267200693 - modified_on: 1715267200693 - fallback_content: Unable to fetch location. - description: Fetches user's current location. - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San - Francisco, CA" }}, "required": ["location"] } - - tool_type: FUNCTION - id: 4442f3ea-9038-40e3-a2ce-1522b7de770f - version: 0 - version_type: FIXED - version_description: >- - Fetches current weather and uses celsius or fahrenheit based - on location of user. - name: get_current_weather - created_on: 1715266126705 - modified_on: 1715266126705 - fallback_content: Unable to fetch location. - description: >- - Fetches current weather and uses celsius or fahrenheit based - on location of user. - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San - Francisco, CA" }, "format": { "type": "string", "enum": - ["celsius", "fahrenheit"], "description": "The temperature - unit to use. Infer this from the users location." } }, - "required": ["location", "format"] } - create-tool: - path: /v0/evi/tools - method: POST - auth: true - display-name: Create tool - request: - name: PostedUserDefinedTool - body: - properties: - name: - type: string - docs: Name applied to all versions of a particular Tool. - version_description: - type: optional - docs: An optional description of the Tool version. - description: - type: optional - docs: >- - An optional description of what the Tool does, used by the - supplemental LLM to choose when and how to call the function. - parameters: - type: string - docs: >- - Stringified JSON defining the parameters used by this version of - the Tool. + For example, if `page_size` is set to 10, each page will include + up to 10 items. Defaults to 10. + restrict_to_most_recent: + type: optional + docs: >- + By default, `restrict_to_most_recent` is set to true, returning + only the latest version of each tool. To include all versions of + each tool in the list, set `restrict_to_most_recent` to false. + name: + type: optional + docs: Filter to only include tools with this name. 
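Because restrict_to_most_recent defaults to true, the list-tools endpoint above returns only the latest version of each Tool unless the flag is disabled. The hedged sketch below lists every version of every Tool; it again assumes requests and a HUME_API_KEY environment variable, and the flag is passed as the lowercase string "false" to avoid Python's default "False" query serialization.

# Sketch only: list all versions of all Tools by disabling the
# restrict_to_most_recent filter described above.
import os
import requests

resp = requests.get(
    "https://api.hume.ai/v0/evi/tools",
    headers={"X-Hume-Api-Key": os.environ["HUME_API_KEY"]},
    params={"page_number": 0, "page_size": 100, "restrict_to_most_recent": "false"},
)
resp.raise_for_status()
for tool in resp.json()["tools_page"]:
    print(tool["name"], tool["version"])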
+ response: + docs: Success + type: root.ReturnPagedUserDefinedTools + errors: + - root.BadRequestError + examples: + - query-parameters: + page_number: 0 + page_size: 2 + response: + body: + page_number: 0 + page_size: 2 + total_pages: 1 + tools_page: + - tool_type: FUNCTION + id: d20827af-5d8d-4f66-b6b9-ce2e3e1ea2b2 + version: 0 + version_type: FIXED + version_description: Fetches user's current location. + name: get_current_location + created_on: 1715267200693 + modified_on: 1715267200693 + fallback_content: Unable to fetch location. + description: Fetches user's current location. + parameters: >- + { "type": "object", "properties": { "location": { "type": + "string", "description": "The city and state, e.g. San + Francisco, CA" }}, "required": ["location"] } + - tool_type: FUNCTION + id: 4442f3ea-9038-40e3-a2ce-1522b7de770f + version: 0 + version_type: FIXED + version_description: >- + Fetches current weather and uses celsius or fahrenheit based + on location of user. + name: get_current_weather + created_on: 1715266126705 + modified_on: 1715266126705 + fallback_content: Unable to fetch location. + description: >- + Fetches current weather and uses celsius or fahrenheit based + on location of user. + parameters: >- + { "type": "object", "properties": { "location": { "type": + "string", "description": "The city and state, e.g. San + Francisco, CA" }, "format": { "type": "string", "enum": + ["celsius", "fahrenheit"], "description": "The temperature + unit to use. Infer this from the users location." } }, + "required": ["location", "format"] } + create-tool: + path: /v0/evi/tools + method: POST + auth: true + display-name: Create tool + request: + name: PostedUserDefinedTool + body: + properties: + name: + type: string + docs: Name applied to all versions of a particular Tool. + version_description: + type: optional + docs: An optional description of the Tool version. + description: + type: optional + docs: >- + An optional description of what the Tool does, used by the + supplemental LLM to choose when and how to call the function. + parameters: + type: string + docs: >- + Stringified JSON defining the parameters used by this version of + the Tool. - These parameters define the inputs needed for the Tool’s - execution, including the expected data type and description for - each input field. Structured as a stringified JSON schema, this - format ensures the Tool receives data in the expected format. - fallback_content: - type: optional - docs: >- - Optional text passed to the supplemental LLM in place of the - tool call result. The LLM then uses this text to generate a - response back to the user, ensuring continuity in the - conversation if the Tool errors. - response: - docs: Created - type: optional - examples: - - request: - name: get_current_weather - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San Francisco, - CA" }, "format": { "type": "string", "enum": ["celsius", - "fahrenheit"], "description": "The temperature unit to use. Infer - this from the users location." } }, "required": ["location", - "format"] } - version_description: >- - Fetches current weather and uses celsius or fahrenheit based on - location of user. - description: This tool is for getting the current weather. - fallback_content: Unable to fetch current weather. 
- response: - body: - tool_type: FUNCTION - id: aa9b71c4-723c-47ff-9f83-1a1829e74376 - version: 0 - version_type: FIXED - version_description: >- - Fetches current weather and uses celsius or fahrenheit based on - location of user. - name: get_current_weather - created_on: 1715275452390 - modified_on: 1715275452390 - fallback_content: Unable to fetch current weather. - description: This tool is for getting the current weather. - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San - Francisco, CA" }, "format": { "type": "string", "enum": - ["celsius", "fahrenheit"], "description": "The temperature unit - to use. Infer this from the users location." } }, "required": - ["location", "format"] } - list-tool-versions: - path: /v0/evi/tools/{id} - method: GET - auth: true - path-parameters: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - display-name: List tool versions - request: - name: ToolsListToolVersionsRequest - query-parameters: - page_number: - type: optional - docs: >- - Specifies the page number to retrieve, enabling pagination. + These parameters define the inputs needed for the Tool’s + execution, including the expected data type and description for + each input field. Structured as a stringified JSON schema, this + format ensures the Tool receives data in the expected format. + fallback_content: + type: optional + docs: >- + Optional text passed to the supplemental LLM in place of the + tool call result. The LLM then uses this text to generate a + response back to the user, ensuring continuity in the + conversation if the Tool errors. + response: + docs: Created + type: optional + errors: + - root.BadRequestError + examples: + - request: + name: get_current_weather + parameters: >- + { "type": "object", "properties": { "location": { "type": + "string", "description": "The city and state, e.g. San Francisco, + CA" }, "format": { "type": "string", "enum": ["celsius", + "fahrenheit"], "description": "The temperature unit to use. Infer + this from the users location." } }, "required": ["location", + "format"] } + version_description: >- + Fetches current weather and uses celsius or fahrenheit based on + location of user. + description: This tool is for getting the current weather. + fallback_content: Unable to fetch current weather. + response: + body: + tool_type: FUNCTION + id: aa9b71c4-723c-47ff-9f83-1a1829e74376 + version: 0 + version_type: FIXED + version_description: >- + Fetches current weather and uses celsius or fahrenheit based on + location of user. + name: get_current_weather + created_on: 1715275452390 + modified_on: 1715275452390 + fallback_content: Unable to fetch current weather. + description: This tool is for getting the current weather. + parameters: >- + { "type": "object", "properties": { "location": { "type": + "string", "description": "The city and state, e.g. San + Francisco, CA" }, "format": { "type": "string", "enum": + ["celsius", "fahrenheit"], "description": "The temperature unit + to use. Infer this from the users location." } }, "required": + ["location", "format"] } + list-tool-versions: + path: /v0/evi/tools/{id} + method: GET + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Tool. Formatted as a UUID. + display-name: List tool versions + request: + name: ToolsListToolVersionsRequest + query-parameters: + page_number: + type: optional + docs: >- + Specifies the page number to retrieve, enabling pagination. 
- This parameter uses zero-based indexing. For example, setting - `page_number` to 0 retrieves the first page of results (items 0-9 - if `page_size` is 10), setting `page_number` to 1 retrieves the - second page (items 10-19), and so on. Defaults to 0, which - retrieves the first page. - page_size: - type: optional - docs: >- - Specifies the maximum number of results to include per page, - enabling pagination. The value must be between 1 and 100, - inclusive. + This parameter uses zero-based indexing. For example, setting + `page_number` to 0 retrieves the first page of results (items 0-9 + if `page_size` is 10), setting `page_number` to 1 retrieves the + second page (items 10-19), and so on. Defaults to 0, which + retrieves the first page. + page_size: + type: optional + docs: >- + Specifies the maximum number of results to include per page, + enabling pagination. The value must be between 1 and 100, + inclusive. - For example, if `page_size` is set to 10, each page will include - up to 10 items. Defaults to 10. - restrict_to_most_recent: - type: optional - docs: >- - By default, `restrict_to_most_recent` is set to true, returning - only the latest version of each tool. To include all versions of - each tool in the list, set `restrict_to_most_recent` to false. - response: - docs: Success - type: root.ReturnPagedUserDefinedTools - examples: - - path-parameters: - id: 00183a3f-79ba-413d-9f3b-609864268bea - response: - body: - page_number: 0 - page_size: 10 - total_pages: 1 - tools_page: - - tool_type: FUNCTION - id: 00183a3f-79ba-413d-9f3b-609864268bea - version: 1 - version_type: FIXED - version_description: >- - Fetches current weather and uses celsius, fahrenheit, or - kelvin based on location of user. - name: get_current_weather - created_on: 1715277014228 - modified_on: 1715277602313 - fallback_content: Unable to fetch current weather. - description: This tool is for getting the current weather. - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San - Francisco, CA" }, "format": { "type": "string", "enum": - ["celsius", "fahrenheit", "kelvin"], "description": "The - temperature unit to use. Infer this from the users - location." } }, "required": ["location", "format"] } - create-tool-version: - path: /v0/evi/tools/{id} - method: POST - auth: true - path-parameters: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - display-name: Create tool version - request: - name: PostedUserDefinedToolVersion - body: - properties: - version_description: - type: optional - docs: An optional description of the Tool version. - description: - type: optional - docs: >- - An optional description of what the Tool does, used by the - supplemental LLM to choose when and how to call the function. - parameters: - type: string - docs: >- - Stringified JSON defining the parameters used by this version of - the Tool. + For example, if `page_size` is set to 10, each page will include + up to 10 items. Defaults to 10. + restrict_to_most_recent: + type: optional + docs: >- + By default, `restrict_to_most_recent` is set to true, returning + only the latest version of each tool. To include all versions of + each tool in the list, set `restrict_to_most_recent` to false. 
+ response: + docs: Success + type: root.ReturnPagedUserDefinedTools + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 00183a3f-79ba-413d-9f3b-609864268bea + response: + body: + page_number: 0 + page_size: 10 + total_pages: 1 + tools_page: + - tool_type: FUNCTION + id: 00183a3f-79ba-413d-9f3b-609864268bea + version: 1 + version_type: FIXED + version_description: >- + Fetches current weather and uses celsius, fahrenheit, or + kelvin based on location of user. + name: get_current_weather + created_on: 1715277014228 + modified_on: 1715277602313 + fallback_content: Unable to fetch current weather. + description: This tool is for getting the current weather. + parameters: >- + { "type": "object", "properties": { "location": { "type": + "string", "description": "The city and state, e.g. San + Francisco, CA" }, "format": { "type": "string", "enum": + ["celsius", "fahrenheit", "kelvin"], "description": "The + temperature unit to use. Infer this from the users + location." } }, "required": ["location", "format"] } + create-tool-version: + path: /v0/evi/tools/{id} + method: POST + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Tool. Formatted as a UUID. + display-name: Create tool version + request: + name: PostedUserDefinedToolVersion + body: + properties: + version_description: + type: optional + docs: An optional description of the Tool version. + description: + type: optional + docs: >- + An optional description of what the Tool does, used by the + supplemental LLM to choose when and how to call the function. + parameters: + type: string + docs: >- + Stringified JSON defining the parameters used by this version of + the Tool. - These parameters define the inputs needed for the Tool’s - execution, including the expected data type and description for - each input field. Structured as a stringified JSON schema, this - format ensures the Tool receives data in the expected format. - fallback_content: - type: optional - docs: >- - Optional text passed to the supplemental LLM in place of the - tool call result. The LLM then uses this text to generate a - response back to the user, ensuring continuity in the - conversation if the Tool errors. - response: - docs: Created - type: optional - examples: - - path-parameters: - id: 00183a3f-79ba-413d-9f3b-609864268bea - request: - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San Francisco, - CA" }, "format": { "type": "string", "enum": ["celsius", - "fahrenheit", "kelvin"], "description": "The temperature unit to - use. Infer this from the users location." } }, "required": - ["location", "format"] } - version_description: >- - Fetches current weather and uses celsius, fahrenheit, or kelvin - based on location of user. - fallback_content: Unable to fetch current weather. - description: This tool is for getting the current weather. - response: - body: - tool_type: FUNCTION - id: 00183a3f-79ba-413d-9f3b-609864268bea - version: 1 - version_type: FIXED - version_description: >- - Fetches current weather and uses celsius, fahrenheit, or kelvin - based on location of user. - name: get_current_weather - created_on: 1715277014228 - modified_on: 1715277602313 - fallback_content: Unable to fetch current weather. - description: This tool is for getting the current weather. - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. 
San - Francisco, CA" }, "format": { "type": "string", "enum": - ["celsius", "fahrenheit", "kelvin"], "description": "The - temperature unit to use. Infer this from the users location." } - }, "required": ["location", "format"] } - delete-tool: - path: /v0/evi/tools/{id} - method: DELETE - auth: true - path-parameters: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - display-name: Delete tool - examples: - - path-parameters: - id: 00183a3f-79ba-413d-9f3b-609864268bea - update-tool-name: - path: /v0/evi/tools/{id} - method: PATCH - auth: true - path-parameters: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - display-name: Update tool name - request: - name: PostedUserDefinedToolName - body: - properties: - name: - type: string - docs: Name applied to all versions of a particular Tool. - response: - docs: Success - type: text - examples: - - path-parameters: - id: 00183a3f-79ba-413d-9f3b-609864268bea - request: - name: get_current_temperature - get-tool-version: - path: /v0/evi/tools/{id}/version/{version} - method: GET - auth: true - path-parameters: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Tool. + These parameters define the inputs needed for the Tool’s + execution, including the expected data type and description for + each input field. Structured as a stringified JSON schema, this + format ensures the Tool receives data in the expected format. + fallback_content: + type: optional + docs: >- + Optional text passed to the supplemental LLM in place of the + tool call result. The LLM then uses this text to generate a + response back to the user, ensuring continuity in the + conversation if the Tool errors. + response: + docs: Created + type: optional + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 00183a3f-79ba-413d-9f3b-609864268bea + request: + parameters: >- + { "type": "object", "properties": { "location": { "type": + "string", "description": "The city and state, e.g. San Francisco, + CA" }, "format": { "type": "string", "enum": ["celsius", + "fahrenheit", "kelvin"], "description": "The temperature unit to + use. Infer this from the users location." } }, "required": + ["location", "format"] } + version_description: >- + Fetches current weather and uses celsius, fahrenheit, or kelvin + based on location of user. + fallback_content: Unable to fetch current weather. + description: This tool is for getting the current weather. + response: + body: + tool_type: FUNCTION + id: 00183a3f-79ba-413d-9f3b-609864268bea + version: 1 + version_type: FIXED + version_description: >- + Fetches current weather and uses celsius, fahrenheit, or kelvin + based on location of user. + name: get_current_weather + created_on: 1715277014228 + modified_on: 1715277602313 + fallback_content: Unable to fetch current weather. + description: This tool is for getting the current weather. + parameters: >- + { "type": "object", "properties": { "location": { "type": + "string", "description": "The city and state, e.g. San + Francisco, CA" }, "format": { "type": "string", "enum": + ["celsius", "fahrenheit", "kelvin"], "description": "The + temperature unit to use. Infer this from the users location." } + }, "required": ["location", "format"] } + delete-tool: + path: /v0/evi/tools/{id} + method: DELETE + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Tool. Formatted as a UUID. 
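As the create-tool-version request above illustrates, the parameters field is a stringified JSON schema, so a client typically builds it as an ordinary dictionary and serializes it with json.dumps before sending. The sketch below posts a new version of an existing Tool to /v0/evi/tools/{id}; it assumes requests, a HUME_API_KEY environment variable, and a placeholder TOOL_ID, and the body values simply mirror the weather example used in these definitions.

# Sketch only: create a new Tool version with a stringified JSON schema
# in the parameters field (POST /v0/evi/tools/{id}).
import json
import os
import requests

schema = {
    "type": "object",
    "properties": {
        "location": {
            "type": "string",
            "description": "The city and state, e.g. San Francisco, CA",
        },
        "format": {"type": "string", "enum": ["celsius", "fahrenheit", "kelvin"]},
    },
    "required": ["location", "format"],
}

tool_id = os.environ["TOOL_ID"]  # placeholder identifier for an existing Tool
resp = requests.post(
    f"https://api.hume.ai/v0/evi/tools/{tool_id}",
    headers={"X-Hume-Api-Key": os.environ["HUME_API_KEY"]},
    json={
        "parameters": json.dumps(schema),  # stringified, as the definition requires
        "version_description": "Adds kelvin support.",
        "fallback_content": "Unable to fetch current weather.",
        "description": "This tool is for getting the current weather.",
    },
)
resp.raise_for_status()
print(resp.json()["version"])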
+ display-name: Delete tool + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 00183a3f-79ba-413d-9f3b-609864268bea + update-tool-name: + path: /v0/evi/tools/{id} + method: PATCH + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Tool. Formatted as a UUID. + display-name: Update tool name + request: + name: PostedUserDefinedToolName + body: + properties: + name: + type: string + docs: Name applied to all versions of a particular Tool. + response: + docs: Success + type: text + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 00183a3f-79ba-413d-9f3b-609864268bea + request: + name: get_current_temperature + get-tool-version: + path: /v0/evi/tools/{id}/version/{version} + method: GET + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Tool. Formatted as a UUID. + version: + type: integer + docs: >- + Version number for a Tool. - Tools, as well as Configs and Prompts, are versioned. This - versioning system supports iterative development, allowing you to - progressively refine tools and revert to previous versions if - needed. + Tools, Configs, Custom Voices, and Prompts are versioned. This + versioning system supports iterative development, allowing you to + progressively refine tools and revert to previous versions if + needed. - Version numbers are integer values representing different iterations - of the Tool. Each update to the Tool increments its version number. - display-name: Get tool version - response: - docs: Success - type: optional - examples: - - path-parameters: - id: 00183a3f-79ba-413d-9f3b-609864268bea - version: 1 - response: - body: - tool_type: FUNCTION - id: 00183a3f-79ba-413d-9f3b-609864268bea - version: 1 - version_type: FIXED - version_description: >- - Fetches current weather and uses celsius, fahrenheit, or kelvin - based on location of user. - name: string - created_on: 1715277014228 - modified_on: 1715277602313 - fallback_content: Unable to fetch current weather. - description: This tool is for getting the current weather. - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San - Francisco, CA" }, "format": { "type": "string", "enum": - ["celsius", "fahrenheit", "kelvin"], "description": "The - temperature unit to use. Infer this from the users location." } - }, "required": ["location", "format"] } - delete-tool-version: - path: /v0/evi/tools/{id}/version/{version} - method: DELETE - auth: true - path-parameters: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Tool. + Version numbers are integer values representing different iterations + of the Tool. Each update to the Tool increments its version number. + display-name: Get tool version + response: + docs: Success + type: optional + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 00183a3f-79ba-413d-9f3b-609864268bea + version: 1 + response: + body: + tool_type: FUNCTION + id: 00183a3f-79ba-413d-9f3b-609864268bea + version: 1 + version_type: FIXED + version_description: >- + Fetches current weather and uses celsius, fahrenheit, or kelvin + based on location of user. + name: string + created_on: 1715277014228 + modified_on: 1715277602313 + fallback_content: Unable to fetch current weather. + description: This tool is for getting the current weather. 
+ parameters: >- + { "type": "object", "properties": { "location": { "type": + "string", "description": "The city and state, e.g. San + Francisco, CA" }, "format": { "type": "string", "enum": + ["celsius", "fahrenheit", "kelvin"], "description": "The + temperature unit to use. Infer this from the users location." } + }, "required": ["location", "format"] } + delete-tool-version: + path: /v0/evi/tools/{id}/version/{version} + method: DELETE + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Tool. Formatted as a UUID. + version: + type: integer + docs: >- + Version number for a Tool. - Tools, as well as Configs and Prompts, are versioned. This - versioning system supports iterative development, allowing you to - progressively refine tools and revert to previous versions if - needed. + Tools, Configs, Custom Voices, and Prompts are versioned. This + versioning system supports iterative development, allowing you to + progressively refine tools and revert to previous versions if + needed. - Version numbers are integer values representing different iterations - of the Tool. Each update to the Tool increments its version number. - display-name: Delete tool version - examples: - - path-parameters: - id: 00183a3f-79ba-413d-9f3b-609864268bea - version: 1 - update-tool-description: - path: /v0/evi/tools/{id}/version/{version} - method: PATCH - auth: true - path-parameters: - id: - type: string - docs: Identifier for a Tool. Formatted as a UUID. - version: - type: integer - docs: >- - Version number for a Tool. + Version numbers are integer values representing different iterations + of the Tool. Each update to the Tool increments its version number. + display-name: Delete tool version + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 00183a3f-79ba-413d-9f3b-609864268bea + version: 1 + update-tool-description: + path: /v0/evi/tools/{id}/version/{version} + method: PATCH + auth: true + path-parameters: + id: + type: string + docs: Identifier for a Tool. Formatted as a UUID. + version: + type: integer + docs: >- + Version number for a Tool. - Tools, as well as Configs and Prompts, are versioned. This - versioning system supports iterative development, allowing you to - progressively refine tools and revert to previous versions if - needed. + Tools, Configs, Custom Voices, and Prompts are versioned. This + versioning system supports iterative development, allowing you to + progressively refine tools and revert to previous versions if + needed. - Version numbers are integer values representing different iterations - of the Tool. Each update to the Tool increments its version number. - display-name: Update tool description - request: - name: PostedUserDefinedToolVersionDescription - body: - properties: - version_description: - type: optional - docs: An optional description of the Tool version. - response: - docs: Success - type: optional - examples: - - path-parameters: - id: 00183a3f-79ba-413d-9f3b-609864268bea - version: 1 - request: - version_description: >- - Fetches current temperature, precipitation, wind speed, AQI, and - other weather conditions. Uses Celsius, Fahrenheit, or kelvin - depending on user's region. - response: - body: - tool_type: FUNCTION - id: 00183a3f-79ba-413d-9f3b-609864268bea - version: 1 - version_type: FIXED - version_description: >- - Fetches current temperature, precipitation, wind speed, AQI, and - other weather conditions. Uses Celsius, Fahrenheit, or kelvin - depending on user's region. 
- name: string - created_on: 1715277014228 - modified_on: 1715277602313 - fallback_content: Unable to fetch current weather. - description: This tool is for getting the current weather. - parameters: >- - { "type": "object", "properties": { "location": { "type": - "string", "description": "The city and state, e.g. San - Francisco, CA" }, "format": { "type": "string", "enum": - ["celsius", "fahrenheit", "kelvin"], "description": "The - temperature unit to use. Infer this from the users location." } - }, "required": ["location", "format"] } - source: - openapi: ../empathic-voice-interface/stenographer-openapi.json + Version numbers are integer values representing different iterations + of the Tool. Each update to the Tool increments its version number. + display-name: Update tool description + request: + name: PostedUserDefinedToolVersionDescription + body: + properties: + version_description: + type: optional + docs: An optional description of the Tool version. + response: + docs: Success + type: optional + errors: + - root.BadRequestError + examples: + - path-parameters: + id: 00183a3f-79ba-413d-9f3b-609864268bea + version: 1 + request: + version_description: >- + Fetches current temperature, precipitation, wind speed, AQI, and + other weather conditions. Uses Celsius, Fahrenheit, or kelvin + depending on user's region. + response: + body: + tool_type: FUNCTION + id: 00183a3f-79ba-413d-9f3b-609864268bea + version: 1 + version_type: FIXED + version_description: >- + Fetches current temperature, precipitation, wind speed, AQI, and + other weather conditions. Uses Celsius, Fahrenheit, or kelvin + depending on user's region. + name: string + created_on: 1715277014228 + modified_on: 1715277602313 + fallback_content: Unable to fetch current weather. + description: This tool is for getting the current weather. + parameters: >- + { "type": "object", "properties": { "location": { "type": + "string", "description": "The city and state, e.g. San + Francisco, CA" }, "format": { "type": "string", "enum": + ["celsius", "fahrenheit", "kelvin"], "description": "The + temperature unit to use. Infer this from the users location." } + }, "required": ["location", "format"] } + source: + openapi: stenographer-openapi.json diff --git a/.mock/definition/expression-measurement/batch.yml b/.mock/definition/expression-measurement/batch.yml deleted file mode 100644 index f2d3ed3e..00000000 --- a/.mock/definition/expression-measurement/batch.yml +++ /dev/null @@ -1,1774 +0,0 @@ -service: - auth: false - base-path: "" - endpoints: - list-jobs: - path: /v0/batch/jobs - method: GET - auth: true - docs: Sort and filter jobs. - display-name: List jobs - request: - name: BatchListJobsRequest - query-parameters: - limit: - type: optional - docs: The maximum number of jobs to include in the response. - status: - type: optional - allow-multiple: true - docs: >- - Include only jobs of this status in the response. There are four - possible statuses: - - - - `QUEUED`: The job has been received and is waiting to be - processed. - - - - `IN_PROGRESS`: The job is currently being processed. - - - - `COMPLETED`: The job has finished processing. - - - - `FAILED`: The job encountered an error and could not be - completed successfully. - when: - type: optional - docs: >- - Specify whether to include jobs created before or after a given - `timestamp_ms`. - timestamp_ms: - type: optional - docs: |- - Provide a timestamp in milliseconds to filter jobs. 
- - When combined with the `when` parameter, you can filter jobs before or after the given timestamp. Defaults to the current Unix timestamp if one is not provided. - sort_by: - type: optional - docs: >- - Specify which timestamp to sort the jobs by. - - - - `created`: Sort jobs by the time of creation, indicated by - `created_timestamp_ms`. - - - - `started`: Sort jobs by the time processing started, indicated - by `started_timestamp_ms`. - - - - `ended`: Sort jobs by the time processing ended, indicated by - `ended_timestamp_ms`. - direction: - type: optional - docs: >- - Specify the order in which to sort the jobs. Defaults to - descending order. - - - - `asc`: Sort in ascending order (chronological, with the oldest - records first). - - - - `desc`: Sort in descending order (reverse-chronological, with - the newest records first). - response: - docs: "" - type: list - examples: - - response: - body: - - job_id: job_id - request: - files: - - filename: filename - md5sum: md5sum - content_type: content_type - models: - burst: {} - face: - fps_pred: 3 - identify_faces: false - min_face_size: 60 - prob_threshold: 0.99 - save_faces: false - facemesh: {} - language: - granularity: word - identify_speakers: false - ner: - identify_speakers: false - prosody: - granularity: utterance - identify_speakers: false - notify: true - text: [] - urls: - - https://hume-tutorials.s3.amazonaws.com/faces.zip - state: - created_timestamp_ms: 1712587158717 - ended_timestamp_ms: 1712587159274 - num_errors: 0 - num_predictions: 10 - started_timestamp_ms: 1712587158800 - status: COMPLETED - type: INFERENCE - start-inference-job: - path: /v0/batch/jobs - method: POST - auth: true - docs: Start a new measurement inference job. - display-name: Start inference job - request: - body: InferenceBaseRequest - response: - docs: "" - type: JobId - property: job_id - examples: - - request: - urls: - - https://hume-tutorials.s3.amazonaws.com/faces.zip - notify: true - response: - body: - job_id: job_id - get-job-details: - path: /v0/batch/jobs/{id} - method: GET - auth: true - docs: Get the request details and state of a given job. - path-parameters: - id: - type: string - docs: The unique identifier for the job. - display-name: Get job details - response: - docs: "" - type: UnionJob - examples: - - name: Inference - path-parameters: - id: job_id - response: - body: - type: INFERENCE - job_id: job_id - request: - files: [] - models: - burst: {} - face: - fps_pred: 3 - identify_faces: false - min_face_size: 60 - prob_threshold: 0.99 - save_faces: false - facemesh: {} - language: - granularity: word - identify_speakers: false - ner: - identify_speakers: false - prosody: - granularity: utterance - identify_speakers: false - notify: true - text: [] - urls: - - https://hume-tutorials.s3.amazonaws.com/faces.zip - state: - created_timestamp_ms: 1712590457884 - ended_timestamp_ms: 1712590462252 - num_errors: 0 - num_predictions: 10 - started_timestamp_ms: 1712590457995 - status: COMPLETED - get-job-predictions: - path: /v0/batch/jobs/{id}/predictions - method: GET - auth: true - docs: Get the JSON predictions of a completed inference job. - path-parameters: - id: - type: string - docs: The unique identifier for the job. 
- display-name: Get job predictions - response: - docs: "" - type: list - examples: - - path-parameters: - id: job_id - response: - body: - - source: - type: url - url: https://hume-tutorials.s3.amazonaws.com/faces.zip - results: - predictions: - - file: faces/100.jpg - models: - face: - grouped_predictions: - - id: unknown - predictions: - - frame: 0 - time: 0 - prob: 0.9994111061096191 - box: - x: 1187.885986328125 - "y": 1397.697509765625 - w: 1401.668701171875 - h: 1961.424560546875 - emotions: - - name: Admiration - score: 0.10722749680280685 - - name: Adoration - score: 0.06395940482616425 - - name: Aesthetic Appreciation - score: 0.05811462551355362 - - name: Amusement - score: 0.14187128841876984 - - name: Anger - score: 0.02804684266448021 - - name: Anxiety - score: 0.2713485360145569 - - name: Awe - score: 0.33812594413757324 - - name: Awkwardness - score: 0.1745193600654602 - - name: Boredom - score: 0.23600080609321594 - - name: Calmness - score: 0.18988418579101562 - - name: Concentration - score: 0.44288986921310425 - - name: Confusion - score: 0.39346569776535034 - - name: Contemplation - score: 0.31002455949783325 - - name: Contempt - score: 0.048870109021663666 - - name: Contentment - score: 0.0579497292637825 - - name: Craving - score: 0.06544201076030731 - - name: Desire - score: 0.05526508390903473 - - name: Determination - score: 0.08590991795063019 - - name: Disappointment - score: 0.19508258998394012 - - name: Disgust - score: 0.031529419124126434 - - name: Distress - score: 0.23210826516151428 - - name: Doubt - score: 0.3284550905227661 - - name: Ecstasy - score: 0.040716782212257385 - - name: Embarrassment - score: 0.1467227339744568 - - name: Empathic Pain - score: 0.07633581757545471 - - name: Entrancement - score: 0.16245244443416595 - - name: Envy - score: 0.03267110139131546 - - name: Excitement - score: 0.10656816512346268 - - name: Fear - score: 0.3115977346897125 - - name: Guilt - score: 0.11615975946187973 - - name: Horror - score: 0.19795553386211395 - - name: Interest - score: 0.3136432468891144 - - name: Joy - score: 0.06285581737756729 - - name: Love - score: 0.06339752674102783 - - name: Nostalgia - score: 0.05866732448339462 - - name: Pain - score: 0.07684041559696198 - - name: Pride - score: 0.026822954416275024 - - name: Realization - score: 0.30000734329223633 - - name: Relief - score: 0.04414166510105133 - - name: Romance - score: 0.042728863656520844 - - name: Sadness - score: 0.14773206412792206 - - name: Satisfaction - score: 0.05902980640530586 - - name: Shame - score: 0.08103451132774353 - - name: Surprise (negative) - score: 0.25518184900283813 - - name: Surprise (positive) - score: 0.28845661878585815 - - name: Sympathy - score: 0.062488824129104614 - - name: Tiredness - score: 0.1559651643037796 - - name: Triumph - score: 0.01955239288508892 - errors: [] - get-job-artifacts: - path: /v0/batch/jobs/{id}/artifacts - method: GET - auth: true - docs: Get the artifacts ZIP of a completed inference job. - path-parameters: - id: - type: string - docs: The unique identifier for the job. - display-name: Get job artifacts - response: - docs: "" - type: file - start-inference-job-from-local-file: - path: /v0/batch/jobs - method: POST - auth: true - docs: Start a new batch inference job. - display-name: Start inference job from local file - request: - name: BatchStartInferenceJobFromLocalFileRequest - body: - properties: - json: - type: optional - docs: >- - Stringified JSON object containing the inference job - configuration. 
- file: list - content-type: multipart/form-data - response: - docs: "" - type: JobId - property: job_id - examples: - - request: {} - response: - body: - job_id: job_id - source: - openapi: ../expression-measurement/batch-files-openapi.yml -types: - Alternative: literal<"language_only"> - Bcp47Tag: - enum: - - zh - - da - - nl - - en - - value: en-AU - name: EnAu - - value: en-IN - name: EnIn - - value: en-NZ - name: EnNz - - value: en-GB - name: EnGb - - fr - - value: fr-CA - name: FrCa - - de - - hi - - value: hi-Latn - name: HiLatn - - id - - it - - ja - - ko - - "no" - - pl - - pt - - value: pt-BR - name: PtBr - - value: pt-PT - name: PtPt - - ru - - es - - value: es-419 - name: Es419 - - sv - - ta - - tr - - uk - source: - openapi: ../expression-measurement/batch-openapi.json - BoundingBox: - docs: A bounding box around a face. - properties: - x: - type: double - docs: x-coordinate of bounding box top left corner. - "y": - type: double - docs: y-coordinate of bounding box top left corner. - w: - type: double - docs: Bounding box width. - h: - type: double - docs: Bounding box height. - source: - openapi: ../expression-measurement/batch-openapi.json - BurstPrediction: - properties: - time: TimeInterval - emotions: - docs: A high-dimensional embedding in emotion space. - type: list - descriptions: - docs: Modality-specific descriptive features and their scores. - type: list - source: - openapi: ../expression-measurement/batch-openapi.json - Classification: map - CompletedEmbeddingGeneration: - properties: - created_timestamp_ms: - type: long - docs: When this job was created (Unix timestamp in milliseconds). - started_timestamp_ms: - type: long - docs: When this job started (Unix timestamp in milliseconds). - ended_timestamp_ms: - type: long - docs: When this job ended (Unix timestamp in milliseconds). - source: - openapi: ../expression-measurement/batch-openapi.json - CompletedInference: - properties: - created_timestamp_ms: - type: long - docs: When this job was created (Unix timestamp in milliseconds). - started_timestamp_ms: - type: long - docs: When this job started (Unix timestamp in milliseconds). - ended_timestamp_ms: - type: long - docs: When this job ended (Unix timestamp in milliseconds). - num_predictions: - type: uint64 - docs: The number of predictions that were generated by this job. - num_errors: - type: uint64 - docs: The number of errors that occurred while running this job. - source: - openapi: ../expression-measurement/batch-openapi.json - CompletedTlInference: - properties: - created_timestamp_ms: - type: long - docs: When this job was created (Unix timestamp in milliseconds). - started_timestamp_ms: - type: long - docs: When this job started (Unix timestamp in milliseconds). - ended_timestamp_ms: - type: long - docs: When this job ended (Unix timestamp in milliseconds). - num_predictions: - type: uint64 - docs: The number of predictions that were generated by this job. - num_errors: - type: uint64 - docs: The number of errors that occurred while running this job. - source: - openapi: ../expression-measurement/batch-openapi.json - CompletedTraining: - properties: - created_timestamp_ms: - type: long - docs: When this job was created (Unix timestamp in milliseconds). - started_timestamp_ms: - type: long - docs: When this job started (Unix timestamp in milliseconds). - ended_timestamp_ms: - type: long - docs: When this job ended (Unix timestamp in milliseconds). 
- custom_model: TrainingCustomModel - alternatives: optional> - source: - openapi: ../expression-measurement/batch-openapi.json - CustomModelPrediction: - properties: - output: map - error: string - task_type: string - source: - openapi: ../expression-measurement/batch-openapi.json - CustomModelRequest: - properties: - name: string - description: optional - tags: optional> - source: - openapi: ../expression-measurement/batch-openapi.json - Dataset: - discriminated: false - union: - - DatasetId - - DatasetVersionId - source: - openapi: ../expression-measurement/batch-openapi.json - DatasetId: - properties: - id: - type: string - validation: - format: uuid - source: - openapi: ../expression-measurement/batch-openapi.json - DatasetVersionId: - properties: - version_id: - type: string - validation: - format: uuid - source: - openapi: ../expression-measurement/batch-openapi.json - DescriptionsScore: - properties: - name: - type: string - docs: Name of the descriptive feature being expressed. - score: - type: float - docs: Embedding value for the descriptive feature being expressed. - source: - openapi: ../expression-measurement/batch-openapi.json - Direction: - enum: - - asc - - desc - source: - openapi: ../expression-measurement/batch-openapi.json - EmbeddingGenerationBaseRequest: - properties: - registry_file_details: - type: optional> - docs: File ID and File URL pairs for an asset registry file - source: - openapi: ../expression-measurement/batch-openapi.json - EmotionScore: - properties: - name: - type: string - docs: Name of the emotion being expressed. - score: - type: float - docs: Embedding value for the emotion being expressed. - source: - openapi: ../expression-measurement/batch-openapi.json - Error: - properties: - message: - type: string - docs: An error message. - file: - type: string - docs: A file path relative to the top level source URL or file. - source: - openapi: ../expression-measurement/batch-openapi.json - EvaluationArgs: - properties: - validation: optional - source: - openapi: ../expression-measurement/batch-openapi.json - Face: - docs: >- - The Facial Emotional Expression model analyzes human facial expressions in - images and videos. Results will be provided per frame for video files. - - - Recommended input file types: `.png`, `.jpeg`, `.mp4` - properties: - fps_pred: - type: optional - docs: >- - Number of frames per second to process. Other frames will be omitted - from the response. Set to `0` to process every frame. - default: 3 - prob_threshold: - type: optional - docs: >- - Face detection probability threshold. Faces detected with a - probability less than this threshold will be omitted from the - response. - default: 0.99 - validation: - min: 0 - max: 1 - identify_faces: - type: optional - docs: >- - Whether to return identifiers for faces across frames. If `true`, - unique identifiers will be assigned to face bounding boxes to - differentiate different faces. If `false`, all faces will be tagged - with an `unknown` ID. - default: false - min_face_size: - type: optional - docs: >- - Minimum bounding box side length in pixels to treat as a face. Faces - detected with a bounding box side length in pixels less than this - threshold will be omitted from the response. - facs: optional - descriptions: optional - save_faces: - type: optional - docs: >- - Whether to extract and save the detected faces in the artifacts zip - created by each job. 
- default: false - source: - openapi: ../expression-measurement/batch-openapi.json - FacePrediction: - properties: - frame: - type: uint64 - docs: Frame number - time: - type: double - docs: Time in seconds when face detection occurred. - prob: - type: double - docs: The predicted probability that a detected face was actually a face. - box: BoundingBox - emotions: - docs: A high-dimensional embedding in emotion space. - type: list - facs: - type: optional> - docs: FACS 2.0 features and their scores. - descriptions: - type: optional> - docs: Modality-specific descriptive features and their scores. - source: - openapi: ../expression-measurement/batch-openapi.json - FacemeshPrediction: - properties: - emotions: - docs: A high-dimensional embedding in emotion space. - type: list - source: - openapi: ../expression-measurement/batch-openapi.json - FacsScore: - properties: - name: - type: string - docs: Name of the FACS 2.0 feature being expressed. - score: - type: float - docs: Embedding value for the FACS 2.0 feature being expressed. - source: - openapi: ../expression-measurement/batch-openapi.json - Failed: - properties: - created_timestamp_ms: - type: long - docs: When this job was created (Unix timestamp in milliseconds). - started_timestamp_ms: - type: long - docs: When this job started (Unix timestamp in milliseconds). - ended_timestamp_ms: - type: long - docs: When this job ended (Unix timestamp in milliseconds). - message: - type: string - docs: An error message. - source: - openapi: ../expression-measurement/batch-openapi.json - File: - docs: The list of files submitted for analysis. - properties: - filename: - type: optional - docs: The name of the file. - content_type: - type: optional - docs: The content type of the file. - md5sum: - type: string - docs: The MD5 checksum of the file. - source: - openapi: ../expression-measurement/batch-openapi.json - Granularity: - enum: - - word - - sentence - - utterance - - conversational_turn - docs: >- - The granularity at which to generate predictions. The `granularity` field - is ignored if transcription is not enabled or if the `window` field has - been set. - - - - `word`: At the word level, our model provides a separate output for each - word, offering the most granular insight into emotional expression during - speech. - - - - `sentence`: At the sentence level of granularity, we annotate the - emotional tone of each spoken sentence with our Prosody and Emotional - Language models. - - - - `utterance`: Utterance-level granularity is between word- and - sentence-level. It takes into account natural pauses or breaks in speech, - providing more rapidly updated measures of emotional expression within a - flowing conversation. For text inputs, utterance-level granularity will - produce results identical to sentence-level granularity. - - - - `conversational_turn`: Conversational turn-level granularity provides a - distinct output for each change in speaker. It captures the full sequence - of words and sentences spoken uninterrupted by each person. This approach - provides a higher-level view of the emotional dynamics in a - multi-participant dialogue. For text inputs, specifying conversational - turn-level granularity for our Emotional Language model will produce - results for the entire passage. - source: - openapi: ../expression-measurement/batch-openapi.json - GroupedPredictionsBurstPrediction: - properties: - id: - type: string - docs: >- - An automatically generated label to identify individuals in your media - file. 
Will be `unknown` if you have chosen to disable identification, - or if the model is unable to distinguish between individuals. - predictions: list - source: - openapi: ../expression-measurement/batch-openapi.json - GroupedPredictionsFacePrediction: - properties: - id: - type: string - docs: >- - An automatically generated label to identify individuals in your media - file. Will be `unknown` if you have chosen to disable identification, - or if the model is unable to distinguish between individuals. - predictions: list - source: - openapi: ../expression-measurement/batch-openapi.json - GroupedPredictionsFacemeshPrediction: - properties: - id: - type: string - docs: >- - An automatically generated label to identify individuals in your media - file. Will be `unknown` if you have chosen to disable identification, - or if the model is unable to distinguish between individuals. - predictions: list - source: - openapi: ../expression-measurement/batch-openapi.json - GroupedPredictionsLanguagePrediction: - properties: - id: - type: string - docs: >- - An automatically generated label to identify individuals in your media - file. Will be `unknown` if you have chosen to disable identification, - or if the model is unable to distinguish between individuals. - predictions: list - source: - openapi: ../expression-measurement/batch-openapi.json - GroupedPredictionsNerPrediction: - properties: - id: - type: string - docs: >- - An automatically generated label to identify individuals in your media - file. Will be `unknown` if you have chosen to disable identification, - or if the model is unable to distinguish between individuals. - predictions: list - source: - openapi: ../expression-measurement/batch-openapi.json - GroupedPredictionsProsodyPrediction: - properties: - id: - type: string - docs: >- - An automatically generated label to identify individuals in your media - file. Will be `unknown` if you have chosen to disable identification, - or if the model is unable to distinguish between individuals. - predictions: list - source: - openapi: ../expression-measurement/batch-openapi.json - InProgress: - properties: - created_timestamp_ms: - type: long - docs: When this job was created (Unix timestamp in milliseconds). - started_timestamp_ms: - type: long - docs: When this job started (Unix timestamp in milliseconds). - source: - openapi: ../expression-measurement/batch-openapi.json - InferenceBaseRequest: - properties: - models: - type: optional - docs: >- - Specify the models to use for inference. - - - If this field is not explicitly set, then all models will run by - default. - transcription: optional - urls: - type: optional> - docs: >- - URLs to the media files to be processed. Each must be a valid public - URL to a media file (see recommended input filetypes) or an archive - (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files. - - - If you wish to supply more than 100 URLs, consider providing them as - an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`). - text: - type: optional> - docs: >- - Text supplied directly to our Emotional Language and NER models for - analysis. - callback_url: - type: optional - docs: >- - If provided, a `POST` request will be made to the URL with the - generated predictions on completion or the error message on failure. - notify: - type: optional - docs: >- - Whether to send an email notification to the user upon job - completion/failure. 
- default: false - source: - openapi: ../expression-measurement/batch-openapi.json - InferencePrediction: - properties: - file: - type: string - docs: A file path relative to the top level source URL or file. - models: ModelsPredictions - source: - openapi: ../expression-measurement/batch-openapi.json - InferenceRequest: - properties: - models: optional - transcription: optional - urls: - type: optional> - docs: >- - URLs to the media files to be processed. Each must be a valid public - URL to a media file (see recommended input filetypes) or an archive - (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files. - - - If you wish to supply more than 100 URLs, consider providing them as - an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`). - text: - type: optional> - docs: Text to supply directly to our language and NER models. - callback_url: - type: optional - docs: >- - If provided, a `POST` request will be made to the URL with the - generated predictions on completion or the error message on failure. - notify: - type: optional - docs: >- - Whether to send an email notification to the user upon job - completion/failure. - default: false - files: list - source: - openapi: ../expression-measurement/batch-openapi.json - InferenceResults: - properties: - predictions: list - errors: list - source: - openapi: ../expression-measurement/batch-openapi.json - InferenceSourcePredictResult: - properties: - source: Source - results: optional - error: - type: optional - docs: An error message. - source: - openapi: ../expression-measurement/batch-openapi.json - JobEmbeddingGeneration: - properties: - job_id: - type: string - docs: The ID associated with this job. - validation: - format: uuid - user_id: - type: string - validation: - format: uuid - request: EmbeddingGenerationBaseRequest - state: StateEmbeddingGeneration - source: - openapi: ../expression-measurement/batch-openapi.json - JobInference: - properties: - job_id: - type: string - docs: The ID associated with this job. - validation: - format: uuid - request: - type: InferenceRequest - docs: The request that initiated the job. - state: - type: StateInference - docs: The current state of the job. - source: - openapi: ../expression-measurement/batch-openapi.json - JobTlInference: - properties: - job_id: - type: string - docs: The ID associated with this job. - validation: - format: uuid - user_id: - type: string - validation: - format: uuid - request: TlInferenceBaseRequest - state: StateTlInference - source: - openapi: ../expression-measurement/batch-openapi.json - JobTraining: - properties: - job_id: - type: string - docs: The ID associated with this job. - validation: - format: uuid - user_id: - type: string - validation: - format: uuid - request: TrainingBaseRequest - state: StateTraining - source: - openapi: ../expression-measurement/batch-openapi.json - JobId: - properties: - job_id: - type: string - docs: The ID of the started job. - validation: - format: uuid - source: - openapi: ../expression-measurement/batch-files-openapi.yml - Language: - docs: >- - The Emotional Language model analyzes passages of text. This also supports - audio and video files by transcribing and then directly analyzing the - transcribed text. - - - Recommended input filetypes: `.txt`, `.mp3`, `.wav`, `.mp4` - properties: - granularity: optional - sentiment: optional - toxicity: optional - identify_speakers: - type: optional - docs: >- - Whether to return identifiers for speakers over time. 
If `true`, - unique identifiers will be assigned to spoken words to differentiate - different speakers. If `false`, all speakers will be tagged with an - `unknown` ID. - default: false - source: - openapi: ../expression-measurement/batch-openapi.json - LanguagePrediction: - properties: - text: - type: string - docs: A segment of text (like a word or a sentence). - position: PositionInterval - time: optional - confidence: - type: optional - docs: >- - Value between `0.0` and `1.0` that indicates our transcription model's - relative confidence in this text. - speaker_confidence: - type: optional - docs: >- - Value between `0.0` and `1.0` that indicates our transcription model's - relative confidence that this text was spoken by this speaker. - emotions: - docs: A high-dimensional embedding in emotion space. - type: list - sentiment: - type: optional> - docs: >- - Sentiment predictions returned as a distribution. This model predicts - the probability that a given text could be interpreted as having each - sentiment level from `1` (negative) to `9` (positive). - - - Compared to returning one estimate of sentiment, this enables a more - nuanced analysis of a text's meaning. For example, a text with very - neutral sentiment would have an average rating of `5`. But also a text - that could be interpreted as having very positive sentiment or very - negative sentiment would also have an average rating of `5`. The - average sentiment is less informative than the distribution over - sentiment, so this API returns a value for each sentiment level. - toxicity: - type: optional> - docs: >- - Toxicity predictions returned as probabilities that the text can be - classified into the following categories: `toxic`, `severe_toxic`, - `obscene`, `threat`, `insult`, and `identity_hate`. - source: - openapi: ../expression-measurement/batch-openapi.json - Models: - docs: The models used for inference. - properties: - face: optional - burst: optional - prosody: optional - language: optional - ner: optional - facemesh: optional - source: - openapi: ../expression-measurement/batch-openapi.json - ModelsPredictions: - properties: - face: optional - burst: optional - prosody: optional - language: optional - ner: optional - facemesh: optional - source: - openapi: ../expression-measurement/batch-openapi.json - Ner: - docs: >- - The NER (Named-entity Recognition) model identifies real-world objects and - concepts in passages of text. This also supports audio and video files by - transcribing and then directly analyzing the transcribed text. - - - Recommended input filetypes: `.txt`, `.mp3`, `.wav`, `.mp4` - properties: - identify_speakers: - type: optional - docs: >- - Whether to return identifiers for speakers over time. If `true`, - unique identifiers will be assigned to spoken words to differentiate - different speakers. If `false`, all speakers will be tagged with an - `unknown` ID. - default: false - source: - openapi: ../expression-measurement/batch-openapi.json - NerPrediction: - properties: - entity: - type: string - docs: The recognized topic or entity. - position: PositionInterval - entity_confidence: - type: double - docs: Our NER model's relative confidence in the recognized topic or entity. - support: - type: double - docs: A measure of how often the entity is linked to by other entities. - uri: - type: string - docs: >- - A URL which provides more information about the recognized topic or - entity. - link_word: - type: string - docs: The specific word to which the emotion predictions are linked. 
- time: optional - confidence: - type: optional - docs: >- - Value between `0.0` and `1.0` that indicates our transcription model's - relative confidence in this text. - speaker_confidence: - type: optional - docs: >- - Value between `0.0` and `1.0` that indicates our transcription model's - relative confidence that this text was spoken by this speaker. - emotions: - docs: A high-dimensional embedding in emotion space. - type: list - source: - openapi: ../expression-measurement/batch-openapi.json - "Null": - type: map - docs: No associated metadata for this model. Value will be `null`. - PositionInterval: - docs: >- - Position of a segment of text within a larger document, measured in - characters. Uses zero-based indexing. The beginning index is inclusive and - the end index is exclusive. - properties: - begin: - type: uint64 - docs: The index of the first character in the text segment, inclusive. - end: - type: uint64 - docs: The index of the last character in the text segment, exclusive. - source: - openapi: ../expression-measurement/batch-openapi.json - PredictionsOptionalNullBurstPrediction: - properties: - metadata: optional - grouped_predictions: list - source: - openapi: ../expression-measurement/batch-openapi.json - PredictionsOptionalNullFacePrediction: - properties: - metadata: optional - grouped_predictions: list - source: - openapi: ../expression-measurement/batch-openapi.json - PredictionsOptionalNullFacemeshPrediction: - properties: - metadata: optional - grouped_predictions: list - source: - openapi: ../expression-measurement/batch-openapi.json - PredictionsOptionalTranscriptionMetadataLanguagePrediction: - properties: - metadata: optional - grouped_predictions: list - source: - openapi: ../expression-measurement/batch-openapi.json - PredictionsOptionalTranscriptionMetadataNerPrediction: - properties: - metadata: optional - grouped_predictions: list - source: - openapi: ../expression-measurement/batch-openapi.json - PredictionsOptionalTranscriptionMetadataProsodyPrediction: - properties: - metadata: optional - grouped_predictions: list - source: - openapi: ../expression-measurement/batch-openapi.json - Prosody: - docs: >- - The Speech Prosody model analyzes the intonation, stress, and rhythm of - spoken word. - - - Recommended input file types: `.wav`, `.mp3`, `.mp4` - properties: - granularity: optional - window: optional - identify_speakers: - type: optional - docs: >- - Whether to return identifiers for speakers over time. If `true`, - unique identifiers will be assigned to spoken words to differentiate - different speakers. If `false`, all speakers will be tagged with an - `unknown` ID. - default: false - source: - openapi: ../expression-measurement/batch-openapi.json - ProsodyPrediction: - properties: - text: - type: optional - docs: A segment of text (like a word or a sentence). - time: TimeInterval - confidence: - type: optional - docs: >- - Value between `0.0` and `1.0` that indicates our transcription model's - relative confidence in this text. - speaker_confidence: - type: optional - docs: >- - Value between `0.0` and `1.0` that indicates our transcription model's - relative confidence that this text was spoken by this speaker. - emotions: - docs: A high-dimensional embedding in emotion space. - type: list - source: - openapi: ../expression-measurement/batch-openapi.json - Queued: - properties: - created_timestamp_ms: - type: long - docs: When this job was created (Unix timestamp in milliseconds). 
- source: - openapi: ../expression-measurement/batch-openapi.json - RegistryFileDetail: - properties: - file_id: - type: string - docs: File ID in the Asset Registry - file_url: - type: string - docs: URL to the file in the Asset Registry - source: - openapi: ../expression-measurement/batch-openapi.json - Regression: map - SentimentScore: - properties: - name: - type: string - docs: Level of sentiment, ranging from `1` (negative) to `9` (positive) - score: - type: float - docs: Prediction for this level of sentiment - source: - openapi: ../expression-measurement/batch-openapi.json - SortBy: - enum: - - created - - started - - ended - source: - openapi: ../expression-measurement/batch-openapi.json - Source: - discriminated: false - union: - - SourceUrl - - SourceFile - - SourceTextSource - source: - openapi: ../expression-measurement/batch-openapi.json - SourceFile: - properties: - type: literal<"file"> - extends: - - File - source: - openapi: ../expression-measurement/batch-openapi.json - SourceTextSource: - properties: - type: literal<"text"> - source: - openapi: ../expression-measurement/batch-openapi.json - SourceUrl: - properties: - type: literal<"url"> - extends: - - Url - source: - openapi: ../expression-measurement/batch-openapi.json - StateEmbeddingGeneration: - discriminated: false - union: - - StateEmbeddingGenerationQueued - - StateEmbeddingGenerationInProgress - - StateEmbeddingGenerationCompletedEmbeddingGeneration - - StateEmbeddingGenerationFailed - source: - openapi: ../expression-measurement/batch-openapi.json - StateEmbeddingGenerationCompletedEmbeddingGeneration: - properties: - status: literal<"COMPLETED"> - extends: - - CompletedEmbeddingGeneration - source: - openapi: ../expression-measurement/batch-openapi.json - StateEmbeddingGenerationFailed: - properties: - status: literal<"FAILED"> - extends: - - Failed - source: - openapi: ../expression-measurement/batch-openapi.json - StateEmbeddingGenerationInProgress: - properties: - status: literal<"IN_PROGRESS"> - extends: - - InProgress - source: - openapi: ../expression-measurement/batch-openapi.json - StateEmbeddingGenerationQueued: - properties: - status: literal<"QUEUED"> - extends: - - Queued - source: - openapi: ../expression-measurement/batch-openapi.json - StateInference: - discriminated: false - union: - - QueuedState - - InProgressState - - CompletedState - - FailedState - source: - openapi: ../expression-measurement/batch-openapi.json - CompletedState: - properties: - status: literal<"COMPLETED"> - extends: - - CompletedInference - source: - openapi: ../expression-measurement/batch-openapi.json - FailedState: - properties: - status: literal<"FAILED"> - extends: - - Failed - source: - openapi: ../expression-measurement/batch-openapi.json - InProgressState: - properties: - status: literal<"IN_PROGRESS"> - extends: - - InProgress - source: - openapi: ../expression-measurement/batch-openapi.json - QueuedState: - properties: - status: literal<"QUEUED"> - extends: - - Queued - source: - openapi: ../expression-measurement/batch-openapi.json - StateTlInference: - discriminated: false - union: - - StateTlInferenceQueued - - StateTlInferenceInProgress - - StateTlInferenceCompletedTlInference - - StateTlInferenceFailed - source: - openapi: ../expression-measurement/batch-openapi.json - StateTlInferenceCompletedTlInference: - properties: - status: literal<"COMPLETED"> - extends: - - CompletedTlInference - source: - openapi: ../expression-measurement/batch-openapi.json - StateTlInferenceFailed: - properties: - status: 
literal<"FAILED"> - extends: - - Failed - source: - openapi: ../expression-measurement/batch-openapi.json - StateTlInferenceInProgress: - properties: - status: literal<"IN_PROGRESS"> - extends: - - InProgress - source: - openapi: ../expression-measurement/batch-openapi.json - StateTlInferenceQueued: - properties: - status: literal<"QUEUED"> - extends: - - Queued - source: - openapi: ../expression-measurement/batch-openapi.json - StateTraining: - discriminated: false - union: - - StateTrainingQueued - - StateTrainingInProgress - - StateTrainingCompletedTraining - - StateTrainingFailed - source: - openapi: ../expression-measurement/batch-openapi.json - StateTrainingCompletedTraining: - properties: - status: literal<"COMPLETED"> - extends: - - CompletedTraining - source: - openapi: ../expression-measurement/batch-openapi.json - StateTrainingFailed: - properties: - status: literal<"FAILED"> - extends: - - Failed - source: - openapi: ../expression-measurement/batch-openapi.json - StateTrainingInProgress: - properties: - status: literal<"IN_PROGRESS"> - extends: - - InProgress - source: - openapi: ../expression-measurement/batch-openapi.json - StateTrainingQueued: - properties: - status: literal<"QUEUED"> - extends: - - Queued - source: - openapi: ../expression-measurement/batch-openapi.json - Status: - enum: - - QUEUED - - IN_PROGRESS - - COMPLETED - - FAILED - source: - openapi: ../expression-measurement/batch-openapi.json - TlInferencePrediction: - properties: - file: - type: string - docs: A file path relative to the top level source URL or file. - file_type: string - custom_models: map - source: - openapi: ../expression-measurement/batch-openapi.json - TlInferenceResults: - properties: - predictions: list - errors: list - source: - openapi: ../expression-measurement/batch-openapi.json - TlInferenceSourcePredictResult: - properties: - source: Source - results: optional - error: - type: optional - docs: An error message. - source: - openapi: ../expression-measurement/batch-openapi.json - Tag: - properties: - key: string - value: string - source: - openapi: ../expression-measurement/batch-openapi.json - Target: - discriminated: false - union: - - long - - double - - string - source: - openapi: ../expression-measurement/batch-openapi.json - Task: - discriminated: false - union: - - TaskClassification - - TaskRegression - source: - openapi: ../expression-measurement/batch-openapi.json - TaskClassification: - properties: - type: literal<"classification"> - source: - openapi: ../expression-measurement/batch-openapi.json - TaskRegression: - properties: - type: literal<"regression"> - source: - openapi: ../expression-measurement/batch-openapi.json - TextSource: map - TimeInterval: - docs: A time range with a beginning and end, measured in seconds. - properties: - begin: - type: double - docs: Beginning of time range in seconds. - end: - type: double - docs: End of time range in seconds. - source: - openapi: ../expression-measurement/batch-openapi.json - TlInferenceBaseRequest: - properties: - custom_model: CustomModel - urls: - type: optional> - docs: >- - URLs to the media files to be processed. Each must be a valid public - URL to a media file (see recommended input filetypes) or an archive - (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files. - - - If you wish to supply more than 100 URLs, consider providing them as - an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`). 
- callback_url: - type: optional - docs: >- - If provided, a `POST` request will be made to the URL with the - generated predictions on completion or the error message on failure. - notify: - type: optional - docs: >- - Whether to send an email notification to the user upon job - completion/failure. - default: false - source: - openapi: ../expression-measurement/batch-openapi.json - CustomModel: - discriminated: false - union: - - CustomModelId - - CustomModelVersionId - source: - openapi: ../expression-measurement/batch-openapi.json - CustomModelId: - properties: - id: string - source: - openapi: ../expression-measurement/batch-openapi.json - CustomModelVersionId: - properties: - version_id: string - source: - openapi: ../expression-measurement/batch-openapi.json - ToxicityScore: - properties: - name: - type: string - docs: Category of toxicity. - score: - type: float - docs: Prediction for this category of toxicity - source: - openapi: ../expression-measurement/batch-openapi.json - TrainingBaseRequest: - properties: - custom_model: CustomModelRequest - dataset: Dataset - target_feature: - type: optional - default: label - task: optional - evaluation: optional - alternatives: optional> - callback_url: optional - notify: - type: optional - default: false - source: - openapi: ../expression-measurement/batch-openapi.json - TrainingCustomModel: - properties: - id: string - version_id: optional - source: - openapi: ../expression-measurement/batch-openapi.json - Transcription: - docs: |- - Transcription-related configuration options. - - To disable transcription, explicitly set this field to `null`. - properties: - language: - type: optional - docs: >- - By default, we use an automated language detection method for our - Speech Prosody, Language, and NER models. However, if you know what - language is being spoken in your media samples, you can specify it via - its BCP-47 tag and potentially obtain more accurate results. - - - You can specify any of the following languages: - - - Chinese: `zh` - - - Danish: `da` - - - Dutch: `nl` - - - English: `en` - - - English (Australia): `en-AU` - - - English (India): `en-IN` - - - English (New Zealand): `en-NZ` - - - English (United Kingdom): `en-GB` - - - French: `fr` - - - French (Canada): `fr-CA` - - - German: `de` - - - Hindi: `hi` - - - Hindi (Roman Script): `hi-Latn` - - - Indonesian: `id` - - - Italian: `it` - - - Japanese: `ja` - - - Korean: `ko` - - - Norwegian: `no` - - - Polish: `pl` - - - Portuguese: `pt` - - - Portuguese (Brazil): `pt-BR` - - - Portuguese (Portugal): `pt-PT` - - - Russian: `ru` - - - Spanish: `es` - - - Spanish (Latin America): `es-419` - - - Swedish: `sv` - - - Tamil: `ta` - - - Turkish: `tr` - - - Ukrainian: `uk` - identify_speakers: - type: optional - docs: >- - Whether to return identifiers for speakers over time. If `true`, - unique identifiers will be assigned to spoken words to differentiate - different speakers. If `false`, all speakers will be tagged with an - `unknown` ID. - default: false - confidence_threshold: - type: optional - docs: >- - Transcript confidence threshold. Transcripts generated with a - confidence less than this threshold will be considered invalid and not - used as an input for model inference. - default: 0.5 - validation: - min: 0 - max: 1 - source: - openapi: ../expression-measurement/batch-openapi.json - TranscriptionMetadata: - docs: Transcription metadata for your media file. 
- properties: - confidence: - type: double - docs: >- - Value between `0.0` and `1.0` indicating our transcription model's - relative confidence in the transcription of your media file. - detected_language: optional - source: - openapi: ../expression-measurement/batch-openapi.json - Type: - enum: - - EMBEDDING_GENERATION - - INFERENCE - - TL_INFERENCE - - TRAINING - source: - openapi: ../expression-measurement/batch-openapi.json - Unconfigurable: - type: map - docs: >- - To include predictions for this model type, set this field to `{}`. It is - currently not configurable further. - UnionJob: InferenceJob - EmbeddingGenerationJob: - properties: - type: string - extends: - - JobEmbeddingGeneration - source: - openapi: ../expression-measurement/batch-openapi.json - InferenceJob: - properties: - type: - type: string - docs: >- - Denotes the job type. - - - Jobs created with the Expression Measurement API will have this field - set to `INFERENCE`. - extends: - - JobInference - source: - openapi: ../expression-measurement/batch-openapi.json - CustomModelsInferenceJob: - properties: - type: string - extends: - - JobTlInference - source: - openapi: ../expression-measurement/batch-openapi.json - CustomModelsTrainingJob: - properties: - type: string - extends: - - JobTraining - source: - openapi: ../expression-measurement/batch-openapi.json - UnionPredictResult: InferenceSourcePredictResult - Url: - properties: - url: - type: string - docs: The URL of the source media file. - source: - openapi: ../expression-measurement/batch-openapi.json - ValidationArgs: - properties: - positive_label: optional - source: - openapi: ../expression-measurement/batch-openapi.json - When: - enum: - - created_before - - created_after - source: - openapi: ../expression-measurement/batch-openapi.json - Window: - docs: >- - Generate predictions based on time. - - - Setting the `window` field allows for a 'sliding window' approach, where a - fixed-size window moves across the audio or video file in defined steps. - This enables continuous analysis of prosody within subsets of the file, - providing dynamic and localized insights into emotional expression. - properties: - length: - type: optional - docs: The length of the sliding window. - default: 4 - validation: - min: 0.5 - step: - type: optional - docs: The step size of the sliding window. - default: 1 - validation: - min: 0.5 - source: - openapi: ../expression-measurement/batch-openapi.json diff --git a/.mock/definition/expression-measurement/batch/__package__.yml b/.mock/definition/expression-measurement/batch/__package__.yml new file mode 100644 index 00000000..80eeb3b3 --- /dev/null +++ b/.mock/definition/expression-measurement/batch/__package__.yml @@ -0,0 +1,1759 @@ +service: + auth: false + base-path: '' + endpoints: + list-jobs: + path: /v0/batch/jobs + method: GET + auth: true + docs: Sort and filter jobs. + display-name: List jobs + request: + name: BatchListJobsRequest + query-parameters: + limit: + type: optional + docs: The maximum number of jobs to include in the response. + status: + type: optional + allow-multiple: true + docs: >- + Include only jobs of this status in the response. There are four + possible statuses: + + + - `QUEUED`: The job has been received and is waiting to be + processed. + + + - `IN_PROGRESS`: The job is currently being processed. + + + - `COMPLETED`: The job has finished processing. + + + - `FAILED`: The job encountered an error and could not be + completed successfully. 
+ when: + type: optional + docs: >- + Specify whether to include jobs created before or after a given + `timestamp_ms`. + timestamp_ms: + type: optional + docs: |- + Provide a timestamp in milliseconds to filter jobs. + + When combined with the `when` parameter, you can filter jobs before or after the given timestamp. Defaults to the current Unix timestamp if one is not provided. + sort_by: + type: optional + docs: >- + Specify which timestamp to sort the jobs by. + + + - `created`: Sort jobs by the time of creation, indicated by + `created_timestamp_ms`. + + + - `started`: Sort jobs by the time processing started, indicated + by `started_timestamp_ms`. + + + - `ended`: Sort jobs by the time processing ended, indicated by + `ended_timestamp_ms`. + direction: + type: optional + docs: >- + Specify the order in which to sort the jobs. Defaults to + descending order. + + + - `asc`: Sort in ascending order (chronological, with the oldest + records first). + + + - `desc`: Sort in descending order (reverse-chronological, with + the newest records first). + response: + docs: '' + type: list + examples: + - response: + body: + - job_id: job_id + request: + files: + - filename: filename + md5sum: md5sum + content_type: content_type + models: + burst: {} + face: + fps_pred: 3 + identify_faces: false + min_face_size: 60 + prob_threshold: 0.99 + save_faces: false + facemesh: {} + language: + granularity: word + identify_speakers: false + ner: + identify_speakers: false + prosody: + granularity: utterance + identify_speakers: false + notify: true + text: [] + urls: + - https://hume-tutorials.s3.amazonaws.com/faces.zip + state: + created_timestamp_ms: 1712587158717 + ended_timestamp_ms: 1712587159274 + num_errors: 0 + num_predictions: 10 + started_timestamp_ms: 1712587158800 + status: COMPLETED + type: INFERENCE + start-inference-job: + path: /v0/batch/jobs + method: POST + auth: true + docs: Start a new measurement inference job. + display-name: Start inference job + request: + body: InferenceBaseRequest + response: + docs: '' + type: JobId + property: job_id + examples: + - request: + urls: + - https://hume-tutorials.s3.amazonaws.com/faces.zip + notify: true + response: + body: + job_id: job_id + get-job-details: + path: /v0/batch/jobs/{id} + method: GET + auth: true + docs: Get the request details and state of a given job. + path-parameters: + id: + type: string + docs: The unique identifier for the job. + display-name: Get job details + response: + docs: '' + type: UnionJob + examples: + - name: Inference + path-parameters: + id: job_id + response: + body: + type: INFERENCE + job_id: job_id + request: + files: [] + models: + burst: {} + face: + fps_pred: 3 + identify_faces: false + min_face_size: 60 + prob_threshold: 0.99 + save_faces: false + facemesh: {} + language: + granularity: word + identify_speakers: false + ner: + identify_speakers: false + prosody: + granularity: utterance + identify_speakers: false + notify: true + text: [] + urls: + - https://hume-tutorials.s3.amazonaws.com/faces.zip + state: + created_timestamp_ms: 1712590457884 + ended_timestamp_ms: 1712590462252 + num_errors: 0 + num_predictions: 10 + started_timestamp_ms: 1712590457995 + status: COMPLETED + get-job-predictions: + path: /v0/batch/jobs/{id}/predictions + method: GET + auth: true + docs: Get the JSON predictions of a completed inference job. + path-parameters: + id: + type: string + docs: The unique identifier for the job. 
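To make the batch endpoints re-created above concrete, here is a minimal, hedged sketch of starting an inference job, polling its state, and listing recent jobs. It mirrors the example request in this file (a public `faces.zip` URL with `notify: true`); the base URL and `X-Hume-Api-Key` header are assumptions, not part of this definition.

```python
# Illustrative sketch only; base URL and auth header are assumptions.
import os
import time
import requests

BASE_URL = "https://api.hume.ai"
HEADERS = {"X-Hume-Api-Key": os.environ["HUME_API_KEY"]}

# POST /v0/batch/jobs — start an inference job (mirrors the example request above).
start = requests.post(
    f"{BASE_URL}/v0/batch/jobs",
    headers=HEADERS,
    json={"urls": ["https://hume-tutorials.s3.amazonaws.com/faces.zip"], "notify": True},
)
start.raise_for_status()
job_id = start.json()["job_id"]

# GET /v0/batch/jobs/{id} — poll until the job leaves QUEUED / IN_PROGRESS.
while True:
    details = requests.get(f"{BASE_URL}/v0/batch/jobs/{job_id}", headers=HEADERS).json()
    if details["state"]["status"] in ("COMPLETED", "FAILED"):
        break
    time.sleep(5)

# GET /v0/batch/jobs — list the ten most recently created completed jobs.
jobs = requests.get(
    f"{BASE_URL}/v0/batch/jobs",
    headers=HEADERS,
    params={"status": "COMPLETED", "sort_by": "created", "direction": "desc", "limit": 10},
).json()
```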
+ display-name: Get job predictions + response: + docs: '' + type: list + examples: + - path-parameters: + id: job_id + response: + body: + - source: + type: url + url: https://hume-tutorials.s3.amazonaws.com/faces.zip + results: + predictions: + - file: faces/100.jpg + models: + face: + grouped_predictions: + - id: unknown + predictions: + - frame: 0 + time: 0 + prob: 0.9994111061096191 + box: + x: 1187.885986328125 + 'y': 1397.697509765625 + w: 1401.668701171875 + h: 1961.424560546875 + emotions: + - name: Admiration + score: 0.10722749680280685 + - name: Adoration + score: 0.06395940482616425 + - name: Aesthetic Appreciation + score: 0.05811462551355362 + - name: Amusement + score: 0.14187128841876984 + - name: Anger + score: 0.02804684266448021 + - name: Anxiety + score: 0.2713485360145569 + - name: Awe + score: 0.33812594413757324 + - name: Awkwardness + score: 0.1745193600654602 + - name: Boredom + score: 0.23600080609321594 + - name: Calmness + score: 0.18988418579101562 + - name: Concentration + score: 0.44288986921310425 + - name: Confusion + score: 0.39346569776535034 + - name: Contemplation + score: 0.31002455949783325 + - name: Contempt + score: 0.048870109021663666 + - name: Contentment + score: 0.0579497292637825 + - name: Craving + score: 0.06544201076030731 + - name: Desire + score: 0.05526508390903473 + - name: Determination + score: 0.08590991795063019 + - name: Disappointment + score: 0.19508258998394012 + - name: Disgust + score: 0.031529419124126434 + - name: Distress + score: 0.23210826516151428 + - name: Doubt + score: 0.3284550905227661 + - name: Ecstasy + score: 0.040716782212257385 + - name: Embarrassment + score: 0.1467227339744568 + - name: Empathic Pain + score: 0.07633581757545471 + - name: Entrancement + score: 0.16245244443416595 + - name: Envy + score: 0.03267110139131546 + - name: Excitement + score: 0.10656816512346268 + - name: Fear + score: 0.3115977346897125 + - name: Guilt + score: 0.11615975946187973 + - name: Horror + score: 0.19795553386211395 + - name: Interest + score: 0.3136432468891144 + - name: Joy + score: 0.06285581737756729 + - name: Love + score: 0.06339752674102783 + - name: Nostalgia + score: 0.05866732448339462 + - name: Pain + score: 0.07684041559696198 + - name: Pride + score: 0.026822954416275024 + - name: Realization + score: 0.30000734329223633 + - name: Relief + score: 0.04414166510105133 + - name: Romance + score: 0.042728863656520844 + - name: Sadness + score: 0.14773206412792206 + - name: Satisfaction + score: 0.05902980640530586 + - name: Shame + score: 0.08103451132774353 + - name: Surprise (negative) + score: 0.25518184900283813 + - name: Surprise (positive) + score: 0.28845661878585815 + - name: Sympathy + score: 0.062488824129104614 + - name: Tiredness + score: 0.1559651643037796 + - name: Triumph + score: 0.01955239288508892 + errors: [] + get-job-artifacts: + path: /v0/batch/jobs/{id}/artifacts + method: GET + auth: true + docs: Get the artifacts ZIP of a completed inference job. + path-parameters: + id: + type: string + docs: The unique identifier for the job. + display-name: Get job artifacts + response: + docs: '' + type: file + start-inference-job-from-local-file: + path: /v0/batch/jobs + method: POST + auth: true + docs: Start a new batch inference job. + display-name: Start inference job from local file + request: + name: BatchStartInferenceJobFromLocalFileRequest + body: + properties: + json: + type: optional + docs: >- + Stringified JSON object containing the inference job + configuration. 
+ file: list + content-type: multipart/form-data + response: + docs: '' + type: JobId + property: job_id + examples: + - request: {} + response: + body: + job_id: job_id + source: + openapi: batch-files-openapi.yml +types: + Alternative: literal<"language_only"> + Bcp47Tag: + enum: + - zh + - da + - nl + - en + - value: en-AU + name: EnAu + - value: en-IN + name: EnIn + - value: en-NZ + name: EnNz + - value: en-GB + name: EnGb + - fr + - value: fr-CA + name: FrCa + - de + - hi + - value: hi-Latn + name: HiLatn + - id + - it + - ja + - ko + - 'no' + - pl + - pt + - value: pt-BR + name: PtBr + - value: pt-PT + name: PtPt + - ru + - es + - value: es-419 + name: Es419 + - sv + - ta + - tr + - uk + source: + openapi: batch-openapi.json + BoundingBox: + docs: A bounding box around a face. + properties: + x: + type: double + docs: x-coordinate of bounding box top left corner. + 'y': + type: double + docs: y-coordinate of bounding box top left corner. + w: + type: double + docs: Bounding box width. + h: + type: double + docs: Bounding box height. + source: + openapi: batch-openapi.json + BurstPrediction: + properties: + time: TimeInterval + emotions: + docs: A high-dimensional embedding in emotion space. + type: list + descriptions: + docs: Modality-specific descriptive features and their scores. + type: list + source: + openapi: batch-openapi.json + Classification: map + CompletedEmbeddingGeneration: + properties: + created_timestamp_ms: + type: long + docs: When this job was created (Unix timestamp in milliseconds). + started_timestamp_ms: + type: long + docs: When this job started (Unix timestamp in milliseconds). + ended_timestamp_ms: + type: long + docs: When this job ended (Unix timestamp in milliseconds). + source: + openapi: batch-openapi.json + CompletedInference: + properties: + created_timestamp_ms: + type: long + docs: When this job was created (Unix timestamp in milliseconds). + started_timestamp_ms: + type: long + docs: When this job started (Unix timestamp in milliseconds). + ended_timestamp_ms: + type: long + docs: When this job ended (Unix timestamp in milliseconds). + num_predictions: + type: uint64 + docs: The number of predictions that were generated by this job. + num_errors: + type: uint64 + docs: The number of errors that occurred while running this job. + source: + openapi: batch-openapi.json + CompletedTlInference: + properties: + created_timestamp_ms: + type: long + docs: When this job was created (Unix timestamp in milliseconds). + started_timestamp_ms: + type: long + docs: When this job started (Unix timestamp in milliseconds). + ended_timestamp_ms: + type: long + docs: When this job ended (Unix timestamp in milliseconds). + num_predictions: + type: uint64 + docs: The number of predictions that were generated by this job. + num_errors: + type: uint64 + docs: The number of errors that occurred while running this job. + source: + openapi: batch-openapi.json + CompletedTraining: + properties: + created_timestamp_ms: + type: long + docs: When this job was created (Unix timestamp in milliseconds). + started_timestamp_ms: + type: long + docs: When this job started (Unix timestamp in milliseconds). + ended_timestamp_ms: + type: long + docs: When this job ended (Unix timestamp in milliseconds). 
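The remaining job endpoints defined above — predictions, the artifacts ZIP, and the multipart local-file variant of job creation — follow the same pattern. The sketch below is illustrative only: the base URL, auth header, and local file name are assumptions, and the `json` part simply stringifies a job configuration as described in this file.

```python
# Illustrative sketch only; base URL, auth header, and file name are assumptions.
import json
import os
import requests

BASE_URL = "https://api.hume.ai"
HEADERS = {"X-Hume-Api-Key": os.environ["HUME_API_KEY"]}
job_id = "job_id"  # placeholder from the examples above

# GET /v0/batch/jobs/{id}/predictions — JSON predictions of a completed job.
predictions = requests.get(
    f"{BASE_URL}/v0/batch/jobs/{job_id}/predictions", headers=HEADERS
).json()

# GET /v0/batch/jobs/{id}/artifacts — artifacts ZIP, streamed to disk.
with requests.get(
    f"{BASE_URL}/v0/batch/jobs/{job_id}/artifacts", headers=HEADERS, stream=True
) as r:
    r.raise_for_status()
    with open("artifacts.zip", "wb") as f:
        for chunk in r.iter_content(chunk_size=8192):
            f.write(chunk)

# POST /v0/batch/jobs (multipart/form-data) — start a job from a local file.
# The `json` part carries the stringified job configuration; `file` carries the media.
with open("sample.wav", "rb") as media:  # hypothetical local file
    started = requests.post(
        f"{BASE_URL}/v0/batch/jobs",
        headers=HEADERS,
        data={"json": json.dumps({"models": {"prosody": {}}, "notify": False})},
        files={"file": media},
    )
started.raise_for_status()
```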
+ custom_model: TrainingCustomModel + alternatives: optional> + source: + openapi: batch-openapi.json + CustomModelPrediction: + properties: + output: map + error: string + task_type: string + source: + openapi: batch-openapi.json + CustomModelRequest: + properties: + name: string + description: optional + tags: optional> + source: + openapi: batch-openapi.json + Dataset: + discriminated: false + union: + - DatasetId + - DatasetVersionId + source: + openapi: batch-openapi.json + DatasetId: + properties: + id: + type: string + validation: + format: uuid + source: + openapi: batch-openapi.json + DatasetVersionId: + properties: + version_id: + type: string + validation: + format: uuid + source: + openapi: batch-openapi.json + DescriptionsScore: + properties: + name: + type: string + docs: Name of the descriptive feature being expressed. + score: + type: float + docs: Embedding value for the descriptive feature being expressed. + source: + openapi: batch-openapi.json + Direction: + enum: + - asc + - desc + source: + openapi: batch-openapi.json + EmbeddingGenerationBaseRequest: + properties: + registry_file_details: + type: optional> + docs: File ID and File URL pairs for an asset registry file + source: + openapi: batch-openapi.json + EmotionScore: + properties: + name: + type: string + docs: Name of the emotion being expressed. + score: + type: float + docs: Embedding value for the emotion being expressed. + source: + openapi: batch-openapi.json + Error: + properties: + message: + type: string + docs: An error message. + file: + type: string + docs: A file path relative to the top level source URL or file. + source: + openapi: batch-openapi.json + EvaluationArgs: + properties: + validation: optional + source: + openapi: batch-openapi.json + Face: + docs: >- + The Facial Emotional Expression model analyzes human facial expressions in + images and videos. Results will be provided per frame for video files. + + + Recommended input file types: `.png`, `.jpeg`, `.mp4` + properties: + fps_pred: + type: optional + docs: >- + Number of frames per second to process. Other frames will be omitted + from the response. Set to `0` to process every frame. + default: 3 + prob_threshold: + type: optional + docs: >- + Face detection probability threshold. Faces detected with a + probability less than this threshold will be omitted from the + response. + default: 0.99 + validation: + min: 0 + max: 1 + identify_faces: + type: optional + docs: >- + Whether to return identifiers for faces across frames. If `true`, + unique identifiers will be assigned to face bounding boxes to + differentiate different faces. If `false`, all faces will be tagged + with an `unknown` ID. + default: false + min_face_size: + type: optional + docs: >- + Minimum bounding box side length in pixels to treat as a face. Faces + detected with a bounding box side length in pixels less than this + threshold will be omitted from the response. + facs: optional + descriptions: optional + save_faces: + type: optional + docs: >- + Whether to extract and save the detected faces in the artifacts zip + created by each job. + default: false + source: + openapi: batch-openapi.json + FacePrediction: + properties: + frame: + type: uint64 + docs: Frame number + time: + type: double + docs: Time in seconds when face detection occurred. + prob: + type: double + docs: The predicted probability that a detected face was actually a face. + box: BoundingBox + emotions: + docs: A high-dimensional embedding in emotion space. 
+ type: list + facs: + type: optional> + docs: FACS 2.0 features and their scores. + descriptions: + type: optional> + docs: Modality-specific descriptive features and their scores. + source: + openapi: batch-openapi.json + FacemeshPrediction: + properties: + emotions: + docs: A high-dimensional embedding in emotion space. + type: list + source: + openapi: batch-openapi.json + FacsScore: + properties: + name: + type: string + docs: Name of the FACS 2.0 feature being expressed. + score: + type: float + docs: Embedding value for the FACS 2.0 feature being expressed. + source: + openapi: batch-openapi.json + Failed: + properties: + created_timestamp_ms: + type: long + docs: When this job was created (Unix timestamp in milliseconds). + started_timestamp_ms: + type: long + docs: When this job started (Unix timestamp in milliseconds). + ended_timestamp_ms: + type: long + docs: When this job ended (Unix timestamp in milliseconds). + message: + type: string + docs: An error message. + source: + openapi: batch-openapi.json + File: + docs: The list of files submitted for analysis. + properties: + filename: + type: optional + docs: The name of the file. + content_type: + type: optional + docs: The content type of the file. + md5sum: + type: string + docs: The MD5 checksum of the file. + source: + openapi: batch-openapi.json + Granularity: + enum: + - word + - sentence + - utterance + - conversational_turn + docs: >- + The granularity at which to generate predictions. The `granularity` field + is ignored if transcription is not enabled or if the `window` field has + been set. + + + - `word`: At the word level, our model provides a separate output for each + word, offering the most granular insight into emotional expression during + speech. + + + - `sentence`: At the sentence level of granularity, we annotate the + emotional tone of each spoken sentence with our Prosody and Emotional + Language models. + + + - `utterance`: Utterance-level granularity is between word- and + sentence-level. It takes into account natural pauses or breaks in speech, + providing more rapidly updated measures of emotional expression within a + flowing conversation. For text inputs, utterance-level granularity will + produce results identical to sentence-level granularity. + + + - `conversational_turn`: Conversational turn-level granularity provides a + distinct output for each change in speaker. It captures the full sequence + of words and sentences spoken uninterrupted by each person. This approach + provides a higher-level view of the emotional dynamics in a + multi-participant dialogue. For text inputs, specifying conversational + turn-level granularity for our Emotional Language model will produce + results for the entire passage. + source: + openapi: batch-openapi.json + GroupedPredictionsBurstPrediction: + properties: + id: + type: string + docs: >- + An automatically generated label to identify individuals in your media + file. Will be `unknown` if you have chosen to disable identification, + or if the model is unable to distinguish between individuals. + predictions: list + source: + openapi: batch-openapi.json + GroupedPredictionsFacePrediction: + properties: + id: + type: string + docs: >- + An automatically generated label to identify individuals in your media + file. Will be `unknown` if you have chosen to disable identification, + or if the model is unable to distinguish between individuals. 
+ predictions: list + source: + openapi: batch-openapi.json + GroupedPredictionsFacemeshPrediction: + properties: + id: + type: string + docs: >- + An automatically generated label to identify individuals in your media + file. Will be `unknown` if you have chosen to disable identification, + or if the model is unable to distinguish between individuals. + predictions: list + source: + openapi: batch-openapi.json + GroupedPredictionsLanguagePrediction: + properties: + id: + type: string + docs: >- + An automatically generated label to identify individuals in your media + file. Will be `unknown` if you have chosen to disable identification, + or if the model is unable to distinguish between individuals. + predictions: list + source: + openapi: batch-openapi.json + GroupedPredictionsNerPrediction: + properties: + id: + type: string + docs: >- + An automatically generated label to identify individuals in your media + file. Will be `unknown` if you have chosen to disable identification, + or if the model is unable to distinguish between individuals. + predictions: list + source: + openapi: batch-openapi.json + GroupedPredictionsProsodyPrediction: + properties: + id: + type: string + docs: >- + An automatically generated label to identify individuals in your media + file. Will be `unknown` if you have chosen to disable identification, + or if the model is unable to distinguish between individuals. + predictions: list + source: + openapi: batch-openapi.json + InProgress: + properties: + created_timestamp_ms: + type: long + docs: When this job was created (Unix timestamp in milliseconds). + started_timestamp_ms: + type: long + docs: When this job started (Unix timestamp in milliseconds). + source: + openapi: batch-openapi.json + InferenceBaseRequest: + properties: + models: + type: optional + docs: >- + Specify the models to use for inference. + + + If this field is not explicitly set, then all models will run by + default. + transcription: optional + urls: + type: optional> + docs: >- + URLs to the media files to be processed. Each must be a valid public + URL to a media file (see recommended input filetypes) or an archive + (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files. + + + If you wish to supply more than 100 URLs, consider providing them as + an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`). + text: + type: optional> + docs: >- + Text supplied directly to our Emotional Language and NER models for + analysis. + callback_url: + type: optional + docs: >- + If provided, a `POST` request will be made to the URL with the + generated predictions on completion or the error message on failure. + notify: + type: optional + docs: >- + Whether to send an email notification to the user upon job + completion/failure. + default: false + source: + openapi: batch-openapi.json + InferencePrediction: + properties: + file: + type: string + docs: A file path relative to the top level source URL or file. + models: ModelsPredictions + source: + openapi: batch-openapi.json + InferenceRequest: + properties: + models: optional + transcription: optional + urls: + type: optional> + docs: >- + URLs to the media files to be processed. Each must be a valid public + URL to a media file (see recommended input filetypes) or an archive + (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files. + + + If you wish to supply more than 100 URLs, consider providing them as + an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`). 
+ text: + type: optional> + docs: Text to supply directly to our language and NER models. + callback_url: + type: optional + docs: >- + If provided, a `POST` request will be made to the URL with the + generated predictions on completion or the error message on failure. + notify: + type: optional + docs: >- + Whether to send an email notification to the user upon job + completion/failure. + default: false + files: list + source: + openapi: batch-openapi.json + InferenceResults: + properties: + predictions: list + errors: list + source: + openapi: batch-openapi.json + InferenceSourcePredictResult: + properties: + source: Source + results: optional + error: + type: optional + docs: An error message. + source: + openapi: batch-openapi.json + JobEmbeddingGeneration: + properties: + job_id: + type: string + docs: The ID associated with this job. + validation: + format: uuid + user_id: + type: string + validation: + format: uuid + request: EmbeddingGenerationBaseRequest + state: StateEmbeddingGeneration + source: + openapi: batch-openapi.json + JobInference: + properties: + job_id: + type: string + docs: The ID associated with this job. + validation: + format: uuid + request: + type: InferenceRequest + docs: The request that initiated the job. + state: + type: StateInference + docs: The current state of the job. + source: + openapi: batch-openapi.json + JobTlInference: + properties: + job_id: + type: string + docs: The ID associated with this job. + validation: + format: uuid + user_id: + type: string + validation: + format: uuid + request: TlInferenceBaseRequest + state: StateTlInference + source: + openapi: batch-openapi.json + JobTraining: + properties: + job_id: + type: string + docs: The ID associated with this job. + validation: + format: uuid + user_id: + type: string + validation: + format: uuid + request: TrainingBaseRequest + state: StateTraining + source: + openapi: batch-openapi.json + JobId: + properties: + job_id: + type: string + docs: The ID of the started job. + validation: + format: uuid + source: + openapi: batch-files-openapi.yml + Language: + docs: >- + The Emotional Language model analyzes passages of text. This also supports + audio and video files by transcribing and then directly analyzing the + transcribed text. + + + Recommended input filetypes: `.txt`, `.mp3`, `.wav`, `.mp4` + properties: + granularity: optional + sentiment: optional + toxicity: optional + identify_speakers: + type: optional + docs: >- + Whether to return identifiers for speakers over time. If `true`, + unique identifiers will be assigned to spoken words to differentiate + different speakers. If `false`, all speakers will be tagged with an + `unknown` ID. + default: false + source: + openapi: batch-openapi.json + LanguagePrediction: + properties: + text: + type: string + docs: A segment of text (like a word or a sentence). + position: PositionInterval + time: optional + confidence: + type: optional + docs: >- + Value between `0.0` and `1.0` that indicates our transcription model's + relative confidence in this text. + speaker_confidence: + type: optional + docs: >- + Value between `0.0` and `1.0` that indicates our transcription model's + relative confidence that this text was spoken by this speaker. + emotions: + docs: A high-dimensional embedding in emotion space. + type: list + sentiment: + type: optional> + docs: >- + Sentiment predictions returned as a distribution. 
This model predicts + the probability that a given text could be interpreted as having each + sentiment level from `1` (negative) to `9` (positive). + + + Compared to returning one estimate of sentiment, this enables a more + nuanced analysis of a text's meaning. For example, a text with very + neutral sentiment would have an average rating of `5`. But also a text + that could be interpreted as having very positive sentiment or very + negative sentiment would also have an average rating of `5`. The + average sentiment is less informative than the distribution over + sentiment, so this API returns a value for each sentiment level. + toxicity: + type: optional> + docs: >- + Toxicity predictions returned as probabilities that the text can be + classified into the following categories: `toxic`, `severe_toxic`, + `obscene`, `threat`, `insult`, and `identity_hate`. + source: + openapi: batch-openapi.json + Models: + docs: The models used for inference. + properties: + face: optional + burst: optional + prosody: optional + language: optional + ner: optional + facemesh: optional + source: + openapi: batch-openapi.json + ModelsPredictions: + properties: + face: optional + burst: optional + prosody: optional + language: optional + ner: optional + facemesh: optional + source: + openapi: batch-openapi.json + Ner: + docs: >- + The NER (Named-entity Recognition) model identifies real-world objects and + concepts in passages of text. This also supports audio and video files by + transcribing and then directly analyzing the transcribed text. + + + Recommended input filetypes: `.txt`, `.mp3`, `.wav`, `.mp4` + properties: + identify_speakers: + type: optional + docs: >- + Whether to return identifiers for speakers over time. If `true`, + unique identifiers will be assigned to spoken words to differentiate + different speakers. If `false`, all speakers will be tagged with an + `unknown` ID. + default: false + source: + openapi: batch-openapi.json + NerPrediction: + properties: + entity: + type: string + docs: The recognized topic or entity. + position: PositionInterval + entity_confidence: + type: double + docs: Our NER model's relative confidence in the recognized topic or entity. + support: + type: double + docs: A measure of how often the entity is linked to by other entities. + uri: + type: string + docs: >- + A URL which provides more information about the recognized topic or + entity. + link_word: + type: string + docs: The specific word to which the emotion predictions are linked. + time: optional + confidence: + type: optional + docs: >- + Value between `0.0` and `1.0` that indicates our transcription model's + relative confidence in this text. + speaker_confidence: + type: optional + docs: >- + Value between `0.0` and `1.0` that indicates our transcription model's + relative confidence that this text was spoken by this speaker. + emotions: + docs: A high-dimensional embedding in emotion space. + type: list + source: + openapi: batch-openapi.json + 'Null': + type: map + docs: No associated metadata for this model. Value will be `null`. + PositionInterval: + docs: >- + Position of a segment of text within a larger document, measured in + characters. Uses zero-based indexing. The beginning index is inclusive and + the end index is exclusive. + properties: + begin: + type: uint64 + docs: The index of the first character in the text segment, inclusive. + end: + type: uint64 + docs: The index of the last character in the text segment, exclusive. 
+ source: + openapi: batch-openapi.json + PredictionsOptionalNullBurstPrediction: + properties: + metadata: optional + grouped_predictions: list + source: + openapi: batch-openapi.json + PredictionsOptionalNullFacePrediction: + properties: + metadata: optional + grouped_predictions: list + source: + openapi: batch-openapi.json + PredictionsOptionalNullFacemeshPrediction: + properties: + metadata: optional + grouped_predictions: list + source: + openapi: batch-openapi.json + PredictionsOptionalTranscriptionMetadataLanguagePrediction: + properties: + metadata: optional + grouped_predictions: list + source: + openapi: batch-openapi.json + PredictionsOptionalTranscriptionMetadataNerPrediction: + properties: + metadata: optional + grouped_predictions: list + source: + openapi: batch-openapi.json + PredictionsOptionalTranscriptionMetadataProsodyPrediction: + properties: + metadata: optional + grouped_predictions: list + source: + openapi: batch-openapi.json + Prosody: + docs: >- + The Speech Prosody model analyzes the intonation, stress, and rhythm of + spoken word. + + + Recommended input file types: `.wav`, `.mp3`, `.mp4` + properties: + granularity: optional + window: optional + identify_speakers: + type: optional + docs: >- + Whether to return identifiers for speakers over time. If `true`, + unique identifiers will be assigned to spoken words to differentiate + different speakers. If `false`, all speakers will be tagged with an + `unknown` ID. + default: false + source: + openapi: batch-openapi.json + ProsodyPrediction: + properties: + text: + type: optional + docs: A segment of text (like a word or a sentence). + time: TimeInterval + confidence: + type: optional + docs: >- + Value between `0.0` and `1.0` that indicates our transcription model's + relative confidence in this text. + speaker_confidence: + type: optional + docs: >- + Value between `0.0` and `1.0` that indicates our transcription model's + relative confidence that this text was spoken by this speaker. + emotions: + docs: A high-dimensional embedding in emotion space. + type: list + source: + openapi: batch-openapi.json + Queued: + properties: + created_timestamp_ms: + type: long + docs: When this job was created (Unix timestamp in milliseconds). + source: + openapi: batch-openapi.json + RegistryFileDetail: + properties: + file_id: + type: string + docs: File ID in the Asset Registry + file_url: + type: string + docs: URL to the file in the Asset Registry + source: + openapi: batch-openapi.json + Regression: map + SentimentScore: + properties: + name: + type: string + docs: Level of sentiment, ranging from `1` (negative) to `9` (positive) + score: + type: float + docs: Prediction for this level of sentiment + source: + openapi: batch-openapi.json + SortBy: + enum: + - created + - started + - ended + source: + openapi: batch-openapi.json + Source: + discriminant: type + base-properties: {} + union: + url: SourceUrl + file: SourceFile + text: SourceTextSource + source: + openapi: batch-openapi.json + SourceFile: + properties: {} + extends: + - File + source: + openapi: batch-openapi.json + SourceTextSource: + properties: {} + source: + openapi: batch-openapi.json + SourceUrl: + properties: {} + extends: + - Url + source: + openapi: batch-openapi.json + Url: + properties: + url: + type: string + docs: The URL of the source media file. 
+ source: + openapi: batch-openapi.json + StateEmbeddingGeneration: + discriminant: status + base-properties: {} + union: + QUEUED: StateEmbeddingGenerationQueued + IN_PROGRESS: StateEmbeddingGenerationInProgress + COMPLETED: StateEmbeddingGenerationCompletedEmbeddingGeneration + FAILED: StateEmbeddingGenerationFailed + source: + openapi: batch-openapi.json + StateEmbeddingGenerationCompletedEmbeddingGeneration: + properties: {} + extends: + - CompletedEmbeddingGeneration + source: + openapi: batch-openapi.json + StateEmbeddingGenerationFailed: + properties: {} + extends: + - Failed + source: + openapi: batch-openapi.json + StateEmbeddingGenerationInProgress: + properties: {} + extends: + - InProgress + source: + openapi: batch-openapi.json + StateEmbeddingGenerationQueued: + properties: {} + extends: + - Queued + source: + openapi: batch-openapi.json + StateInference: + discriminant: status + base-properties: {} + union: + QUEUED: QueuedState + IN_PROGRESS: InProgressState + COMPLETED: CompletedState + FAILED: FailedState + source: + openapi: batch-openapi.json + CompletedState: + properties: {} + extends: + - CompletedInference + source: + openapi: batch-openapi.json + FailedState: + properties: {} + extends: + - Failed + source: + openapi: batch-openapi.json + InProgressState: + properties: {} + extends: + - InProgress + source: + openapi: batch-openapi.json + QueuedState: + properties: {} + extends: + - Queued + source: + openapi: batch-openapi.json + StateTlInference: + discriminant: status + base-properties: {} + union: + QUEUED: StateTlInferenceQueued + IN_PROGRESS: StateTlInferenceInProgress + COMPLETED: StateTlInferenceCompletedTlInference + FAILED: StateTlInferenceFailed + source: + openapi: batch-openapi.json + StateTlInferenceCompletedTlInference: + properties: {} + extends: + - CompletedTlInference + source: + openapi: batch-openapi.json + StateTlInferenceFailed: + properties: {} + extends: + - Failed + source: + openapi: batch-openapi.json + StateTlInferenceInProgress: + properties: {} + extends: + - InProgress + source: + openapi: batch-openapi.json + StateTlInferenceQueued: + properties: {} + extends: + - Queued + source: + openapi: batch-openapi.json + StateTraining: + discriminant: status + base-properties: {} + union: + QUEUED: StateTrainingQueued + IN_PROGRESS: StateTrainingInProgress + COMPLETED: StateTrainingCompletedTraining + FAILED: StateTrainingFailed + source: + openapi: batch-openapi.json + StateTrainingCompletedTraining: + properties: {} + extends: + - CompletedTraining + source: + openapi: batch-openapi.json + StateTrainingFailed: + properties: {} + extends: + - Failed + source: + openapi: batch-openapi.json + StateTrainingInProgress: + properties: {} + extends: + - InProgress + source: + openapi: batch-openapi.json + StateTrainingQueued: + properties: {} + extends: + - Queued + source: + openapi: batch-openapi.json + Status: + enum: + - QUEUED + - IN_PROGRESS + - COMPLETED + - FAILED + source: + openapi: batch-openapi.json + TlInferencePrediction: + properties: + file: + type: string + docs: A file path relative to the top level source URL or file. + file_type: string + custom_models: map + source: + openapi: batch-openapi.json + TlInferenceResults: + properties: + predictions: list + errors: list + source: + openapi: batch-openapi.json + TlInferenceSourcePredictResult: + properties: + source: Source + results: optional + error: + type: optional + docs: An error message. 
+ source: + openapi: batch-openapi.json + Tag: + properties: + key: string + value: string + source: + openapi: batch-openapi.json + Target: + discriminated: false + union: + - long + - double + - string + source: + openapi: batch-openapi.json + Task: + discriminant: type + base-properties: {} + union: + classification: TaskClassification + regression: TaskRegression + source: + openapi: batch-openapi.json + TaskClassification: + properties: {} + source: + openapi: batch-openapi.json + TaskRegression: + properties: {} + source: + openapi: batch-openapi.json + TextSource: map + TimeInterval: + docs: A time range with a beginning and end, measured in seconds. + properties: + begin: + type: double + docs: Beginning of time range in seconds. + end: + type: double + docs: End of time range in seconds. + source: + openapi: batch-openapi.json + TlInferenceBaseRequest: + properties: + custom_model: CustomModel + urls: + type: optional> + docs: >- + URLs to the media files to be processed. Each must be a valid public + URL to a media file (see recommended input filetypes) or an archive + (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files. + + + If you wish to supply more than 100 URLs, consider providing them as + an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`). + callback_url: + type: optional + docs: >- + If provided, a `POST` request will be made to the URL with the + generated predictions on completion or the error message on failure. + notify: + type: optional + docs: >- + Whether to send an email notification to the user upon job + completion/failure. + default: false + source: + openapi: batch-openapi.json + CustomModel: + discriminated: false + union: + - CustomModelId + - CustomModelVersionId + source: + openapi: batch-openapi.json + CustomModelId: + properties: + id: string + source: + openapi: batch-openapi.json + CustomModelVersionId: + properties: + version_id: string + source: + openapi: batch-openapi.json + ToxicityScore: + properties: + name: + type: string + docs: Category of toxicity. + score: + type: float + docs: Prediction for this category of toxicity + source: + openapi: batch-openapi.json + TrainingBaseRequest: + properties: + custom_model: CustomModelRequest + dataset: Dataset + target_feature: + type: optional + default: label + task: optional + evaluation: optional + alternatives: optional> + callback_url: optional + notify: + type: optional + default: false + source: + openapi: batch-openapi.json + TrainingCustomModel: + properties: + id: string + version_id: optional + source: + openapi: batch-openapi.json + Transcription: + docs: |- + Transcription-related configuration options. + + To disable transcription, explicitly set this field to `null`. + properties: + language: + type: optional + docs: >- + By default, we use an automated language detection method for our + Speech Prosody, Language, and NER models. However, if you know what + language is being spoken in your media samples, you can specify it via + its BCP-47 tag and potentially obtain more accurate results. 
+ + + You can specify any of the following languages: + + - Chinese: `zh` + + - Danish: `da` + + - Dutch: `nl` + + - English: `en` + + - English (Australia): `en-AU` + + - English (India): `en-IN` + + - English (New Zealand): `en-NZ` + + - English (United Kingdom): `en-GB` + + - French: `fr` + + - French (Canada): `fr-CA` + + - German: `de` + + - Hindi: `hi` + + - Hindi (Roman Script): `hi-Latn` + + - Indonesian: `id` + + - Italian: `it` + + - Japanese: `ja` + + - Korean: `ko` + + - Norwegian: `no` + + - Polish: `pl` + + - Portuguese: `pt` + + - Portuguese (Brazil): `pt-BR` + + - Portuguese (Portugal): `pt-PT` + + - Russian: `ru` + + - Spanish: `es` + + - Spanish (Latin America): `es-419` + + - Swedish: `sv` + + - Tamil: `ta` + + - Turkish: `tr` + + - Ukrainian: `uk` + identify_speakers: + type: optional + docs: >- + Whether to return identifiers for speakers over time. If `true`, + unique identifiers will be assigned to spoken words to differentiate + different speakers. If `false`, all speakers will be tagged with an + `unknown` ID. + default: false + confidence_threshold: + type: optional + docs: >- + Transcript confidence threshold. Transcripts generated with a + confidence less than this threshold will be considered invalid and not + used as an input for model inference. + default: 0.5 + validation: + min: 0 + max: 1 + source: + openapi: batch-openapi.json + TranscriptionMetadata: + docs: Transcription metadata for your media file. + properties: + confidence: + type: double + docs: >- + Value between `0.0` and `1.0` indicating our transcription model's + relative confidence in the transcription of your media file. + detected_language: optional + source: + openapi: batch-openapi.json + Type: + enum: + - EMBEDDING_GENERATION + - INFERENCE + - TL_INFERENCE + - TRAINING + source: + openapi: batch-openapi.json + Unconfigurable: + type: map + docs: >- + To include predictions for this model type, set this field to `{}`. It is + currently not configurable further. + UnionJob: InferenceJob + EmbeddingGenerationJob: + properties: + type: string + extends: + - JobEmbeddingGeneration + source: + openapi: batch-openapi.json + InferenceJob: + properties: + type: + type: string + docs: >- + Denotes the job type. + + + Jobs created with the Expression Measurement API will have this field + set to `INFERENCE`. + extends: + - JobInference + source: + openapi: batch-openapi.json + CustomModelsInferenceJob: + properties: + type: string + extends: + - JobTlInference + source: + openapi: batch-openapi.json + CustomModelsTrainingJob: + properties: + type: string + extends: + - JobTraining + source: + openapi: batch-openapi.json + UnionPredictResult: InferenceSourcePredictResult + ValidationArgs: + properties: + positive_label: optional + source: + openapi: batch-openapi.json + When: + enum: + - created_before + - created_after + source: + openapi: batch-openapi.json + Window: + docs: >- + Generate predictions based on time. + + + Setting the `window` field allows for a 'sliding window' approach, where a + fixed-size window moves across the audio or video file in defined steps. + This enables continuous analysis of prosody within subsets of the file, + providing dynamic and localized insights into emotional expression. + properties: + length: + type: optional + docs: The length of the sliding window. + default: 4 + validation: + min: 0.5 + step: + type: optional + docs: The step size of the sliding window. 
+ default: 1 + validation: + min: 0.5 + source: + openapi: batch-openapi.json diff --git a/.mock/definition/expression-measurement/stream.yml b/.mock/definition/expression-measurement/stream.yml deleted file mode 100644 index 1a4abd53..00000000 --- a/.mock/definition/expression-measurement/stream.yml +++ /dev/null @@ -1,499 +0,0 @@ -channel: - path: /v0/stream/models - auth: false - headers: - X-Hume-Api-Key: - type: string - name: humeApiKey - messages: - subscribe: - origin: server - body: SubscribeEvent - publish: - origin: client - body: - type: StreamModelsEndpointPayload - docs: Models endpoint payload - examples: - - messages: - - type: publish - body: {} - - type: subscribe - body: {} -types: - StreamModelPredictionsJobDetails: - docs: > - If the job_details flag was set in the request, details about the current - streaming job will be returned in the response body. - properties: - job_id: - type: optional - docs: ID of the current streaming job. - source: - openapi: ../expression-measurement/streaming-asyncapi.yml - StreamModelPredictionsBurstPredictionsItem: - properties: - time: optional - emotions: optional - source: - openapi: ../expression-measurement/streaming-asyncapi.yml - StreamModelPredictionsBurst: - docs: Response for the vocal burst emotion model. - properties: - predictions: optional> - source: - openapi: ../expression-measurement/streaming-asyncapi.yml - StreamModelPredictionsFacePredictionsItem: - properties: - frame: - type: optional - docs: Frame number - time: - type: optional - docs: Time in seconds when face detection occurred. - bbox: optional - prob: - type: optional - docs: The predicted probability that a detected face was actually a face. - face_id: - type: optional - docs: >- - Identifier for a face. Not that this defaults to `unknown` unless face - identification is enabled in the face model configuration. - emotions: optional - facs: optional - descriptions: optional - source: - openapi: ../expression-measurement/streaming-asyncapi.yml - StreamModelPredictionsFace: - docs: Response for the facial expression emotion model. - properties: - predictions: optional> - source: - openapi: ../expression-measurement/streaming-asyncapi.yml - StreamModelPredictionsFacemeshPredictionsItem: - properties: - emotions: optional - source: - openapi: ../expression-measurement/streaming-asyncapi.yml - StreamModelPredictionsFacemesh: - docs: Response for the facemesh emotion model. - properties: - predictions: optional> - source: - openapi: ../expression-measurement/streaming-asyncapi.yml - StreamModelPredictionsLanguagePredictionsItem: - properties: - text: - type: optional - docs: A segment of text (like a word or a sentence). - position: optional - emotions: optional - sentiment: optional - toxicity: optional - source: - openapi: ../expression-measurement/streaming-asyncapi.yml - StreamModelPredictionsLanguage: - docs: Response for the language emotion model. - properties: - predictions: optional> - source: - openapi: ../expression-measurement/streaming-asyncapi.yml - StreamModelPredictionsProsodyPredictionsItem: - properties: - time: optional - emotions: optional - source: - openapi: ../expression-measurement/streaming-asyncapi.yml - StreamModelPredictionsProsody: - docs: Response for the speech prosody emotion model. - properties: - predictions: optional> - source: - openapi: ../expression-measurement/streaming-asyncapi.yml - Config: - docs: > - Configuration used to specify which models should be used and with what - settings. 
- properties: - burst: - type: optional> - docs: | - Configuration for the vocal burst emotion model. - - Note: Model configuration is not currently available in streaming. - - Please use the default configuration by passing an empty object `{}`. - face: - type: optional - docs: > - Configuration for the facial expression emotion model. - - - Note: Using the `reset_stream` parameter does not have any effect on - face identification. A single face identifier cache is maintained over - a full session whether `reset_stream` is used or not. - facemesh: - type: optional> - docs: | - Configuration for the facemesh emotion model. - - Note: Model configuration is not currently available in streaming. - - Please use the default configuration by passing an empty object `{}`. - language: - type: optional - docs: Configuration for the language emotion model. - prosody: - type: optional> - docs: | - Configuration for the speech prosody emotion model. - - Note: Model configuration is not currently available in streaming. - - Please use the default configuration by passing an empty object `{}`. - source: - openapi: ../expression-measurement/streaming-asyncapi.yml - JobDetails: - docs: > - If the job_details flag was set in the request, details about the current - streaming job will be returned in the response body. - properties: - job_id: - type: optional - docs: ID of the current streaming job. - source: - openapi: ../expression-measurement/streaming-asyncapi.yml - StreamErrorMessage: - docs: Error message - properties: - error: - type: optional - docs: Error message text. - code: - type: optional - docs: Unique identifier for the error. - payload_id: - type: optional - docs: > - If a payload ID was passed in the request, the same payload ID will be - sent back in the response body. - job_details: - type: optional - docs: > - If the job_details flag was set in the request, details about the - current streaming job will be returned in the response body. - source: - openapi: ../expression-measurement/streaming-asyncapi.yml - StreamWarningMessageJobDetails: - docs: > - If the job_details flag was set in the request, details about the current - streaming job will be returned in the response body. - properties: - job_id: - type: optional - docs: ID of the current streaming job. - source: - openapi: ../expression-measurement/streaming-asyncapi.yml - StreamWarningMessage: - docs: Warning message - properties: - warning: - type: optional - docs: Warning message text. - code: - type: optional - docs: Unique identifier for the error. - payload_id: - type: optional - docs: > - If a payload ID was passed in the request, the same payload ID will be - sent back in the response body. - job_details: - type: optional - docs: > - If the job_details flag was set in the request, details about the - current streaming job will be returned in the response body. - source: - openapi: ../expression-measurement/streaming-asyncapi.yml - SubscribeEvent: - discriminated: false - union: - - type: Config - docs: Model predictions - - type: StreamErrorMessage - docs: Error message - - type: StreamWarningMessage - docs: Warning message - source: - openapi: ../expression-measurement/streaming-asyncapi.yml - StreamModelsEndpointPayloadModelsFace: - docs: > - Configuration for the facial expression emotion model. - - - Note: Using the `reset_stream` parameter does not have any effect on face - identification. A single face identifier cache is maintained over a full - session whether `reset_stream` is used or not. 
- properties: - facs: - type: optional> - docs: >- - Configuration for FACS predictions. If missing or null, no FACS - predictions will be generated. - descriptions: - type: optional> - docs: >- - Configuration for Descriptions predictions. If missing or null, no - Descriptions predictions will be generated. - identify_faces: - type: optional - docs: > - Whether to return identifiers for faces across frames. If true, unique - identifiers will be assigned to face bounding boxes to differentiate - different faces. If false, all faces will be tagged with an "unknown" - ID. - default: false - fps_pred: - type: optional - docs: > - Number of frames per second to process. Other frames will be omitted - from the response. - default: 3 - prob_threshold: - type: optional - docs: > - Face detection probability threshold. Faces detected with a - probability less than this threshold will be omitted from the - response. - default: 3 - min_face_size: - type: optional - docs: > - Minimum bounding box side length in pixels to treat as a face. Faces - detected with a bounding box side length in pixels less than this - threshold will be omitted from the response. - default: 3 - source: - openapi: ../expression-measurement/streaming-asyncapi.yml - StreamModelsEndpointPayloadModelsLanguage: - docs: Configuration for the language emotion model. - properties: - sentiment: - type: optional> - docs: >- - Configuration for sentiment predictions. If missing or null, no - sentiment predictions will be generated. - toxicity: - type: optional> - docs: >- - Configuration for toxicity predictions. If missing or null, no - toxicity predictions will be generated. - granularity: - type: optional - docs: >- - The granularity at which to generate predictions. Values are `word`, - `sentence`, `utterance`, or `passage`. To get a single prediction for - the entire text of your streaming payload use `passage`. Default value - is `word`. - source: - openapi: ../expression-measurement/streaming-asyncapi.yml - StreamModelsEndpointPayload: - docs: Models endpoint payload - properties: - data: optional - models: - type: optional - docs: > - Configuration used to specify which models should be used and with - what settings. - stream_window_ms: - type: optional - docs: > - Length in milliseconds of streaming sliding window. - - - Extending the length of this window will prepend media context from - past payloads into the current payload. - - - For example, if on the first payload you send 500ms of data and on the - second payload you send an additional 500ms of data, a window of at - least 1000ms will allow the model to process all 1000ms of stream - data. - - - A window of 600ms would append the full 500ms of the second payload to - the last 100ms of the first payload. - - - Note: This feature is currently only supported for audio data and - audio models. For other file types and models this parameter will be - ignored. - default: 5000 - validation: - min: 500 - max: 10000 - reset_stream: - type: optional - docs: > - Whether to reset the streaming sliding window before processing the - current payload. - - - If this parameter is set to `true` then past context will be deleted - before processing the current payload. - - - Use reset_stream when one audio file is done being processed and you - do not want context to leak across files. - default: false - raw_text: - type: optional - docs: > - Set to `true` to enable the data parameter to be parsed as raw text - rather than base64 encoded bytes. 
- - This parameter is useful if you want to send text to be processed by - the language model, but it cannot be used with other file types like - audio, image, or video. - default: false - job_details: - type: optional - docs: > - Set to `true` to get details about the job. - - - This parameter can be set in the same payload as data or it can be set - without data and models configuration to get the job details between - payloads. - - - This parameter is useful to get the unique job ID. - default: false - payload_id: - type: optional - docs: > - Pass an arbitrary string as the payload ID and get it back at the top - level of the socket response. - - - This can be useful if you have multiple requests running - asynchronously and want to disambiguate responses as they are - received. - face: optional - language: optional - source: - openapi: ../expression-measurement/streaming-asyncapi.yml - EmotionEmbeddingItem: - properties: - name: - type: optional - docs: Name of the emotion being expressed. - score: - type: optional - docs: Embedding value for the emotion being expressed. - source: - openapi: ../expression-measurement/streaming-asyncapi.yml - EmotionEmbedding: - docs: A high-dimensional embedding in emotion space. - type: list - StreamBoundingBox: - docs: A bounding box around a face. - properties: - x: - type: optional - docs: x-coordinate of bounding box top left corner. - validation: - min: 0 - "y": - type: optional - docs: y-coordinate of bounding box top left corner. - validation: - min: 0 - w: - type: optional - docs: Bounding box width. - validation: - min: 0 - h: - type: optional - docs: Bounding box height. - validation: - min: 0 - source: - openapi: ../expression-measurement/streaming-asyncapi.yml - TimeRange: - docs: A time range with a beginning and end, measured in seconds. - properties: - begin: - type: optional - docs: Beginning of time range in seconds. - validation: - min: 0 - end: - type: optional - docs: End of time range in seconds. - validation: - min: 0 - source: - openapi: ../expression-measurement/streaming-asyncapi.yml - TextPosition: - docs: > - Position of a segment of text within a larger document, measured in - characters. Uses zero-based indexing. The beginning index is inclusive and - the end index is exclusive. - properties: - begin: - type: optional - docs: The index of the first character in the text segment, inclusive. - validation: - min: 0 - end: - type: optional - docs: The index of the last character in the text segment, exclusive. - validation: - min: 0 - source: - openapi: ../expression-measurement/streaming-asyncapi.yml - SentimentItem: - properties: - name: - type: optional - docs: Level of sentiment, ranging from 1 (negative) to 9 (positive) - score: - type: optional - docs: Prediction for this level of sentiment - source: - openapi: ../expression-measurement/streaming-asyncapi.yml - Sentiment: - docs: >- - Sentiment predictions returned as a distribution. This model predicts the - probability that a given text could be interpreted as having each - sentiment level from 1 (negative) to 9 (positive). - - - Compared to returning one estimate of sentiment, this enables a more - nuanced analysis of a text's meaning. For example, a text with very - neutral sentiment would have an average rating of 5. But also a text that - could be interpreted as having very positive sentiment or very negative - sentiment would also have an average rating of 5. 
The average sentiment is - less informative than the distribution over sentiment, so this API returns - a value for each sentiment level. - type: list - ToxicityItem: - properties: - name: - type: optional - docs: Category of toxicity. - score: - type: optional - docs: Prediction for this category of toxicity - source: - openapi: ../expression-measurement/streaming-asyncapi.yml - Toxicity: - docs: >- - Toxicity predictions returned as probabilities that the text can be - classified into the following categories: toxic, severe_toxic, obscene, - threat, insult, and identity_hate. - type: list diff --git a/.mock/definition/expression-measurement/stream/__package__.yml b/.mock/definition/expression-measurement/stream/__package__.yml new file mode 100644 index 00000000..6a7fe07f --- /dev/null +++ b/.mock/definition/expression-measurement/stream/__package__.yml @@ -0,0 +1,499 @@ +channel: + path: /v0/stream/models + auth: false + headers: + X-Hume-Api-Key: + type: string + name: humeApiKey + messages: + subscribe: + origin: server + body: SubscribeEvent + publish: + origin: client + body: + type: StreamModelsEndpointPayload + docs: Models endpoint payload + examples: + - messages: + - type: publish + body: {} + - type: subscribe + body: {} +types: + StreamModelPredictionsJobDetails: + docs: > + If the job_details flag was set in the request, details about the current + streaming job will be returned in the response body. + properties: + job_id: + type: optional + docs: ID of the current streaming job. + source: + openapi: streaming-asyncapi.yml + StreamModelPredictionsBurstPredictionsItem: + properties: + time: optional + emotions: optional + source: + openapi: streaming-asyncapi.yml + StreamModelPredictionsBurst: + docs: Response for the vocal burst emotion model. + properties: + predictions: optional> + source: + openapi: streaming-asyncapi.yml + StreamModelPredictionsFacePredictionsItem: + properties: + frame: + type: optional + docs: Frame number + time: + type: optional + docs: Time in seconds when face detection occurred. + bbox: optional + prob: + type: optional + docs: The predicted probability that a detected face was actually a face. + face_id: + type: optional + docs: >- + Identifier for a face. Not that this defaults to `unknown` unless face + identification is enabled in the face model configuration. + emotions: optional + facs: optional + descriptions: optional + source: + openapi: streaming-asyncapi.yml + StreamModelPredictionsFace: + docs: Response for the facial expression emotion model. + properties: + predictions: optional> + source: + openapi: streaming-asyncapi.yml + StreamModelPredictionsFacemeshPredictionsItem: + properties: + emotions: optional + source: + openapi: streaming-asyncapi.yml + StreamModelPredictionsFacemesh: + docs: Response for the facemesh emotion model. + properties: + predictions: optional> + source: + openapi: streaming-asyncapi.yml + StreamModelPredictionsLanguagePredictionsItem: + properties: + text: + type: optional + docs: A segment of text (like a word or a sentence). + position: optional + emotions: optional + sentiment: optional + toxicity: optional + source: + openapi: streaming-asyncapi.yml + StreamModelPredictionsLanguage: + docs: Response for the language emotion model. 
+ properties: + predictions: optional> + source: + openapi: streaming-asyncapi.yml + StreamModelPredictionsProsodyPredictionsItem: + properties: + time: optional + emotions: optional + source: + openapi: streaming-asyncapi.yml + StreamModelPredictionsProsody: + docs: Response for the speech prosody emotion model. + properties: + predictions: optional> + source: + openapi: streaming-asyncapi.yml + Config: + docs: > + Configuration used to specify which models should be used and with what + settings. + properties: + burst: + type: optional> + docs: | + Configuration for the vocal burst emotion model. + + Note: Model configuration is not currently available in streaming. + + Please use the default configuration by passing an empty object `{}`. + face: + type: optional + docs: > + Configuration for the facial expression emotion model. + + + Note: Using the `reset_stream` parameter does not have any effect on + face identification. A single face identifier cache is maintained over + a full session whether `reset_stream` is used or not. + facemesh: + type: optional> + docs: | + Configuration for the facemesh emotion model. + + Note: Model configuration is not currently available in streaming. + + Please use the default configuration by passing an empty object `{}`. + language: + type: optional + docs: Configuration for the language emotion model. + prosody: + type: optional> + docs: | + Configuration for the speech prosody emotion model. + + Note: Model configuration is not currently available in streaming. + + Please use the default configuration by passing an empty object `{}`. + source: + openapi: streaming-asyncapi.yml + JobDetails: + docs: > + If the job_details flag was set in the request, details about the current + streaming job will be returned in the response body. + properties: + job_id: + type: optional + docs: ID of the current streaming job. + source: + openapi: streaming-asyncapi.yml + StreamErrorMessage: + docs: Error message + properties: + error: + type: optional + docs: Error message text. + code: + type: optional + docs: Unique identifier for the error. + payload_id: + type: optional + docs: > + If a payload ID was passed in the request, the same payload ID will be + sent back in the response body. + job_details: + type: optional + docs: > + If the job_details flag was set in the request, details about the + current streaming job will be returned in the response body. + source: + openapi: streaming-asyncapi.yml + StreamWarningMessageJobDetails: + docs: > + If the job_details flag was set in the request, details about the current + streaming job will be returned in the response body. + properties: + job_id: + type: optional + docs: ID of the current streaming job. + source: + openapi: streaming-asyncapi.yml + StreamWarningMessage: + docs: Warning message + properties: + warning: + type: optional + docs: Warning message text. + code: + type: optional + docs: Unique identifier for the error. + payload_id: + type: optional + docs: > + If a payload ID was passed in the request, the same payload ID will be + sent back in the response body. + job_details: + type: optional + docs: > + If the job_details flag was set in the request, details about the + current streaming job will be returned in the response body. 
+ source: + openapi: streaming-asyncapi.yml + SubscribeEvent: + discriminated: false + union: + - type: Config + docs: Model predictions + - type: StreamErrorMessage + docs: Error message + - type: StreamWarningMessage + docs: Warning message + source: + openapi: streaming-asyncapi.yml + StreamModelsEndpointPayloadModelsFace: + docs: > + Configuration for the facial expression emotion model. + + + Note: Using the `reset_stream` parameter does not have any effect on face + identification. A single face identifier cache is maintained over a full + session whether `reset_stream` is used or not. + properties: + facs: + type: optional> + docs: >- + Configuration for FACS predictions. If missing or null, no FACS + predictions will be generated. + descriptions: + type: optional> + docs: >- + Configuration for Descriptions predictions. If missing or null, no + Descriptions predictions will be generated. + identify_faces: + type: optional + docs: > + Whether to return identifiers for faces across frames. If true, unique + identifiers will be assigned to face bounding boxes to differentiate + different faces. If false, all faces will be tagged with an "unknown" + ID. + default: false + fps_pred: + type: optional + docs: > + Number of frames per second to process. Other frames will be omitted + from the response. + default: 3 + prob_threshold: + type: optional + docs: > + Face detection probability threshold. Faces detected with a + probability less than this threshold will be omitted from the + response. + default: 3 + min_face_size: + type: optional + docs: > + Minimum bounding box side length in pixels to treat as a face. Faces + detected with a bounding box side length in pixels less than this + threshold will be omitted from the response. + default: 3 + source: + openapi: streaming-asyncapi.yml + StreamModelsEndpointPayloadModelsLanguage: + docs: Configuration for the language emotion model. + properties: + sentiment: + type: optional> + docs: >- + Configuration for sentiment predictions. If missing or null, no + sentiment predictions will be generated. + toxicity: + type: optional> + docs: >- + Configuration for toxicity predictions. If missing or null, no + toxicity predictions will be generated. + granularity: + type: optional + docs: >- + The granularity at which to generate predictions. Values are `word`, + `sentence`, `utterance`, or `passage`. To get a single prediction for + the entire text of your streaming payload use `passage`. Default value + is `word`. + source: + openapi: streaming-asyncapi.yml + StreamModelsEndpointPayload: + docs: Models endpoint payload + properties: + data: optional + models: + type: optional + docs: > + Configuration used to specify which models should be used and with + what settings. + stream_window_ms: + type: optional + docs: > + Length in milliseconds of streaming sliding window. + + + Extending the length of this window will prepend media context from + past payloads into the current payload. + + + For example, if on the first payload you send 500ms of data and on the + second payload you send an additional 500ms of data, a window of at + least 1000ms will allow the model to process all 1000ms of stream + data. + + + A window of 600ms would append the full 500ms of the second payload to + the last 100ms of the first payload. + + + Note: This feature is currently only supported for audio data and + audio models. For other file types and models this parameter will be + ignored. 
+ default: 5000 + validation: + min: 500 + max: 10000 + reset_stream: + type: optional + docs: > + Whether to reset the streaming sliding window before processing the + current payload. + + + If this parameter is set to `true` then past context will be deleted + before processing the current payload. + + + Use reset_stream when one audio file is done being processed and you + do not want context to leak across files. + default: false + raw_text: + type: optional + docs: > + Set to `true` to enable the data parameter to be parsed as raw text + rather than base64 encoded bytes. + + This parameter is useful if you want to send text to be processed by + the language model, but it cannot be used with other file types like + audio, image, or video. + default: false + job_details: + type: optional + docs: > + Set to `true` to get details about the job. + + + This parameter can be set in the same payload as data or it can be set + without data and models configuration to get the job details between + payloads. + + + This parameter is useful to get the unique job ID. + default: false + payload_id: + type: optional + docs: > + Pass an arbitrary string as the payload ID and get it back at the top + level of the socket response. + + + This can be useful if you have multiple requests running + asynchronously and want to disambiguate responses as they are + received. + face: optional + language: optional + source: + openapi: streaming-asyncapi.yml + EmotionEmbeddingItem: + properties: + name: + type: optional + docs: Name of the emotion being expressed. + score: + type: optional + docs: Embedding value for the emotion being expressed. + source: + openapi: streaming-asyncapi.yml + EmotionEmbedding: + docs: A high-dimensional embedding in emotion space. + type: list + StreamBoundingBox: + docs: A bounding box around a face. + properties: + x: + type: optional + docs: x-coordinate of bounding box top left corner. + validation: + min: 0 + 'y': + type: optional + docs: y-coordinate of bounding box top left corner. + validation: + min: 0 + w: + type: optional + docs: Bounding box width. + validation: + min: 0 + h: + type: optional + docs: Bounding box height. + validation: + min: 0 + source: + openapi: streaming-asyncapi.yml + TimeRange: + docs: A time range with a beginning and end, measured in seconds. + properties: + begin: + type: optional + docs: Beginning of time range in seconds. + validation: + min: 0 + end: + type: optional + docs: End of time range in seconds. + validation: + min: 0 + source: + openapi: streaming-asyncapi.yml + TextPosition: + docs: > + Position of a segment of text within a larger document, measured in + characters. Uses zero-based indexing. The beginning index is inclusive and + the end index is exclusive. + properties: + begin: + type: optional + docs: The index of the first character in the text segment, inclusive. + validation: + min: 0 + end: + type: optional + docs: The index of the last character in the text segment, exclusive. + validation: + min: 0 + source: + openapi: streaming-asyncapi.yml + SentimentItem: + properties: + name: + type: optional + docs: Level of sentiment, ranging from 1 (negative) to 9 (positive) + score: + type: optional + docs: Prediction for this level of sentiment + source: + openapi: streaming-asyncapi.yml + Sentiment: + docs: >- + Sentiment predictions returned as a distribution. This model predicts the + probability that a given text could be interpreted as having each + sentiment level from 1 (negative) to 9 (positive). 
+ + + Compared to returning one estimate of sentiment, this enables a more + nuanced analysis of a text's meaning. For example, a text with very + neutral sentiment would have an average rating of 5. But also a text that + could be interpreted as having very positive sentiment or very negative + sentiment would also have an average rating of 5. The average sentiment is + less informative than the distribution over sentiment, so this API returns + a value for each sentiment level. + type: list + ToxicityItem: + properties: + name: + type: optional + docs: Category of toxicity. + score: + type: optional + docs: Prediction for this category of toxicity + source: + openapi: streaming-asyncapi.yml + Toxicity: + docs: >- + Toxicity predictions returned as probabilities that the text can be + classified into the following categories: toxic, severe_toxic, obscene, + threat, insult, and identity_hate. + type: list diff --git a/.mock/fern.config.json b/.mock/fern.config.json index 74eda3fe..ba052cad 100644 --- a/.mock/fern.config.json +++ b/.mock/fern.config.json @@ -1,4 +1,4 @@ { - "organization": "hume", - "version": "0.40.2" -} + "organization" : "hume", + "version" : "0.41.9" +} \ No newline at end of file diff --git a/package.json b/package.json index 555263c0..4a68df58 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "hume", - "version": "0.8.10", + "version": "0.8.11", "private": false, "repository": "https://github.com/HumeAI/hume-typescript-sdk", "main": "./index.js", diff --git a/reference.md b/reference.md index 2c09d71a..dbb6e631 100644 --- a/reference.md +++ b/reference.md @@ -373,7 +373,7 @@ await client.empathicVoice.tools.getToolVersion("00183a3f-79ba-413d-9f3b-6098642 Version number for a Tool. -Tools, as well as Configs and Prompts, are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. +Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. @@ -435,7 +435,7 @@ await client.empathicVoice.tools.deleteToolVersion("00183a3f-79ba-413d-9f3b-6098 Version number for a Tool. -Tools, as well as Configs and Prompts, are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. +Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. @@ -500,7 +500,7 @@ await client.empathicVoice.tools.updateToolDescription("00183a3f-79ba-413d-9f3b- Version number for a Tool. -Tools, as well as Configs and Prompts, are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. +Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Tool. 
Each update to the Tool increments its version number. @@ -895,7 +895,7 @@ await client.empathicVoice.prompts.getPromptVersion("af699d45-2985-42cc-91b9-af9 Version number for a Prompt. -Prompts, as well as Configs and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. +Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. @@ -957,7 +957,7 @@ await client.empathicVoice.prompts.deletePromptVersion("af699d45-2985-42cc-91b9- Version number for a Prompt. -Prompts, as well as Configs and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. +Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. @@ -1021,7 +1021,7 @@ await client.empathicVoice.prompts.updatePromptDescription("af699d45-2985-42cc-9 Version number for a Prompt. -Prompts, as well as Configs and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. +Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. @@ -1050,6 +1050,322 @@ Version numbers are integer values representing different iterations of the Prom +## EmpathicVoice CustomVoices + +
client.empathicVoice.customVoices.getReturnCustomVoicesForUser({ ...params }) -> Hume.ReturnPagedCustomVoices +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +await client.empathicVoice.customVoices.getReturnCustomVoicesForUser(); +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `Hume.empathicVoice.GetReturnCustomVoicesForUserRequest` + +
+
+ +
+
+ +**requestOptions:** `CustomVoices.RequestOptions` + +
+
+
+
+ +
+
+
+ +
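The `GetReturnCustomVoicesForUserRequest` fields shown later in this diff (`pageNumber`, `pageSize`, `name`) are all optional, so the listing call can be paginated and filtered. A minimal sketch, assuming the package's usual `HumeClient` entry point:

```typescript
import { HumeClient } from "hume"; // assumed entry point of the published package

const client = new HumeClient({ apiKey: "<your-api-key>" });

// Fetch the first page of Custom Voices matching a name filter.
// pageNumber is zero-based; pageSize must be between 1 and 100.
const page = await client.empathicVoice.customVoices.getReturnCustomVoicesForUser({
    pageNumber: 0,
    pageSize: 10,
    name: "SAMPLE VOICE",
});
console.log(page); // a Hume.ReturnPagedCustomVoices; its paging fields are not shown in this diff
```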
client.empathicVoice.customVoices.createNewCustomVoice({ ...params }) -> Hume.ReturnCustomVoice +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +await client.empathicVoice.customVoices.createNewCustomVoice({ + name: "name", + baseVoice: Hume.PostedCustomVoiceBaseVoice.Ito, + parameterModel: "20240715-4parameter", +}); +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**request:** `Hume.PostedCustomVoice` + +
+
+ +
+
+ +**requestOptions:** `CustomVoices.RequestOptions` + +
+
+
+
+ +
+
+
+ +
client.empathicVoice.customVoices.getReturnCustomVoiceByCustomVoiceId(id) -> Hume.ReturnCustomVoice +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +await client.empathicVoice.customVoices.getReturnCustomVoiceByCustomVoiceId("id"); +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `string` — Identifier for a Custom Voice. Formatted as a UUID. + +
+
+ +
+
+ +**requestOptions:** `CustomVoices.RequestOptions` + +
+
+
+
+ +
+
+
+ +
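Each of these methods also accepts an optional `requestOptions` argument; the `CustomVoices.RequestOptions` interface added later in this diff exposes `timeoutInSeconds`, `maxRetries`, and `abortSignal`. A sketch of overriding the defaults for a single call, assuming an already-constructed `client`:

```typescript
// Override the 60-second default timeout and the default of 2 retries for one request,
// and wire up cancellation via an AbortController. The UUID below is only a placeholder.
const controller = new AbortController();

const voice = await client.empathicVoice.customVoices.getReturnCustomVoiceByCustomVoiceId(
    "00000000-0000-0000-0000-000000000000",
    { timeoutInSeconds: 15, maxRetries: 1, abortSignal: controller.signal }
);
```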
client.empathicVoice.customVoices.addNewCustomVoiceVersion(id, { ...params }) -> Hume.ReturnCustomVoice +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +await client.empathicVoice.customVoices.addNewCustomVoiceVersion("id", { + name: "name", + baseVoice: Hume.PostedCustomVoiceBaseVoice.Ito, + parameterModel: "20240715-4parameter", +}); +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `string` — Identifier for a Custom Voice. Formatted as a UUID. + +
+
+ +
+
+ +**request:** `Hume.PostedCustomVoice` + +
+
+ +
+
+ +**requestOptions:** `CustomVoices.RequestOptions` + +
+
+
+
+ +
+
+
+ +
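Because a new version is posted against an existing Custom Voice `id`, the create and add-version calls chain naturally. A sketch, assuming a constructed `client`, that `Hume` is the SDK's exported namespace as in the snippets above, and that the returned `ReturnCustomVoice` exposes its UUID as `id`:

```typescript
// Create a Custom Voice, then add a new version under the same id.
// Per the Custom Voice name docs in this diff, names are stored in all-uppercase.
const created = await client.empathicVoice.customVoices.createNewCustomVoice({
    name: "sample voice",
    baseVoice: Hume.PostedCustomVoiceBaseVoice.Ito,
    parameterModel: "20240715-4parameter",
});

await client.empathicVoice.customVoices.addNewCustomVoiceVersion(created.id, {
    name: "sample voice",
    baseVoice: Hume.PostedCustomVoiceBaseVoice.Ito,
    parameterModel: "20240715-4parameter",
});
```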
client.empathicVoice.customVoices.deleteCustomVoice(id) -> void +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +await client.empathicVoice.customVoices.deleteCustomVoice("id"); +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `string` — Identifier for a Custom Voice. Formatted as a UUID. + +
+
+ +
+
+ +**requestOptions:** `CustomVoices.RequestOptions` + +
+
+
+
+ +
+
+
+ +
client.empathicVoice.customVoices.updateCustomVoiceName(id, { ...params }) -> string +
+
+ +#### 🔌 Usage + +
+
+ +
+
+ +```typescript +await client.empathicVoice.customVoices.updateCustomVoiceName("string", { + name: "string", +}); +``` + +
+
+
+
+ +#### ⚙️ Parameters + +
+
+ +
+
+ +**id:** `string` — Identifier for a Custom Voice. Formatted as a UUID. + +
+
+ +
+
+ +**request:** `Hume.empathicVoice.PostedCustomVoiceName` + +
+
+ +
+
+ +**requestOptions:** `CustomVoices.RequestOptions` + +
+
+
+
+ +
+
+
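The client changes further down in this diff switch these endpoints from throwing a generic `HumeError` on every non-2xx status to throwing `Hume.empathicVoice.BadRequestError` on a 400. A sketch of catching it, assuming a constructed `client`; reading `err.body` assumes the base error class exposes the parsed `ErrorResponse` it is constructed with:

```typescript
try {
    await client.empathicVoice.customVoices.updateCustomVoiceName("00000000-0000-0000-0000-000000000000", {
        name: "RENAMED VOICE",
    });
} catch (err) {
    if (err instanceof Hume.empathicVoice.BadRequestError) {
        // statusCode is fixed at 400 by the constructor added in this diff.
        console.error("Bad request:", err.body);
    } else {
        throw err; // other failures still surface as HumeError / HumeTimeoutError
    }
}
```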
+ ## EmpathicVoice Configs
client.empathicVoice.configs.listConfigs({ ...params }) -> Hume.ReturnPagedConfigs @@ -1122,9 +1438,10 @@ await client.empathicVoice.configs.createConfig({ id: "af699d45-2985-42cc-91b9-af9e5da3bac5", version: 0, }, + eviVersion: "2", voice: { provider: "HUME_AI", - name: Hume.PostedVoiceName.Kora, + name: "SAMPLE VOICE", }, languageModel: { modelProvider: Hume.PostedLanguageModelModelProvider.Anthropic, @@ -1251,13 +1568,14 @@ await client.empathicVoice.configs.listConfigVersions("1b60e1a0-cc59-424a-8d2c-1 ```typescript await client.empathicVoice.configs.createConfigVersion("1b60e1a0-cc59-424a-8d2c-189d354db3f3", { versionDescription: "This is an updated version of the Weather Assistant Config.", + eviVersion: "2", prompt: { id: "af699d45-2985-42cc-91b9-af9e5da3bac5", version: 0, }, voice: { provider: "HUME_AI", - name: Hume.PostedVoiceName.Ito, + name: "ITO", }, languageModel: { modelProvider: Hume.PostedLanguageModelModelProvider.Anthropic, @@ -1471,7 +1789,7 @@ await client.empathicVoice.configs.getConfigVersion("1b60e1a0-cc59-424a-8d2c-189 Version number for a Config. -Configs, as well as Prompts and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. +Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. @@ -1533,7 +1851,7 @@ await client.empathicVoice.configs.deleteConfigVersion("1b60e1a0-cc59-424a-8d2c- Version number for a Config. -Configs, as well as Prompts and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. +Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. @@ -1597,7 +1915,7 @@ await client.empathicVoice.configs.updateConfigDescription("1b60e1a0-cc59-424a-8 Version number for a Config. -Configs, as well as Prompts and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. +Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. @@ -1901,7 +2219,7 @@ await client.expressionMeasurement.batch.listJobs();
-**request:** `Hume.expressionMeasurement.BatchListJobsRequest` +**request:** `Hume.expressionMeasurement.batch.BatchListJobsRequest`
@@ -2230,7 +2548,7 @@ await client.expressionMeasurement.batch.startInferenceJobFromLocalFile(
-**request:** `Hume.expressionMeasurement.BatchStartInferenceJobFromLocalFileRequest` +**request:** `Hume.expressionMeasurement.batch.BatchStartInferenceJobFromLocalFileRequest`
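These two request types now live under the `batch` sub-namespace, so explicit type annotations written against the old `Hume.expressionMeasurement.*` path need a one-line update. A sketch, assuming a constructed `client`; the request's fields are not shown in this diff, so an empty object stands in here:

```typescript
// Before this release: Hume.expressionMeasurement.BatchListJobsRequest
// After: the type is addressed through the batch resource namespace.
const request: Hume.expressionMeasurement.batch.BatchListJobsRequest = {};
const jobs = await client.expressionMeasurement.batch.listJobs(request);
```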
diff --git a/src/api/resources/empathicVoice/errors/BadRequestError.ts b/src/api/resources/empathicVoice/errors/BadRequestError.ts new file mode 100644 index 00000000..e204eab5 --- /dev/null +++ b/src/api/resources/empathicVoice/errors/BadRequestError.ts @@ -0,0 +1,17 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as errors from "../../../../errors/index"; +import * as Hume from "../../../index"; + +export class BadRequestError extends errors.HumeError { + constructor(body: Hume.empathicVoice.ErrorResponse) { + super({ + message: "BadRequestError", + statusCode: 400, + body: body, + }); + Object.setPrototypeOf(this, BadRequestError.prototype); + } +} diff --git a/src/api/resources/empathicVoice/errors/index.ts b/src/api/resources/empathicVoice/errors/index.ts new file mode 100644 index 00000000..92efd0fd --- /dev/null +++ b/src/api/resources/empathicVoice/errors/index.ts @@ -0,0 +1 @@ +export * from "./BadRequestError"; diff --git a/src/api/resources/empathicVoice/index.ts b/src/api/resources/empathicVoice/index.ts index d3c50802..d5c609c4 100644 --- a/src/api/resources/empathicVoice/index.ts +++ b/src/api/resources/empathicVoice/index.ts @@ -1,3 +1,4 @@ export * from "./resources"; export * from "./types"; +export * from "./errors"; export * from "./client"; diff --git a/src/api/resources/empathicVoice/resources/chatGroups/client/Client.ts b/src/api/resources/empathicVoice/resources/chatGroups/client/Client.ts index 4902bca5..50d6662a 100644 --- a/src/api/resources/empathicVoice/resources/chatGroups/client/Client.ts +++ b/src/api/resources/empathicVoice/resources/chatGroups/client/Client.ts @@ -33,6 +33,8 @@ export class ChatGroups { * @param {Hume.empathicVoice.ChatGroupsListChatGroupsRequest} request * @param {ChatGroups.RequestOptions} requestOptions - Request-specific configuration. * + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.chatGroups.listChatGroups({ * pageNumber: 0, @@ -72,8 +74,8 @@ export class ChatGroups { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -95,10 +97,22 @@ export class ChatGroups { } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { @@ -121,6 +135,8 @@ export class ChatGroups { * @param {Hume.empathicVoice.ChatGroupsListChatGroupEventsRequest} request * @param {ChatGroups.RequestOptions} requestOptions - Request-specific configuration. 
* + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.chatGroups.listChatGroupEvents("697056f0-6c7e-487d-9bd8-9c19df79f05f", { * pageNumber: 0, @@ -156,8 +172,8 @@ export class ChatGroups { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -179,10 +195,22 @@ export class ChatGroups { } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { diff --git a/src/api/resources/empathicVoice/resources/chats/client/Client.ts b/src/api/resources/empathicVoice/resources/chats/client/Client.ts index f9967d74..3c9bcf56 100644 --- a/src/api/resources/empathicVoice/resources/chats/client/Client.ts +++ b/src/api/resources/empathicVoice/resources/chats/client/Client.ts @@ -33,6 +33,8 @@ export class Chats { * @param {Hume.empathicVoice.ChatsListChatsRequest} request * @param {Chats.RequestOptions} requestOptions - Request-specific configuration. * + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.chats.listChats({ * pageNumber: 0, @@ -67,8 +69,8 @@ export class Chats { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -89,10 +91,22 @@ export class Chats { }); } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { case "non-json": @@ -125,6 +139,8 @@ export class Chats { * @param {Hume.empathicVoice.ChatsListChatEventsRequest} request * @param {Chats.RequestOptions} requestOptions - Request-specific configuration. 
* + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.chats.listChatEvents("470a49f6-1dec-4afe-8b61-035d3b2d63b0", { * pageNumber: 0, @@ -160,8 +176,8 @@ export class Chats { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -182,10 +198,22 @@ export class Chats { }); } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { case "non-json": diff --git a/src/api/resources/empathicVoice/resources/configs/client/Client.ts b/src/api/resources/empathicVoice/resources/configs/client/Client.ts index c6d9eb92..b1f29207 100644 --- a/src/api/resources/empathicVoice/resources/configs/client/Client.ts +++ b/src/api/resources/empathicVoice/resources/configs/client/Client.ts @@ -33,6 +33,8 @@ export class Configs { * @param {Hume.empathicVoice.ConfigsListConfigsRequest} request * @param {Configs.RequestOptions} requestOptions - Request-specific configuration. * + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.configs.listConfigs({ * pageNumber: 0, @@ -70,8 +72,8 @@ export class Configs { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -93,10 +95,22 @@ export class Configs { } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { @@ -118,6 +132,8 @@ export class Configs { * @param {Hume.empathicVoice.PostedConfig} request * @param {Configs.RequestOptions} requestOptions - Request-specific configuration. 
* + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.configs.createConfig({ * name: "Weather Assistant Config", @@ -125,9 +141,10 @@ export class Configs { * id: "af699d45-2985-42cc-91b9-af9e5da3bac5", * version: 0 * }, + * eviVersion: "2", * voice: { * provider: "HUME_AI", - * name: Hume.empathicVoice.PostedVoiceName.Kora + * name: "SAMPLE VOICE" * }, * languageModel: { * modelProvider: Hume.empathicVoice.PostedLanguageModelModelProvider.Anthropic, @@ -163,8 +180,8 @@ export class Configs { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -186,10 +203,22 @@ export class Configs { } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { @@ -212,6 +241,8 @@ export class Configs { * @param {Hume.empathicVoice.ConfigsListConfigVersionsRequest} request * @param {Configs.RequestOptions} requestOptions - Request-specific configuration. * + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.configs.listConfigVersions("1b60e1a0-cc59-424a-8d2c-189d354db3f3") */ @@ -243,8 +274,8 @@ export class Configs { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -266,10 +297,22 @@ export class Configs { } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { @@ -292,16 +335,19 @@ export class Configs { * @param {Hume.empathicVoice.PostedConfigVersion} request * @param {Configs.RequestOptions} requestOptions - Request-specific configuration. 
* + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.configs.createConfigVersion("1b60e1a0-cc59-424a-8d2c-189d354db3f3", { * versionDescription: "This is an updated version of the Weather Assistant Config.", + * eviVersion: "2", * prompt: { * id: "af699d45-2985-42cc-91b9-af9e5da3bac5", * version: 0 * }, * voice: { * provider: "HUME_AI", - * name: Hume.empathicVoice.PostedVoiceName.Ito + * name: "ITO" * }, * languageModel: { * modelProvider: Hume.empathicVoice.PostedLanguageModelModelProvider.Anthropic, @@ -329,7 +375,7 @@ export class Configs { */ public async createConfigVersion( id: string, - request: Hume.empathicVoice.PostedConfigVersion = {}, + request: Hume.empathicVoice.PostedConfigVersion, requestOptions?: Configs.RequestOptions ): Promise { const _response = await (this._options.fetcher ?? core.fetcher)({ @@ -341,8 +387,8 @@ export class Configs { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -366,10 +412,22 @@ export class Configs { } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { @@ -391,6 +449,8 @@ export class Configs { * @param {string} id - Identifier for a Config. Formatted as a UUID. * @param {Configs.RequestOptions} requestOptions - Request-specific configuration. 
* + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.configs.deleteConfig("1b60e1a0-cc59-424a-8d2c-189d354db3f3") */ @@ -404,8 +464,8 @@ export class Configs { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -421,10 +481,22 @@ export class Configs { } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { @@ -447,6 +519,8 @@ export class Configs { * @param {Hume.empathicVoice.PostedConfigName} request * @param {Configs.RequestOptions} requestOptions - Request-specific configuration. * + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.configs.updateConfigName("1b60e1a0-cc59-424a-8d2c-189d354db3f3", { * name: "Updated Weather Assistant Config Name" @@ -466,8 +540,8 @@ export class Configs { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -485,10 +559,22 @@ export class Configs { } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { @@ -510,11 +596,13 @@ export class Configs { * @param {string} id - Identifier for a Config. Formatted as a UUID. * @param {number} version - Version number for a Config. * - * Configs, as well as Prompts and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. + * Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. * * Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. * @param {Configs.RequestOptions} requestOptions - Request-specific configuration. 
* + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.configs.getConfigVersion("1b60e1a0-cc59-424a-8d2c-189d354db3f3", 1) */ @@ -532,8 +620,8 @@ export class Configs { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -554,10 +642,22 @@ export class Configs { } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { @@ -579,11 +679,13 @@ export class Configs { * @param {string} id - Identifier for a Config. Formatted as a UUID. * @param {number} version - Version number for a Config. * - * Configs, as well as Prompts and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. + * Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. * * Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. * @param {Configs.RequestOptions} requestOptions - Request-specific configuration. * + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.configs.deleteConfigVersion("1b60e1a0-cc59-424a-8d2c-189d354db3f3", 1) */ @@ -601,8 +703,8 @@ export class Configs { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -618,10 +720,22 @@ export class Configs { } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { @@ -643,12 +757,14 @@ export class Configs { * @param {string} id - Identifier for a Config. Formatted as a UUID. * @param {number} version - Version number for a Config. 
* - * Configs, as well as Prompts and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. + * Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. * * Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. * @param {Hume.empathicVoice.PostedConfigVersionDescription} request * @param {Configs.RequestOptions} requestOptions - Request-specific configuration. * + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.configs.updateConfigDescription("1b60e1a0-cc59-424a-8d2c-189d354db3f3", 1, { * versionDescription: "This is an updated version_description." @@ -669,8 +785,8 @@ export class Configs { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -694,10 +810,22 @@ export class Configs { } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { diff --git a/src/api/resources/empathicVoice/resources/configs/client/requests/PostedConfig.ts b/src/api/resources/empathicVoice/resources/configs/client/requests/PostedConfig.ts index fc101528..66a7ea74 100644 --- a/src/api/resources/empathicVoice/resources/configs/client/requests/PostedConfig.ts +++ b/src/api/resources/empathicVoice/resources/configs/client/requests/PostedConfig.ts @@ -12,9 +12,10 @@ import * as Hume from "../../../../../../index"; * id: "af699d45-2985-42cc-91b9-af9e5da3bac5", * version: 0 * }, + * eviVersion: "2", * voice: { * provider: "HUME_AI", - * name: Hume.empathicVoice.PostedVoiceName.Kora + * name: "SAMPLE VOICE" * }, * languageModel: { * modelProvider: Hume.empathicVoice.PostedLanguageModelModelProvider.Anthropic, @@ -38,11 +39,13 @@ import * as Hume from "../../../../../../index"; * } */ export interface PostedConfig { + /** Specifies the EVI version to use. Use `"1"` for version 1, or `"2"` for the latest enhanced version. For a detailed comparison of the two versions, refer to our [guide](/docs/empathic-voice-interface-evi/evi-2). */ + eviVersion: string; /** Name applied to all versions of a particular Config. */ name: string; /** An optional description of the Config version. */ versionDescription?: string; - prompt?: Hume.empathicVoice.PostedPromptSpec; + prompt?: Hume.empathicVoice.PostedConfigPromptSpec; /** A voice specification associated with this Config. 
*/ voice?: Hume.empathicVoice.PostedVoice; /** diff --git a/src/api/resources/empathicVoice/resources/configs/client/requests/PostedConfigVersion.ts b/src/api/resources/empathicVoice/resources/configs/client/requests/PostedConfigVersion.ts index af1a21dd..9017ab98 100644 --- a/src/api/resources/empathicVoice/resources/configs/client/requests/PostedConfigVersion.ts +++ b/src/api/resources/empathicVoice/resources/configs/client/requests/PostedConfigVersion.ts @@ -8,13 +8,14 @@ import * as Hume from "../../../../../../index"; * @example * { * versionDescription: "This is an updated version of the Weather Assistant Config.", + * eviVersion: "2", * prompt: { * id: "af699d45-2985-42cc-91b9-af9e5da3bac5", * version: 0 * }, * voice: { * provider: "HUME_AI", - * name: Hume.empathicVoice.PostedVoiceName.Ito + * name: "ITO" * }, * languageModel: { * modelProvider: Hume.empathicVoice.PostedLanguageModelModelProvider.Anthropic, @@ -41,9 +42,11 @@ import * as Hume from "../../../../../../index"; * } */ export interface PostedConfigVersion { + /** The version of the EVI used with this config. */ + eviVersion: string; /** An optional description of the Config version. */ versionDescription?: string; - prompt?: Hume.empathicVoice.PostedPromptSpec; + prompt?: Hume.empathicVoice.PostedConfigPromptSpec; /** A voice specification associated with this Config version. */ voice?: Hume.empathicVoice.PostedVoice; /** diff --git a/src/api/resources/empathicVoice/resources/customVoices/client/Client.ts b/src/api/resources/empathicVoice/resources/customVoices/client/Client.ts new file mode 100644 index 00000000..df21e95f --- /dev/null +++ b/src/api/resources/empathicVoice/resources/customVoices/client/Client.ts @@ -0,0 +1,520 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as environments from "../../../../../../environments"; +import * as core from "../../../../../../core"; +import * as Hume from "../../../../../index"; +import urlJoin from "url-join"; +import * as serializers from "../../../../../../serialization/index"; +import * as errors from "../../../../../../errors/index"; + +export declare namespace CustomVoices { + interface Options { + environment?: core.Supplier; + apiKey?: core.Supplier; + fetcher?: core.FetchFunction; + } + + interface RequestOptions { + /** The maximum time to wait for a response in seconds. */ + timeoutInSeconds?: number; + /** The number of times to retry the request. Defaults to 2. */ + maxRetries?: number; + /** A hook to abort the request. */ + abortSignal?: AbortSignal; + } +} + +export class CustomVoices { + constructor(protected readonly _options: CustomVoices.Options = {}) {} + + /** + * @param {Hume.empathicVoice.GetReturnCustomVoicesForUserRequest} request + * @param {CustomVoices.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Hume.empathicVoice.BadRequestError} + * + * @example + * await client.empathicVoice.customVoices.getReturnCustomVoicesForUser() + */ + public async getReturnCustomVoicesForUser( + request: Hume.empathicVoice.GetReturnCustomVoicesForUserRequest = {}, + requestOptions?: CustomVoices.RequestOptions + ): Promise { + const { pageNumber, pageSize, name } = request; + const _queryParams: Record = {}; + if (pageNumber != null) { + _queryParams["page_number"] = pageNumber.toString(); + } + + if (pageSize != null) { + _queryParams["page_size"] = pageSize.toString(); + } + + if (name != null) { + _queryParams["name"] = name; + } + + const _response = await (this._options.fetcher ?? 
core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.environment)) ?? environments.HumeEnvironment.Production, + "v0/evi/custom_voices" + ), + method: "GET", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "hume", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + }, + contentType: "application/json", + queryParameters: _queryParams, + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.empathicVoice.ReturnPagedCustomVoices.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumeTimeoutError(); + case "unknown": + throw new errors.HumeError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * @param {Hume.empathicVoice.PostedCustomVoice} request + * @param {CustomVoices.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Hume.empathicVoice.BadRequestError} + * + * @example + * await client.empathicVoice.customVoices.createNewCustomVoice({ + * name: "name", + * baseVoice: Hume.empathicVoice.PostedCustomVoiceBaseVoice.Ito, + * parameterModel: "20240715-4parameter" + * }) + */ + public async createNewCustomVoice( + request: Hume.empathicVoice.PostedCustomVoice, + requestOptions?: CustomVoices.RequestOptions + ): Promise { + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.environment)) ?? environments.HumeEnvironment.Production, + "v0/evi/custom_voices" + ), + method: "POST", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "hume", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + }, + contentType: "application/json", + requestType: "json", + body: serializers.empathicVoice.PostedCustomVoice.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.empathicVoice.ReturnCustomVoice.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumeTimeoutError(); + case "unknown": + throw new errors.HumeError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * @param {string} id - Identifier for a Custom Voice. Formatted as a UUID. + * @param {CustomVoices.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Hume.empathicVoice.BadRequestError} + * + * @example + * await client.empathicVoice.customVoices.getReturnCustomVoiceByCustomVoiceId("id") + */ + public async getReturnCustomVoiceByCustomVoiceId( + id: string, + requestOptions?: CustomVoices.RequestOptions + ): Promise { + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.environment)) ?? environments.HumeEnvironment.Production, + `v0/evi/custom_voices/${encodeURIComponent(id)}` + ), + method: "GET", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "hume", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + }, + contentType: "application/json", + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.empathicVoice.ReturnCustomVoice.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumeTimeoutError(); + case "unknown": + throw new errors.HumeError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * @param {string} id - Identifier for a Custom Voice. Formatted as a UUID. + * @param {Hume.empathicVoice.PostedCustomVoice} request + * @param {CustomVoices.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Hume.empathicVoice.BadRequestError} + * + * @example + * await client.empathicVoice.customVoices.addNewCustomVoiceVersion("id", { + * name: "name", + * baseVoice: Hume.empathicVoice.PostedCustomVoiceBaseVoice.Ito, + * parameterModel: "20240715-4parameter" + * }) + */ + public async addNewCustomVoiceVersion( + id: string, + request: Hume.empathicVoice.PostedCustomVoice, + requestOptions?: CustomVoices.RequestOptions + ): Promise { + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.environment)) ?? environments.HumeEnvironment.Production, + `v0/evi/custom_voices/${encodeURIComponent(id)}` + ), + method: "POST", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "hume", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + }, + contentType: "application/json", + requestType: "json", + body: serializers.empathicVoice.PostedCustomVoice.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), + timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return serializers.empathicVoice.ReturnCustomVoice.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumeTimeoutError(); + case "unknown": + throw new errors.HumeError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * @param {string} id - Identifier for a Custom Voice. Formatted as a UUID. + * @param {CustomVoices.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Hume.empathicVoice.BadRequestError} + * + * @example + * await client.empathicVoice.customVoices.deleteCustomVoice("id") + */ + public async deleteCustomVoice(id: string, requestOptions?: CustomVoices.RequestOptions): Promise { + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.environment)) ?? environments.HumeEnvironment.Production, + `v0/evi/custom_voices/${encodeURIComponent(id)}` + ), + method: "DELETE", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "hume", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + }, + contentType: "application/json", + requestType: "json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return; + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumeTimeoutError(); + case "unknown": + throw new errors.HumeError({ + message: _response.error.errorMessage, + }); + } + } + + /** + * @param {string} id - Identifier for a Custom Voice. Formatted as a UUID. 
+ * @param {Hume.empathicVoice.PostedCustomVoiceName} request + * @param {CustomVoices.RequestOptions} requestOptions - Request-specific configuration. + * + * @throws {@link Hume.empathicVoice.BadRequestError} + * + * @example + * await client.empathicVoice.customVoices.updateCustomVoiceName("string", { + * name: "string" + * }) + */ + public async updateCustomVoiceName( + id: string, + request: Hume.empathicVoice.PostedCustomVoiceName, + requestOptions?: CustomVoices.RequestOptions + ): Promise { + const _response = await (this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.environment)) ?? environments.HumeEnvironment.Production, + `v0/evi/custom_voices/${encodeURIComponent(id)}` + ), + method: "PATCH", + headers: { + "X-Fern-Language": "JavaScript", + "X-Fern-SDK-Name": "hume", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", + "X-Fern-Runtime": core.RUNTIME.type, + "X-Fern-Runtime-Version": core.RUNTIME.version, + ...(await this._getCustomAuthorizationHeaders()), + }, + contentType: "application/json", + requestType: "json", + body: serializers.empathicVoice.PostedCustomVoiceName.jsonOrThrow(request, { + unrecognizedObjectKeys: "strip", + }), + responseType: "text", + timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, + maxRetries: requestOptions?.maxRetries, + abortSignal: requestOptions?.abortSignal, + }); + if (_response.ok) { + return _response.body as string; + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.HumeTimeoutError(); + case "unknown": + throw new errors.HumeError({ + message: _response.error.errorMessage, + }); + } + } + + protected async _getCustomAuthorizationHeaders() { + const apiKeyValue = await core.Supplier.get(this._options.apiKey); + return { "X-Hume-Api-Key": apiKeyValue }; + } +} diff --git a/src/api/resources/empathicVoice/resources/customVoices/client/index.ts b/src/api/resources/empathicVoice/resources/customVoices/client/index.ts new file mode 100644 index 00000000..415726b7 --- /dev/null +++ b/src/api/resources/empathicVoice/resources/customVoices/client/index.ts @@ -0,0 +1 @@ +export * from "./requests"; diff --git a/src/api/resources/empathicVoice/resources/customVoices/client/requests/GetReturnCustomVoicesForUserRequest.ts b/src/api/resources/empathicVoice/resources/customVoices/client/requests/GetReturnCustomVoicesForUserRequest.ts new file mode 100644 index 00000000..8fb67e5f --- /dev/null +++ b/src/api/resources/empathicVoice/resources/customVoices/client/requests/GetReturnCustomVoicesForUserRequest.ts @@ -0,0 +1,26 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * @example + * {} + */ +export interface GetReturnCustomVoicesForUserRequest { + /** + * Specifies the page number to retrieve, enabling pagination. 
+ * + * This parameter uses zero-based indexing. For example, setting `page_number` to 0 retrieves the first page of results (items 0-9 if `page_size` is 10), setting `page_number` to 1 retrieves the second page (items 10-19), and so on. Defaults to 0, which retrieves the first page. + */ + pageNumber?: number; + /** + * Specifies the maximum number of results to include per page, enabling pagination. The value must be between 1 and 100, inclusive. + * + * For example, if `page_size` is set to 10, each page will include up to 10 items. Defaults to 10. + */ + pageSize?: number; + /** + * Filter to only include custom voices with this name. + */ + name?: string; +} diff --git a/src/api/resources/empathicVoice/resources/customVoices/client/requests/PostedCustomVoiceName.ts b/src/api/resources/empathicVoice/resources/customVoices/client/requests/PostedCustomVoiceName.ts new file mode 100644 index 00000000..3362ed44 --- /dev/null +++ b/src/api/resources/empathicVoice/resources/customVoices/client/requests/PostedCustomVoiceName.ts @@ -0,0 +1,14 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * @example + * { + * name: "string" + * } + */ +export interface PostedCustomVoiceName { + /** The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE") */ + name: string; +} diff --git a/src/api/resources/empathicVoice/resources/customVoices/client/requests/index.ts b/src/api/resources/empathicVoice/resources/customVoices/client/requests/index.ts new file mode 100644 index 00000000..e0d3e1cf --- /dev/null +++ b/src/api/resources/empathicVoice/resources/customVoices/client/requests/index.ts @@ -0,0 +1,2 @@ +export { type GetReturnCustomVoicesForUserRequest } from "./GetReturnCustomVoicesForUserRequest"; +export { type PostedCustomVoiceName } from "./PostedCustomVoiceName"; diff --git a/src/api/resources/empathicVoice/resources/customVoices/index.ts b/src/api/resources/empathicVoice/resources/customVoices/index.ts new file mode 100644 index 00000000..5ec76921 --- /dev/null +++ b/src/api/resources/empathicVoice/resources/customVoices/index.ts @@ -0,0 +1 @@ +export * from "./client"; diff --git a/src/api/resources/empathicVoice/resources/index.ts b/src/api/resources/empathicVoice/resources/index.ts index fe0b1638..2f4120c1 100644 --- a/src/api/resources/empathicVoice/resources/index.ts +++ b/src/api/resources/empathicVoice/resources/index.ts @@ -2,11 +2,13 @@ export * as chat from "./chat"; export * from "./chat/types"; export * as tools from "./tools"; export * as prompts from "./prompts"; +export * as customVoices from "./customVoices"; export * as configs from "./configs"; export * as chats from "./chats"; export * as chatGroups from "./chatGroups"; export * from "./tools/client/requests"; export * from "./prompts/client/requests"; +export * from "./customVoices/client/requests"; export * from "./configs/client/requests"; export * from "./chats/client/requests"; export * from "./chatGroups/client/requests"; diff --git a/src/api/resources/empathicVoice/resources/prompts/client/Client.ts b/src/api/resources/empathicVoice/resources/prompts/client/Client.ts index 2e644bea..92d3b7aa 100644 --- a/src/api/resources/empathicVoice/resources/prompts/client/Client.ts +++ b/src/api/resources/empathicVoice/resources/prompts/client/Client.ts @@ -33,6 +33,8 @@ export class Prompts { * @param {Hume.empathicVoice.PromptsListPromptsRequest} request * @param {Prompts.RequestOptions} requestOptions - 
Request-specific configuration. * + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.prompts.listPrompts({ * pageNumber: 0, @@ -69,8 +71,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -91,10 +93,22 @@ export class Prompts { }); } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { case "non-json": @@ -126,6 +140,8 @@ export class Prompts { * @param {Hume.empathicVoice.PostedPrompt} request * @param {Prompts.RequestOptions} requestOptions - Request-specific configuration. * + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.prompts.createPrompt({ * name: "Weather Assistant Prompt", @@ -145,8 +161,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -168,10 +184,22 @@ export class Prompts { } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { @@ -194,6 +222,8 @@ export class Prompts { * @param {Hume.empathicVoice.PromptsListPromptVersionsRequest} request * @param {Prompts.RequestOptions} requestOptions - Request-specific configuration. 
* + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.prompts.listPromptVersions("af699d45-2985-42cc-91b9-af9e5da3bac5") */ @@ -225,8 +255,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -248,10 +278,22 @@ export class Prompts { } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { @@ -274,6 +316,8 @@ export class Prompts { * @param {Hume.empathicVoice.PostedPromptVersion} request * @param {Prompts.RequestOptions} requestOptions - Request-specific configuration. * + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.prompts.createPromptVerison("af699d45-2985-42cc-91b9-af9e5da3bac5", { * text: "You are an updated version of an AI weather assistant providing users with accurate and up-to-date weather information. Respond to user queries concisely and clearly. Use simple language and avoid technical jargon. Provide temperature, precipitation, wind conditions, and any weather alerts. Include helpful tips if severe weather is expected.", @@ -294,8 +338,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -319,10 +363,22 @@ export class Prompts { } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { @@ -344,6 +400,8 @@ export class Prompts { * @param {string} id - Identifier for a Prompt. Formatted as a UUID. * @param {Prompts.RequestOptions} requestOptions - Request-specific configuration. 
* + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.prompts.deletePrompt("af699d45-2985-42cc-91b9-af9e5da3bac5") */ @@ -357,8 +415,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -374,10 +432,22 @@ export class Prompts { } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { @@ -400,6 +470,8 @@ export class Prompts { * @param {Hume.empathicVoice.PostedPromptName} request * @param {Prompts.RequestOptions} requestOptions - Request-specific configuration. * + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.prompts.updatePromptName("af699d45-2985-42cc-91b9-af9e5da3bac5", { * name: "Updated Weather Assistant Prompt Name" @@ -419,8 +491,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -438,10 +510,22 @@ export class Prompts { } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { @@ -463,11 +547,13 @@ export class Prompts { * @param {string} id - Identifier for a Prompt. Formatted as a UUID. * @param {number} version - Version number for a Prompt. * - * Prompts, as well as Configs and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. + * Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. * * Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. * @param {Prompts.RequestOptions} requestOptions - Request-specific configuration. 
* + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.prompts.getPromptVersion("af699d45-2985-42cc-91b9-af9e5da3bac5", 0) */ @@ -485,8 +571,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -507,10 +593,22 @@ export class Prompts { } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { @@ -532,11 +630,13 @@ export class Prompts { * @param {string} id - Identifier for a Prompt. Formatted as a UUID. * @param {number} version - Version number for a Prompt. * - * Prompts, as well as Configs and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. + * Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. * * Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. * @param {Prompts.RequestOptions} requestOptions - Request-specific configuration. * + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.prompts.deletePromptVersion("af699d45-2985-42cc-91b9-af9e5da3bac5", 1) */ @@ -554,8 +654,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -571,10 +671,22 @@ export class Prompts { } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { @@ -596,12 +708,14 @@ export class Prompts { * @param {string} id - Identifier for a Prompt. Formatted as a UUID. * @param {number} version - Version number for a Prompt. * - * Prompts, as well as Configs and Tools, are versioned. 
This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. + * Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. * * Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. * @param {Hume.empathicVoice.PostedPromptVersionDescription} request * @param {Prompts.RequestOptions} requestOptions - Request-specific configuration. * + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.prompts.updatePromptDescription("af699d45-2985-42cc-91b9-af9e5da3bac5", 1, { * versionDescription: "This is an updated version_description." @@ -622,8 +736,8 @@ export class Prompts { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -647,10 +761,22 @@ export class Prompts { } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { diff --git a/src/api/resources/empathicVoice/resources/tools/client/Client.ts b/src/api/resources/empathicVoice/resources/tools/client/Client.ts index 47b66d31..b99b4f04 100644 --- a/src/api/resources/empathicVoice/resources/tools/client/Client.ts +++ b/src/api/resources/empathicVoice/resources/tools/client/Client.ts @@ -33,6 +33,8 @@ export class Tools { * @param {Hume.empathicVoice.ToolsListToolsRequest} request * @param {Tools.RequestOptions} requestOptions - Request-specific configuration. 
* + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.tools.listTools({ * pageNumber: 0, @@ -69,8 +71,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -91,10 +93,22 @@ export class Tools { }); } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { case "non-json": @@ -129,6 +143,8 @@ export class Tools { * @param {Hume.empathicVoice.PostedUserDefinedTool} request * @param {Tools.RequestOptions} requestOptions - Request-specific configuration. * + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.tools.createTool({ * name: "get_current_weather", @@ -151,8 +167,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -176,10 +192,22 @@ export class Tools { } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { @@ -202,6 +230,8 @@ export class Tools { * @param {Hume.empathicVoice.ToolsListToolVersionsRequest} request * @param {Tools.RequestOptions} requestOptions - Request-specific configuration. 
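Because a Tool's `parameters` field must be a *stringified* JSON schema, it is usually easiest to build the schema as a plain object and `JSON.stringify` it. A short sketch, reusing the `client` constructed above and mirroring the generated example:

```typescript
const weatherSchema = {
    type: "object",
    properties: {
        location: { type: "string", description: "The city and state, e.g. San Francisco, CA" },
        format: {
            type: "string",
            enum: ["celsius", "fahrenheit", "kelvin"],
            description: "The temperature unit to use. Infer this from the user's location.",
        },
    },
    required: ["location", "format"],
};

await client.empathicVoice.tools.createTool({
    name: "get_current_weather",
    parameters: JSON.stringify(weatherSchema), // schema is sent as a string, not an object
});
```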
* + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.tools.listToolVersions("00183a3f-79ba-413d-9f3b-609864268bea") */ @@ -233,8 +263,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -256,10 +286,22 @@ export class Tools { } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { @@ -282,6 +324,8 @@ export class Tools { * @param {Hume.empathicVoice.PostedUserDefinedToolVersion} request * @param {Tools.RequestOptions} requestOptions - Request-specific configuration. * + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.tools.createToolVersion("00183a3f-79ba-413d-9f3b-609864268bea", { * parameters: "{ \"type\": \"object\", \"properties\": { \"location\": { \"type\": \"string\", \"description\": \"The city and state, e.g. San Francisco, CA\" }, \"format\": { \"type\": \"string\", \"enum\": [\"celsius\", \"fahrenheit\", \"kelvin\"], \"description\": \"The temperature unit to use. Infer this from the users location.\" } }, \"required\": [\"location\", \"format\"] }", @@ -304,8 +348,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -329,10 +373,22 @@ export class Tools { } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { @@ -354,6 +410,8 @@ export class Tools { * @param {string} id - Identifier for a Tool. Formatted as a UUID. * @param {Tools.RequestOptions} requestOptions - Request-specific configuration. 
* + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.tools.deleteTool("00183a3f-79ba-413d-9f3b-609864268bea") */ @@ -367,8 +425,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -384,10 +442,22 @@ export class Tools { } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { @@ -410,6 +480,8 @@ export class Tools { * @param {Hume.empathicVoice.PostedUserDefinedToolName} request * @param {Tools.RequestOptions} requestOptions - Request-specific configuration. * + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.tools.updateToolName("00183a3f-79ba-413d-9f3b-609864268bea", { * name: "get_current_temperature" @@ -429,8 +501,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -450,10 +522,22 @@ export class Tools { } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { @@ -475,11 +559,13 @@ export class Tools { * @param {string} id - Identifier for a Tool. Formatted as a UUID. * @param {number} version - Version number for a Tool. * - * Tools, as well as Configs and Prompts, are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. + * Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. * * Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. * @param {Tools.RequestOptions} requestOptions - Request-specific configuration. 
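Version numbers are plain integers that increment on every update, so a specific iteration of a Tool can always be fetched (or deleted) by pinning it explicitly. A small sketch, reusing `client` and the Tool id from the generated examples:

```typescript
// Fetch iteration 1 of the Tool; later updates do not overwrite it.
const toolVersion = await client.empathicVoice.tools.getToolVersion(
    "00183a3f-79ba-413d-9f3b-609864268bea",
    1
);
console.log(toolVersion);
```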
* + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.tools.getToolVersion("00183a3f-79ba-413d-9f3b-609864268bea", 1) */ @@ -497,8 +583,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -519,10 +605,22 @@ export class Tools { } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { @@ -544,11 +642,13 @@ export class Tools { * @param {string} id - Identifier for a Tool. Formatted as a UUID. * @param {number} version - Version number for a Tool. * - * Tools, as well as Configs and Prompts, are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. + * Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. * * Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. * @param {Tools.RequestOptions} requestOptions - Request-specific configuration. * + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.tools.deleteToolVersion("00183a3f-79ba-413d-9f3b-609864268bea", 1) */ @@ -562,8 +662,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -579,10 +679,22 @@ export class Tools { } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { @@ -604,12 +716,14 @@ export class Tools { * @param {string} id - Identifier for a Tool. Formatted as a UUID. * @param {number} version - Version number for a Tool. * - * Tools, as well as Configs and Prompts, are versioned. 
This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. + * Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. * * Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. * @param {Hume.empathicVoice.PostedUserDefinedToolVersionDescription} request * @param {Tools.RequestOptions} requestOptions - Request-specific configuration. * + * @throws {@link Hume.empathicVoice.BadRequestError} + * * @example * await client.empathicVoice.tools.updateToolDescription("00183a3f-79ba-413d-9f3b-609864268bea", 1, { * versionDescription: "Fetches current temperature, precipitation, wind speed, AQI, and other weather conditions. Uses Celsius, Fahrenheit, or kelvin depending on user's region." @@ -630,8 +744,8 @@ export class Tools { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -655,10 +769,22 @@ export class Tools { } if (_response.error.reason === "status-code") { - throw new errors.HumeError({ - statusCode: _response.error.statusCode, - body: _response.error.body, - }); + switch (_response.error.statusCode) { + case 400: + throw new Hume.empathicVoice.BadRequestError( + serializers.empathicVoice.ErrorResponse.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.HumeError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } } switch (_response.error.reason) { diff --git a/src/api/resources/empathicVoice/types/ErrorResponse.ts b/src/api/resources/empathicVoice/types/ErrorResponse.ts new file mode 100644 index 00000000..b9f0c384 --- /dev/null +++ b/src/api/resources/empathicVoice/types/ErrorResponse.ts @@ -0,0 +1,8 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export interface ErrorResponse { + error?: string; + message?: string; +} diff --git a/src/api/resources/empathicVoice/types/PostedConfigPromptSpec.ts b/src/api/resources/empathicVoice/types/PostedConfigPromptSpec.ts new file mode 100644 index 00000000..862cbd9d --- /dev/null +++ b/src/api/resources/empathicVoice/types/PostedConfigPromptSpec.ts @@ -0,0 +1,15 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * Identifies which prompt to use in a a config OR how to create a new prompt to use in the config + */ +export interface PostedConfigPromptSpec { + /** Identifier for a Prompt. Formatted as a UUID. */ + id?: string; + /** Version number for a Prompt. Version numbers should be integers. The combination of configId and version number is unique. */ + version?: number; + /** Text used to create a new prompt for a particular config. 
*/ + text?: string; +} diff --git a/src/api/resources/empathicVoice/types/PostedCustomVoice.ts b/src/api/resources/empathicVoice/types/PostedCustomVoice.ts index 1282375a..486d1c33 100644 --- a/src/api/resources/empathicVoice/types/PostedCustomVoice.ts +++ b/src/api/resources/empathicVoice/types/PostedCustomVoice.ts @@ -2,18 +2,26 @@ * This file was auto-generated by Fern from our API Definition. */ +import * as Hume from "../../../index"; + /** - * A custom voice specifications posted to the server + * A Custom Voice specification to be associated with this Config. + * + * If a Custom Voice specification is not provided then the [name](/reference/empathic-voice-interface-evi/configs/create-config#request.body.voice.name) of a base voice or previously created Custom Voice must be provided. + * + * See our [Voices guide](/docs/empathic-voice-interface-evi/voices) for a tutorial on how to craft a Custom Voice. */ export interface PostedCustomVoice { - /** String with the name of the voice to use. Maximum length of 75 characters. Will be converted to all-uppercase. */ + /** The name of the Custom Voice. Maximum length of 75 characters. Will be converted to all-uppercase. (e.g., "sample voice" becomes "SAMPLE VOICE") */ name: string; - /** The voice the custom voice is based off of. */ - baseVoice: string; - /** The speech rate multiplier for this custom voice. */ - speechRateMultiplier?: number; - /** The name of the parameter model used to define which attributes are used by `parameters`. */ - parameterModel: string; - /** Voice specification for a Config. */ - parameters?: Record; + /** Specifies the base voice used to create the Custom Voice. */ + baseVoice: Hume.empathicVoice.PostedCustomVoiceBaseVoice; + /** The name of the parameter model used to define which attributes are used by the `parameters` field. Currently, only `20240715-4parameter` is supported as the parameter model. */ + parameterModel: "20240715-4parameter"; + /** + * The specified attributes of a Custom Voice. + * + * If no parameters are specified then all attributes will be set to their defaults, meaning no modfications will be made to the base voice. + */ + parameters?: Hume.empathicVoice.PostedCustomVoiceParameters; } diff --git a/src/api/resources/empathicVoice/types/PostedCustomVoiceBaseVoice.ts b/src/api/resources/empathicVoice/types/PostedCustomVoiceBaseVoice.ts new file mode 100644 index 00000000..7da60c48 --- /dev/null +++ b/src/api/resources/empathicVoice/types/PostedCustomVoiceBaseVoice.ts @@ -0,0 +1,18 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * Specifies the base voice used to create the Custom Voice. + */ +export type PostedCustomVoiceBaseVoice = "ITO" | "KORA" | "DACHER" | "AURA" | "FINN" | "STELLA" | "WHIMSY"; + +export const PostedCustomVoiceBaseVoice = { + Ito: "ITO", + Kora: "KORA", + Dacher: "DACHER", + Aura: "AURA", + Finn: "FINN", + Stella: "STELLA", + Whimsy: "WHIMSY", +} as const; diff --git a/src/api/resources/empathicVoice/types/PostedCustomVoiceName.ts b/src/api/resources/empathicVoice/types/PostedCustomVoiceName.ts deleted file mode 100644 index 48d55c50..00000000 --- a/src/api/resources/empathicVoice/types/PostedCustomVoiceName.ts +++ /dev/null @@ -1,11 +0,0 @@ -/** - * This file was auto-generated by Fern from our API Definition. - */ - -/** - * A custom voice name change to be posted to the server - */ -export interface PostedCustomVoiceName { - /** String with the name of the voice to use. Maximum length of 75 characters. 
Will be converted to all-uppercase. */ - name: string; -} diff --git a/src/api/resources/empathicVoice/types/PostedCustomVoiceParameters.ts b/src/api/resources/empathicVoice/types/PostedCustomVoiceParameters.ts new file mode 100644 index 00000000..c820eff5 --- /dev/null +++ b/src/api/resources/empathicVoice/types/PostedCustomVoiceParameters.ts @@ -0,0 +1,35 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * The specified attributes of a Custom Voice. + * + * If no parameters are specified then all attributes will be set to their defaults, meaning no modfications will be made to the base voice. + */ +export interface PostedCustomVoiceParameters { + /** + * The vocalization of gender, ranging between masculine and feminine. + * + * The default value is `0`, with a minimum of `-100` (more masculine) and a maximum of `100` (more feminine). A value of `0` leaves this parameter unchanged from the base voice. + */ + gender?: number; + /** + * The texture of the voice, ranging between bright and husky. + * + * The default value is `0`, with a minimum of `-100` (brighter) and a maximum of `100` (huskier). A value of `0` leaves this parameter unchanged from the base voice. + */ + huskiness?: number; + /** + * The openness of the voice, ranging between resonant and nasal. + * + * The default value is `0`, with a minimum of `-100` (more resonant) and a maximum of `100` (more nasal). A value of `0` leaves this parameter unchanged from the base voice. + */ + nasality?: number; + /** + * The frequency of the voice, ranging between low and high. + * + * The default value is `0`, with a minimum of `-100` (lower) and a maximum of `100` (higher). A value of `0` leaves this parameter unchanged from the base voice. + */ + pitch?: number; +} diff --git a/src/api/resources/empathicVoice/types/PostedPromptSpec.ts b/src/api/resources/empathicVoice/types/PostedPromptSpec.ts index b97d5e24..0ff80183 100644 --- a/src/api/resources/empathicVoice/types/PostedPromptSpec.ts +++ b/src/api/resources/empathicVoice/types/PostedPromptSpec.ts @@ -6,14 +6,5 @@ * A Prompt associated with this Config. */ export interface PostedPromptSpec { - /** Identifier for a Prompt. Formatted as a UUID. */ - id: string; - /** - * Version number for a Prompt. - * - * Prompts, as well as Configs and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. - * - * Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. - */ - version?: number; + version?: unknown; } diff --git a/src/api/resources/empathicVoice/types/PostedUserDefinedToolSpec.ts b/src/api/resources/empathicVoice/types/PostedUserDefinedToolSpec.ts index 001522a1..d6054d98 100644 --- a/src/api/resources/empathicVoice/types/PostedUserDefinedToolSpec.ts +++ b/src/api/resources/empathicVoice/types/PostedUserDefinedToolSpec.ts @@ -11,7 +11,7 @@ export interface PostedUserDefinedToolSpec { /** * Version number for a Tool. * - * Tools, as well as Configs and Prompts, are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. + * Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. 
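The new Custom Voice parameter model exposes four attributes (`gender`, `huskiness`, `nasality`, `pitch`), each ranging from -100 to 100, with 0 leaving the base voice unchanged. A sketch of a `PostedCustomVoice` specification built from these types, reusing the `Hume` import from the first sketch:

```typescript
const customVoice: Hume.empathicVoice.PostedCustomVoice = {
    name: "sample voice", // stored as "SAMPLE VOICE"
    baseVoice: "KORA",
    parameterModel: "20240715-4parameter", // currently the only supported parameter model
    parameters: {
        gender: -30,   // toward masculine
        huskiness: 20,  // toward husky
        nasality: 0,    // unchanged from the base voice
        pitch: 15,      // slightly higher
    },
};
```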
* * Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. */ diff --git a/src/api/resources/empathicVoice/types/PostedVoice.ts b/src/api/resources/empathicVoice/types/PostedVoice.ts index b5ac53f1..1ba08d83 100644 --- a/src/api/resources/empathicVoice/types/PostedVoice.ts +++ b/src/api/resources/empathicVoice/types/PostedVoice.ts @@ -10,6 +10,13 @@ import * as Hume from "../../../index"; export interface PostedVoice { /** The provider of the voice to use. Currently, only `HUME_AI` is supported as the voice provider. */ provider: "HUME_AI"; - /** String with the name of the voice to use. Maximum length of 75 characters. Will be converted to all-uppercase. */ - name?: Hume.empathicVoice.PostedVoiceName; + /** + * Specifies the name of the voice to use. + * + * This can be either the name of a previously created Custom Voice or one of our 7 base voices: `ITO`, `KORA`, `DACHER`, `AURA`, `FINN`, `WHIMSY`, or `STELLA`. + * + * The name will be automatically converted to uppercase (e.g., "Ito" becomes "ITO"). If a name is not specified, then a [Custom Voice](/reference/empathic-voice-interface-evi/configs/create-config#request.body.voice.custom_voice) specification must be provided. + */ + name?: string; + customVoice?: Hume.empathicVoice.PostedCustomVoice; } diff --git a/src/api/resources/empathicVoice/types/PostedVoiceName.ts b/src/api/resources/empathicVoice/types/PostedVoiceName.ts deleted file mode 100644 index 56952529..00000000 --- a/src/api/resources/empathicVoice/types/PostedVoiceName.ts +++ /dev/null @@ -1,14 +0,0 @@ -/** - * This file was auto-generated by Fern from our API Definition. - */ - -/** - * String with the name of the voice to use. Maximum length of 75 characters. Will be converted to all-uppercase. - */ -export type PostedVoiceName = "ITO" | "DACHER" | "KORA"; - -export const PostedVoiceName = { - Ito: "ITO", - Dacher: "DACHER", - Kora: "KORA", -} as const; diff --git a/src/api/resources/empathicVoice/types/ReturnConfig.ts b/src/api/resources/empathicVoice/types/ReturnConfig.ts index 12c25021..7a24472b 100644 --- a/src/api/resources/empathicVoice/types/ReturnConfig.ts +++ b/src/api/resources/empathicVoice/types/ReturnConfig.ts @@ -13,11 +13,13 @@ export interface ReturnConfig { /** * Version number for a Config. * - * Configs, as well as Prompts and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. + * Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. * * Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. */ version?: number; + /** Specifies the EVI version to use. Use `"1"` for version 1, or `"2"` for the latest enhanced version. For a detailed comparison of the two versions, refer to our [guide](/docs/empathic-voice-interface-evi/evi-2). */ + eviVersion?: string; /** An optional description of the Config version. */ versionDescription?: string; /** Name applied to all versions of a particular Config. 
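`PostedVoice` now accepts either a plain `name` (one of the base voices or a previously created Custom Voice, uppercased automatically) or an inline `customVoice` specification; one of the two must be supplied. Two hedged sketches, reusing the `customVoice` object from the previous snippet:

```typescript
// Reference a voice by name...
const byName: Hume.empathicVoice.PostedVoice = {
    provider: "HUME_AI",
    name: "ITO",
};

// ...or embed a full Custom Voice specification instead of a name.
const withCustomVoice: Hume.empathicVoice.PostedVoice = {
    provider: "HUME_AI",
    customVoice,
};
```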
*/ diff --git a/src/api/resources/empathicVoice/types/ReturnConfigSpec.ts b/src/api/resources/empathicVoice/types/ReturnConfigSpec.ts index 48cf45ca..d7c02d8c 100644 --- a/src/api/resources/empathicVoice/types/ReturnConfigSpec.ts +++ b/src/api/resources/empathicVoice/types/ReturnConfigSpec.ts @@ -11,7 +11,7 @@ export interface ReturnConfigSpec { /** * Version number for a Config. * - * Configs, as well as Prompts and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. + * Configs, Prompts, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. * * Version numbers are integer values representing different iterations of the Config. Each update to the Config increments its version number. */ diff --git a/src/api/resources/empathicVoice/types/ReturnCustomVoice.ts b/src/api/resources/empathicVoice/types/ReturnCustomVoice.ts index 7acca5c4..7864cd9f 100644 --- a/src/api/resources/empathicVoice/types/ReturnCustomVoice.ts +++ b/src/api/resources/empathicVoice/types/ReturnCustomVoice.ts @@ -2,26 +2,32 @@ * This file was auto-generated by Fern from our API Definition. */ +import * as Hume from "../../../index"; + /** - * A custom voice specification returned from the server + * A Custom Voice specification associated with this Config. */ export interface ReturnCustomVoice { /** Identifier for a Custom Voice. Formatted as a UUID. */ id: string; - /** Version number for a Custom Voice. Version numbers should be integers. The combination of custom_voice_id and version number is unique. */ + /** + * Version number for a Custom Voice. + * + * Custom Voices, Prompts, Configs, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine configurations and revert to previous versions if needed. + * + * Version numbers are integer values representing different iterations of the Custom Voice. Each update to the Custom Voice increments its version number. + */ version: number; - /** String with the name of the voice to use. Maximum length of 75 characters. Will be converted to all-uppercase. */ + /** The name of the Custom Voice. Maximum length of 75 characters. */ name: string; - /** The timestamp when the first version of this prompt was created. */ + /** Time at which the Custom Voice was created. Measured in seconds since the Unix epoch. */ createdOn: number; - /** The timestamp when this version of the prompt was created. */ + /** Time at which the Custom Voice was last modified. Measured in seconds since the Unix epoch. */ modifiedOn: number; - /** The voice the custom voice is based off of. */ - baseVoice: string; - /** The speech rate multiplier for this custom voice. */ - speechRateMultiplier?: number; - /** The name of the parameter model used to define which attributes are used by `parameters`. */ - parameterModel: string; - /** Voice specification for a Config. */ - parameters: Record; + /** The base voice used to create the Custom Voice. */ + baseVoice: Hume.empathicVoice.ReturnCustomVoiceBaseVoice; + /** The name of the parameter model used to define which attributes are used by the `parameters` field. Currently, only `20240715-4parameter` is supported as the parameter model. */ + parameterModel: "20240715-4parameter"; + /** The specified attributes of a Custom Voice. 
If a parameter's value is `0` (default), it will not be included in the response. */ + parameters: Hume.empathicVoice.ReturnCustomVoiceParameters; } diff --git a/src/api/resources/empathicVoice/types/ReturnCustomVoiceBaseVoice.ts b/src/api/resources/empathicVoice/types/ReturnCustomVoiceBaseVoice.ts new file mode 100644 index 00000000..34bf3edd --- /dev/null +++ b/src/api/resources/empathicVoice/types/ReturnCustomVoiceBaseVoice.ts @@ -0,0 +1,18 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * The base voice used to create the Custom Voice. + */ +export type ReturnCustomVoiceBaseVoice = "ITO" | "KORA" | "DACHER" | "AURA" | "FINN" | "STELLA" | "WHIMSY"; + +export const ReturnCustomVoiceBaseVoice = { + Ito: "ITO", + Kora: "KORA", + Dacher: "DACHER", + Aura: "AURA", + Finn: "FINN", + Stella: "STELLA", + Whimsy: "WHIMSY", +} as const; diff --git a/src/api/resources/empathicVoice/types/ReturnCustomVoiceParameters.ts b/src/api/resources/empathicVoice/types/ReturnCustomVoiceParameters.ts new file mode 100644 index 00000000..d0e62f28 --- /dev/null +++ b/src/api/resources/empathicVoice/types/ReturnCustomVoiceParameters.ts @@ -0,0 +1,33 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * The specified attributes of a Custom Voice. If a parameter's value is `0` (default), it will not be included in the response. + */ +export interface ReturnCustomVoiceParameters { + /** + * The vocalization of gender, ranging between masculine and feminine. + * + * The default value is `0`, with a minimum of `-100` (more masculine) and a maximum of `100` (more feminine). A value of `0` leaves this parameter unchanged from the base voice. + */ + gender?: number; + /** + * The texture of the voice, ranging between bright and husky. + * + * The default value is `0`, with a minimum of `-100` (brighter) and a maximum of `100` (huskier). A value of `0` leaves this parameter unchanged from the base voice. + */ + huskiness?: number; + /** + * The openness of the voice, ranging between resonant and nasal. + * + * The default value is `0`, with a minimum of `-100` (more resonant) and a maximum of `100` (more nasal). A value of `0` leaves this parameter unchanged from the base voice. + */ + nasality?: number; + /** + * The frequency of the voice, ranging between low and high. + * + * The default value is `0`, with a minimum of `-100` (lower) and a maximum of `100` (higher). A value of `0` leaves this parameter unchanged from the base voice. + */ + pitch?: number; +} diff --git a/src/api/resources/empathicVoice/types/ReturnPagedCustomVoices.ts b/src/api/resources/empathicVoice/types/ReturnPagedCustomVoices.ts index 7ad10675..df9526c9 100644 --- a/src/api/resources/empathicVoice/types/ReturnPagedCustomVoices.ts +++ b/src/api/resources/empathicVoice/types/ReturnPagedCustomVoices.ts @@ -8,12 +8,20 @@ import * as Hume from "../../../index"; * A paginated list of custom voices returned from the server */ export interface ReturnPagedCustomVoices { - /** The page number of the returned results. */ + /** + * The page number of the returned list. + * + * This value corresponds to the `page_number` parameter specified in the request. Pagination uses zero-based indexing. + */ pageNumber: number; - /** The number of results returned per page. */ + /** + * The maximum number of items returned per page. + * + * This value corresponds to the `page_size` parameter specified in the request. 
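Listing Custom Voices is paginated with a zero-based `pageNumber` and a `pageSize` between 1 and 100, and the paged response echoes those values back along with `totalPages`. The listing method itself is not shown in this diff, so the `listCustomVoices` name below is an assumption; the rest is a sketch of reading the paged result:

```typescript
// Hypothetical listing call on the new customVoices resource.
const page = await client.empathicVoice.customVoices.listCustomVoices({
    pageNumber: 0, // zero-based: the first page
    pageSize: 10,
});

console.log(`page ${page.pageNumber + 1} of ${page.totalPages}`);
for (const voice of page.customVoicesPage) {
    console.log(voice.name, voice.baseVoice, voice.parameters);
}
```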
+ */ pageSize: number; - /** The total number of pages in the collection */ + /** The total number of pages in the collection. */ totalPages: number; - /** List of custom voices returned for the specified page number and page size. */ + /** List of Custom Voices for the specified `page_number` and `page_size`. */ customVoicesPage: Hume.empathicVoice.ReturnCustomVoice[]; } diff --git a/src/api/resources/empathicVoice/types/ReturnPrompt.ts b/src/api/resources/empathicVoice/types/ReturnPrompt.ts index a9f7adfe..48dbc81f 100644 --- a/src/api/resources/empathicVoice/types/ReturnPrompt.ts +++ b/src/api/resources/empathicVoice/types/ReturnPrompt.ts @@ -13,7 +13,7 @@ export interface ReturnPrompt { /** * Version number for a Prompt. * - * Prompts, as well as Configs and Tools, are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. + * Prompts, Configs, Custom Voices, and Tools are versioned. This versioning system supports iterative development, allowing you to progressively refine prompts and revert to previous versions if needed. * * Version numbers are integer values representing different iterations of the Prompt. Each update to the Prompt increments its version number. */ diff --git a/src/api/resources/empathicVoice/types/ReturnUserDefinedTool.ts b/src/api/resources/empathicVoice/types/ReturnUserDefinedTool.ts index 8a46a6de..fd0ed63f 100644 --- a/src/api/resources/empathicVoice/types/ReturnUserDefinedTool.ts +++ b/src/api/resources/empathicVoice/types/ReturnUserDefinedTool.ts @@ -15,7 +15,7 @@ export interface ReturnUserDefinedTool { /** * Version number for a Tool. * - * Tools, as well as Configs and Prompts, are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. + * Tools, Configs, Custom Voices, and Prompts are versioned. This versioning system supports iterative development, allowing you to progressively refine tools and revert to previous versions if needed. * * Version numbers are integer values representing different iterations of the Tool. Each update to the Tool increments its version number. */ diff --git a/src/api/resources/empathicVoice/types/ReturnVoice.ts b/src/api/resources/empathicVoice/types/ReturnVoice.ts index 0c3604eb..b2368f0c 100644 --- a/src/api/resources/empathicVoice/types/ReturnVoice.ts +++ b/src/api/resources/empathicVoice/types/ReturnVoice.ts @@ -10,6 +10,11 @@ import * as Hume from "../../../index"; export interface ReturnVoice { /** The provider of the voice to use. Currently, only `HUME_AI` is supported as the voice provider. */ provider: "HUME_AI"; - /** String with the name of the voice to use. Maximum length of 75 characters. Will be converted to all-uppercase. */ - name?: Hume.empathicVoice.ReturnVoiceName; + /** + * The name of the specified voice. + * + * This will either be the name of a previously created Custom Voice or one of our 7 base voices: `ITO`, `KORA`, `DACHER`, `AURA`, `FINN`, `WHIMSY`, or `STELLA`. + */ + name?: string; + customVoice: Hume.empathicVoice.ReturnCustomVoice; } diff --git a/src/api/resources/empathicVoice/types/ReturnVoiceName.ts b/src/api/resources/empathicVoice/types/ReturnVoiceName.ts deleted file mode 100644 index 73016fb0..00000000 --- a/src/api/resources/empathicVoice/types/ReturnVoiceName.ts +++ /dev/null @@ -1,14 +0,0 @@ -/** - * This file was auto-generated by Fern from our API Definition. 
- */ - -/** - * String with the name of the voice to use. Maximum length of 75 characters. Will be converted to all-uppercase. - */ -export type ReturnVoiceName = "ITO" | "DACHER" | "KORA"; - -export const ReturnVoiceName = { - Ito: "ITO", - Dacher: "DACHER", - Kora: "KORA", -} as const; diff --git a/src/api/resources/empathicVoice/types/index.ts b/src/api/resources/empathicVoice/types/index.ts index 2768f85f..c9338062 100644 --- a/src/api/resources/empathicVoice/types/index.ts +++ b/src/api/resources/empathicVoice/types/index.ts @@ -1,24 +1,28 @@ +export * from "./ErrorResponse"; export * from "./ReturnUserDefinedToolToolType"; export * from "./ReturnUserDefinedToolVersionType"; export * from "./ReturnUserDefinedTool"; export * from "./ReturnPromptVersionType"; export * from "./ReturnPrompt"; +export * from "./PostedCustomVoiceBaseVoice"; +export * from "./PostedCustomVoiceParameters"; export * from "./PostedCustomVoice"; +export * from "./ReturnCustomVoiceBaseVoice"; +export * from "./ReturnCustomVoiceParameters"; export * from "./ReturnCustomVoice"; export * from "./PostedBuiltinToolName"; export * from "./PostedBuiltinTool"; +export * from "./PostedConfigPromptSpec"; export * from "./PostedEllmModel"; export * from "./PostedEventMessageSpec"; export * from "./PostedEventMessageSpecs"; export * from "./PostedLanguageModelModelProvider"; export * from "./PostedLanguageModel"; -export * from "./PostedPromptSpec"; export * from "./PostedTimeoutSpec"; export * from "./PostedTimeoutSpecsInactivity"; export * from "./PostedTimeoutSpecsMaxDuration"; export * from "./PostedTimeoutSpecs"; export * from "./PostedUserDefinedToolSpec"; -export * from "./PostedVoiceName"; export * from "./PostedVoice"; export * from "./ReturnBuiltinToolToolType"; export * from "./ReturnBuiltinTool"; @@ -30,9 +34,7 @@ export * from "./ReturnLanguageModelModelProvider"; export * from "./ReturnLanguageModel"; export * from "./ReturnTimeoutSpec"; export * from "./ReturnTimeoutSpecs"; -export * from "./ReturnVoiceName"; export * from "./ReturnVoice"; -export * from "./PostedCustomVoiceName"; export * from "./ReturnPagedUserDefinedTools"; export * from "./ReturnPagedPrompts"; export * from "./ReturnPagedCustomVoices"; @@ -56,6 +58,7 @@ export * from "./ReturnPagedChatGroups"; export * from "./ReturnChatGroupPagedChats"; export * from "./ReturnChatGroupPagedEventsPaginationDirection"; export * from "./ReturnChatGroupPagedEvents"; +export * from "./PostedPromptSpec"; export * from "./AssistantInput"; export * from "./AudioConfiguration"; export * from "./AudioInput"; diff --git a/src/api/resources/expressionMeasurement/resources/batch/client/Client.ts b/src/api/resources/expressionMeasurement/resources/batch/client/Client.ts index ae377c62..c72634b6 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/client/Client.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/client/Client.ts @@ -35,16 +35,16 @@ export class Batch { /** * Sort and filter jobs. * - * @param {Hume.expressionMeasurement.BatchListJobsRequest} request + * @param {Hume.expressionMeasurement.batch.BatchListJobsRequest} request * @param {Batch.RequestOptions} requestOptions - Request-specific configuration. 
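Expression Measurement batch types now live one level deeper, under `Hume.expressionMeasurement.batch.*` rather than `Hume.expressionMeasurement.*`, so existing type annotations need the extra `batch` segment. A sketch of the updated usage, limited to filter values that appear in this diff:

```typescript
const listRequest: Hume.expressionMeasurement.batch.BatchListJobsRequest = {
    limit: 10,
    status: "FAILED",  // only jobs that could not be completed
    sortBy: "ended",   // sort by ended_timestamp_ms
    direction: "desc", // reverse-chronological, newest first
};

const jobs = await client.expressionMeasurement.batch.listJobs(listRequest);
console.log(`found ${jobs.length} jobs`);
```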
* * @example * await client.expressionMeasurement.batch.listJobs() */ public async listJobs( - request: Hume.expressionMeasurement.BatchListJobsRequest = {}, + request: Hume.expressionMeasurement.batch.BatchListJobsRequest = {}, requestOptions?: Batch.RequestOptions - ): Promise { + ): Promise { const { limit, status, when, timestampMs, sortBy, direction } = request; const _queryParams: Record = {}; if (limit != null) { @@ -84,8 +84,8 @@ export class Batch { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -131,7 +131,7 @@ export class Batch { /** * Start a new measurement inference job. * - * @param {Hume.expressionMeasurement.InferenceBaseRequest} request + * @param {Hume.expressionMeasurement.batch.InferenceBaseRequest} request * @param {Batch.RequestOptions} requestOptions - Request-specific configuration. * * @example @@ -141,9 +141,9 @@ export class Batch { * }) */ public async startInferenceJob( - request: Hume.expressionMeasurement.InferenceBaseRequest, + request: Hume.expressionMeasurement.batch.InferenceBaseRequest, requestOptions?: Batch.RequestOptions - ): Promise { + ): Promise { const _response = await (this._options.fetcher ?? core.fetcher)({ url: urlJoin( (await core.Supplier.get(this._options.environment)) ?? environments.HumeEnvironment.Production, @@ -153,15 +153,15 @@ export class Batch { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), }, contentType: "application/json", requestType: "json", - body: serializers.expressionMeasurement.InferenceBaseRequest.jsonOrThrow(request, { + body: serializers.expressionMeasurement.batch.InferenceBaseRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip", }), timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 60000, @@ -169,7 +169,7 @@ export class Batch { abortSignal: requestOptions?.abortSignal, }); if (_response.ok) { - return serializers.expressionMeasurement.JobId.parseOrThrow(_response.body, { + return serializers.expressionMeasurement.batch.JobId.parseOrThrow(_response.body, { unrecognizedObjectKeys: "passthrough", allowUnrecognizedUnionMembers: true, allowUnrecognizedEnumValues: true, @@ -211,7 +211,7 @@ export class Batch { public async getJobDetails( id: string, requestOptions?: Batch.RequestOptions - ): Promise { + ): Promise { const _response = await (this._options.fetcher ?? core.fetcher)({ url: urlJoin( (await core.Supplier.get(this._options.environment)) ?? 
environments.HumeEnvironment.Production, @@ -221,8 +221,8 @@ export class Batch { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -234,7 +234,7 @@ export class Batch { abortSignal: requestOptions?.abortSignal, }); if (_response.ok) { - return serializers.expressionMeasurement.UnionJob.parseOrThrow(_response.body, { + return serializers.expressionMeasurement.batch.UnionJob.parseOrThrow(_response.body, { unrecognizedObjectKeys: "passthrough", allowUnrecognizedUnionMembers: true, allowUnrecognizedEnumValues: true, @@ -276,7 +276,7 @@ export class Batch { public async getJobPredictions( id: string, requestOptions?: Batch.RequestOptions - ): Promise { + ): Promise { const _response = await (this._options.fetcher ?? core.fetcher)({ url: urlJoin( (await core.Supplier.get(this._options.environment)) ?? environments.HumeEnvironment.Production, @@ -286,8 +286,8 @@ export class Batch { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -342,8 +342,8 @@ export class Batch { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -385,7 +385,7 @@ export class Batch { * Start a new batch inference job. * * @param {File[] | fs.ReadStream[] | Blob[]} file - * @param {Hume.expressionMeasurement.BatchStartInferenceJobFromLocalFileRequest} request + * @param {Hume.expressionMeasurement.batch.BatchStartInferenceJobFromLocalFileRequest} request * @param {Batch.RequestOptions} requestOptions - Request-specific configuration. 
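Local-file jobs upload the files as multipart form data plus an optional `json` field that the client serializes for you. A rough sketch only: the job configuration shape (`InferenceBaseRequest`) is not shown in this excerpt, so the empty request below simply relies on server-side defaults.

```typescript
import * as fs from "fs";

const job = await client.expressionMeasurement.batch.startInferenceJobFromLocalFile(
    [fs.createReadStream("./sample.mp3")],
    {} // no explicit configuration; see InferenceBaseRequest for available options
);
console.log("started job:", job);
```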
* * @example @@ -393,9 +393,9 @@ export class Batch { */ public async startInferenceJobFromLocalFile( file: File[] | fs.ReadStream[] | Blob[], - request: Hume.expressionMeasurement.BatchStartInferenceJobFromLocalFileRequest, + request: Hume.expressionMeasurement.batch.BatchStartInferenceJobFromLocalFileRequest, requestOptions?: Batch.RequestOptions - ): Promise { + ): Promise { const _request = await core.newFormData(); if (request.json != null) { await _request.append("json", JSON.stringify(request.json)); @@ -415,8 +415,8 @@ export class Batch { headers: { "X-Fern-Language": "JavaScript", "X-Fern-SDK-Name": "hume", - "X-Fern-SDK-Version": "0.8.9", - "User-Agent": "hume/0.8.9", + "X-Fern-SDK-Version": "0.8.11", + "User-Agent": "hume/0.8.11", "X-Fern-Runtime": core.RUNTIME.type, "X-Fern-Runtime-Version": core.RUNTIME.version, ...(await this._getCustomAuthorizationHeaders()), @@ -430,7 +430,7 @@ export class Batch { abortSignal: requestOptions?.abortSignal, }); if (_response.ok) { - return serializers.expressionMeasurement.JobId.parseOrThrow(_response.body, { + return serializers.expressionMeasurement.batch.JobId.parseOrThrow(_response.body, { unrecognizedObjectKeys: "passthrough", allowUnrecognizedUnionMembers: true, allowUnrecognizedEnumValues: true, diff --git a/src/api/resources/expressionMeasurement/resources/batch/client/requests/BatchListJobsRequest.ts b/src/api/resources/expressionMeasurement/resources/batch/client/requests/BatchListJobsRequest.ts index bb1c3221..6bf6ae73 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/client/requests/BatchListJobsRequest.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/client/requests/BatchListJobsRequest.ts @@ -24,11 +24,11 @@ export interface BatchListJobsRequest { * * - `FAILED`: The job encountered an error and could not be completed successfully. */ - status?: Hume.expressionMeasurement.Status | Hume.expressionMeasurement.Status[]; + status?: Hume.expressionMeasurement.batch.Status | Hume.expressionMeasurement.batch.Status[]; /** * Specify whether to include jobs created before or after a given `timestamp_ms`. */ - when?: Hume.expressionMeasurement.When; + when?: Hume.expressionMeasurement.batch.When; /** * Provide a timestamp in milliseconds to filter jobs. * @@ -44,7 +44,7 @@ export interface BatchListJobsRequest { * * - `ended`: Sort jobs by the time processing ended, indicated by `ended_timestamp_ms`. */ - sortBy?: Hume.expressionMeasurement.SortBy; + sortBy?: Hume.expressionMeasurement.batch.SortBy; /** * Specify the order in which to sort the jobs. Defaults to descending order. * @@ -52,5 +52,5 @@ export interface BatchListJobsRequest { * * - `desc`: Sort in descending order (reverse-chronological, with the newest records first). 
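// --- Usage sketch (illustrative aside, not part of this diff) ---
// How a request object for the interface below is typed after the move to the
// `batch` sub-namespace. The literal values mirror the options documented in
// these comments ("FAILED", "ended", "desc"); the limit and function name are
// illustrative.
import { HumeClient } from "hume";
import * as Hume from "hume";

async function listRecentFailedJobs(client: HumeClient) {
    const request: Hume.expressionMeasurement.batch.BatchListJobsRequest = {
        limit: 25,
        status: ["FAILED"], // Hume.expressionMeasurement.batch.Status[]
        sortBy: "ended",    // Hume.expressionMeasurement.batch.SortBy
        direction: "desc",  // Hume.expressionMeasurement.batch.Direction
    };
    return client.expressionMeasurement.batch.listJobs(request);
}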
*/ - direction?: Hume.expressionMeasurement.Direction; + direction?: Hume.expressionMeasurement.batch.Direction; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/client/requests/BatchStartInferenceJobFromLocalFileRequest.ts b/src/api/resources/expressionMeasurement/resources/batch/client/requests/BatchStartInferenceJobFromLocalFileRequest.ts index 0838ff4e..857e23c3 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/client/requests/BatchStartInferenceJobFromLocalFileRequest.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/client/requests/BatchStartInferenceJobFromLocalFileRequest.ts @@ -10,5 +10,5 @@ import * as Hume from "../../../../../../index"; */ export interface BatchStartInferenceJobFromLocalFileRequest { /** Stringified JSON object containing the inference job configuration. */ - json?: Hume.expressionMeasurement.InferenceBaseRequest; + json?: Hume.expressionMeasurement.batch.InferenceBaseRequest; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/BurstPrediction.ts b/src/api/resources/expressionMeasurement/resources/batch/types/BurstPrediction.ts index e204a318..36d26084 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/BurstPrediction.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/BurstPrediction.ts @@ -5,9 +5,9 @@ import * as Hume from "../../../../../index"; export interface BurstPrediction { - time: Hume.expressionMeasurement.TimeInterval; + time: Hume.expressionMeasurement.batch.TimeInterval; /** A high-dimensional embedding in emotion space. */ - emotions: Hume.expressionMeasurement.EmotionScore[]; + emotions: Hume.expressionMeasurement.batch.EmotionScore[]; /** Modality-specific descriptive features and their scores. */ - descriptions: Hume.expressionMeasurement.DescriptionsScore[]; + descriptions: Hume.expressionMeasurement.batch.DescriptionsScore[]; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/CompletedState.ts b/src/api/resources/expressionMeasurement/resources/batch/types/CompletedState.ts index 511b53da..c8097d54 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/CompletedState.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/CompletedState.ts @@ -4,4 +4,4 @@ import * as Hume from "../../../../../index"; -export interface CompletedState extends Hume.expressionMeasurement.CompletedInference {} +export interface CompletedState extends Hume.expressionMeasurement.batch.CompletedInference {} diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/CompletedTraining.ts b/src/api/resources/expressionMeasurement/resources/batch/types/CompletedTraining.ts index 28e0885e..164f1b67 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/CompletedTraining.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/CompletedTraining.ts @@ -11,6 +11,6 @@ export interface CompletedTraining { startedTimestampMs: number; /** When this job ended (Unix timestamp in milliseconds). 
*/ endedTimestampMs: number; - customModel: Hume.expressionMeasurement.TrainingCustomModel; - alternatives?: Record; + customModel: Hume.expressionMeasurement.batch.TrainingCustomModel; + alternatives?: Record; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/CustomModel.ts b/src/api/resources/expressionMeasurement/resources/batch/types/CustomModel.ts index 0e92450a..38270373 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/CustomModel.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/CustomModel.ts @@ -4,4 +4,6 @@ import * as Hume from "../../../../../index"; -export type CustomModel = Hume.expressionMeasurement.CustomModelId | Hume.expressionMeasurement.CustomModelVersionId; +export type CustomModel = + | Hume.expressionMeasurement.batch.CustomModelId + | Hume.expressionMeasurement.batch.CustomModelVersionId; diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/CustomModelRequest.ts b/src/api/resources/expressionMeasurement/resources/batch/types/CustomModelRequest.ts index 36bb9506..a8a45ef2 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/CustomModelRequest.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/CustomModelRequest.ts @@ -7,5 +7,5 @@ import * as Hume from "../../../../../index"; export interface CustomModelRequest { name: string; description?: string; - tags?: Hume.expressionMeasurement.Tag[]; + tags?: Hume.expressionMeasurement.batch.Tag[]; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/CustomModelsInferenceJob.ts b/src/api/resources/expressionMeasurement/resources/batch/types/CustomModelsInferenceJob.ts index 8d0721ba..8c263d18 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/CustomModelsInferenceJob.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/CustomModelsInferenceJob.ts @@ -4,6 +4,6 @@ import * as Hume from "../../../../../index"; -export interface CustomModelsInferenceJob extends Hume.expressionMeasurement.JobTlInference { +export interface CustomModelsInferenceJob extends Hume.expressionMeasurement.batch.JobTlInference { type: string; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/CustomModelsTrainingJob.ts b/src/api/resources/expressionMeasurement/resources/batch/types/CustomModelsTrainingJob.ts index d202d04f..2709fe4d 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/CustomModelsTrainingJob.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/CustomModelsTrainingJob.ts @@ -4,6 +4,6 @@ import * as Hume from "../../../../../index"; -export interface CustomModelsTrainingJob extends Hume.expressionMeasurement.JobTraining { +export interface CustomModelsTrainingJob extends Hume.expressionMeasurement.batch.JobTraining { type: string; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/Dataset.ts b/src/api/resources/expressionMeasurement/resources/batch/types/Dataset.ts index 63b4eff0..3c9f5b17 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/Dataset.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/Dataset.ts @@ -4,4 +4,4 @@ import * as Hume from "../../../../../index"; -export type Dataset = Hume.expressionMeasurement.DatasetId | Hume.expressionMeasurement.DatasetVersionId; +export type Dataset = Hume.expressionMeasurement.batch.DatasetId | Hume.expressionMeasurement.batch.DatasetVersionId; diff --git 
a/src/api/resources/expressionMeasurement/resources/batch/types/EmbeddingGenerationBaseRequest.ts b/src/api/resources/expressionMeasurement/resources/batch/types/EmbeddingGenerationBaseRequest.ts index 3b76e71e..84e29a24 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/EmbeddingGenerationBaseRequest.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/EmbeddingGenerationBaseRequest.ts @@ -6,5 +6,5 @@ import * as Hume from "../../../../../index"; export interface EmbeddingGenerationBaseRequest { /** File ID and File URL pairs for an asset registry file */ - registryFileDetails?: Hume.expressionMeasurement.RegistryFileDetail[]; + registryFileDetails?: Hume.expressionMeasurement.batch.RegistryFileDetail[]; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/EmbeddingGenerationJob.ts b/src/api/resources/expressionMeasurement/resources/batch/types/EmbeddingGenerationJob.ts index 4d3be3bb..0da71d8c 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/EmbeddingGenerationJob.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/EmbeddingGenerationJob.ts @@ -4,6 +4,6 @@ import * as Hume from "../../../../../index"; -export interface EmbeddingGenerationJob extends Hume.expressionMeasurement.JobEmbeddingGeneration { +export interface EmbeddingGenerationJob extends Hume.expressionMeasurement.batch.JobEmbeddingGeneration { type: string; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/EvaluationArgs.ts b/src/api/resources/expressionMeasurement/resources/batch/types/EvaluationArgs.ts index 64af6781..461443de 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/EvaluationArgs.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/EvaluationArgs.ts @@ -5,5 +5,5 @@ import * as Hume from "../../../../../index"; export interface EvaluationArgs { - validation?: Hume.expressionMeasurement.ValidationArgs; + validation?: Hume.expressionMeasurement.batch.ValidationArgs; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/Face.ts b/src/api/resources/expressionMeasurement/resources/batch/types/Face.ts index faa9e5dc..06370fa0 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/Face.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/Face.ts @@ -18,8 +18,8 @@ export interface Face { identifyFaces?: boolean; /** Minimum bounding box side length in pixels to treat as a face. Faces detected with a bounding box side length in pixels less than this threshold will be omitted from the response. */ minFaceSize?: number; - facs?: Hume.expressionMeasurement.Unconfigurable; - descriptions?: Hume.expressionMeasurement.Unconfigurable; + facs?: Hume.expressionMeasurement.batch.Unconfigurable; + descriptions?: Hume.expressionMeasurement.batch.Unconfigurable; /** Whether to extract and save the detected faces in the artifacts zip created by each job. */ saveFaces?: boolean; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/FacePrediction.ts b/src/api/resources/expressionMeasurement/resources/batch/types/FacePrediction.ts index eff9a347..ba09edda 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/FacePrediction.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/FacePrediction.ts @@ -11,11 +11,11 @@ export interface FacePrediction { time: number; /** The predicted probability that a detected face was actually a face. 
*/ prob: number; - box: Hume.expressionMeasurement.BoundingBox; + box: Hume.expressionMeasurement.batch.BoundingBox; /** A high-dimensional embedding in emotion space. */ - emotions: Hume.expressionMeasurement.EmotionScore[]; + emotions: Hume.expressionMeasurement.batch.EmotionScore[]; /** FACS 2.0 features and their scores. */ - facs?: Hume.expressionMeasurement.FacsScore[]; + facs?: Hume.expressionMeasurement.batch.FacsScore[]; /** Modality-specific descriptive features and their scores. */ - descriptions?: Hume.expressionMeasurement.DescriptionsScore[]; + descriptions?: Hume.expressionMeasurement.batch.DescriptionsScore[]; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/FacemeshPrediction.ts b/src/api/resources/expressionMeasurement/resources/batch/types/FacemeshPrediction.ts index 6195b73a..6d1162e0 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/FacemeshPrediction.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/FacemeshPrediction.ts @@ -6,5 +6,5 @@ import * as Hume from "../../../../../index"; export interface FacemeshPrediction { /** A high-dimensional embedding in emotion space. */ - emotions: Hume.expressionMeasurement.EmotionScore[]; + emotions: Hume.expressionMeasurement.batch.EmotionScore[]; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/FailedState.ts b/src/api/resources/expressionMeasurement/resources/batch/types/FailedState.ts index 72215dfc..27fa9e42 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/FailedState.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/FailedState.ts @@ -4,4 +4,4 @@ import * as Hume from "../../../../../index"; -export interface FailedState extends Hume.expressionMeasurement.Failed {} +export interface FailedState extends Hume.expressionMeasurement.batch.Failed {} diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsBurstPrediction.ts b/src/api/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsBurstPrediction.ts index 2717f7da..01d08c0f 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsBurstPrediction.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsBurstPrediction.ts @@ -7,5 +7,5 @@ import * as Hume from "../../../../../index"; export interface GroupedPredictionsBurstPrediction { /** An automatically generated label to identify individuals in your media file. Will be `unknown` if you have chosen to disable identification, or if the model is unable to distinguish between individuals. */ id: string; - predictions: Hume.expressionMeasurement.BurstPrediction[]; + predictions: Hume.expressionMeasurement.batch.BurstPrediction[]; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsFacePrediction.ts b/src/api/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsFacePrediction.ts index 16a8b9c0..12a7af31 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsFacePrediction.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsFacePrediction.ts @@ -7,5 +7,5 @@ import * as Hume from "../../../../../index"; export interface GroupedPredictionsFacePrediction { /** An automatically generated label to identify individuals in your media file. 
Will be `unknown` if you have chosen to disable identification, or if the model is unable to distinguish between individuals. */ id: string; - predictions: Hume.expressionMeasurement.FacePrediction[]; + predictions: Hume.expressionMeasurement.batch.FacePrediction[]; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsFacemeshPrediction.ts b/src/api/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsFacemeshPrediction.ts index f780013f..00ca0cfd 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsFacemeshPrediction.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsFacemeshPrediction.ts @@ -7,5 +7,5 @@ import * as Hume from "../../../../../index"; export interface GroupedPredictionsFacemeshPrediction { /** An automatically generated label to identify individuals in your media file. Will be `unknown` if you have chosen to disable identification, or if the model is unable to distinguish between individuals. */ id: string; - predictions: Hume.expressionMeasurement.FacemeshPrediction[]; + predictions: Hume.expressionMeasurement.batch.FacemeshPrediction[]; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsLanguagePrediction.ts b/src/api/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsLanguagePrediction.ts index 0310485e..8956314b 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsLanguagePrediction.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsLanguagePrediction.ts @@ -7,5 +7,5 @@ import * as Hume from "../../../../../index"; export interface GroupedPredictionsLanguagePrediction { /** An automatically generated label to identify individuals in your media file. Will be `unknown` if you have chosen to disable identification, or if the model is unable to distinguish between individuals. */ id: string; - predictions: Hume.expressionMeasurement.LanguagePrediction[]; + predictions: Hume.expressionMeasurement.batch.LanguagePrediction[]; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsNerPrediction.ts b/src/api/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsNerPrediction.ts index d89872b6..24c3cd9a 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsNerPrediction.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsNerPrediction.ts @@ -7,5 +7,5 @@ import * as Hume from "../../../../../index"; export interface GroupedPredictionsNerPrediction { /** An automatically generated label to identify individuals in your media file. Will be `unknown` if you have chosen to disable identification, or if the model is unable to distinguish between individuals. 
*/ id: string; - predictions: Hume.expressionMeasurement.NerPrediction[]; + predictions: Hume.expressionMeasurement.batch.NerPrediction[]; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsProsodyPrediction.ts b/src/api/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsProsodyPrediction.ts index 08819171..c687ef16 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsProsodyPrediction.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsProsodyPrediction.ts @@ -7,5 +7,5 @@ import * as Hume from "../../../../../index"; export interface GroupedPredictionsProsodyPrediction { /** An automatically generated label to identify individuals in your media file. Will be `unknown` if you have chosen to disable identification, or if the model is unable to distinguish between individuals. */ id: string; - predictions: Hume.expressionMeasurement.ProsodyPrediction[]; + predictions: Hume.expressionMeasurement.batch.ProsodyPrediction[]; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/InProgressState.ts b/src/api/resources/expressionMeasurement/resources/batch/types/InProgressState.ts index b0d15bb6..9e15976f 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/InProgressState.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/InProgressState.ts @@ -4,4 +4,4 @@ import * as Hume from "../../../../../index"; -export interface InProgressState extends Hume.expressionMeasurement.InProgress {} +export interface InProgressState extends Hume.expressionMeasurement.batch.InProgress {} diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/InferenceBaseRequest.ts b/src/api/resources/expressionMeasurement/resources/batch/types/InferenceBaseRequest.ts index e1c054d8..94bcb1a7 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/InferenceBaseRequest.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/InferenceBaseRequest.ts @@ -10,8 +10,8 @@ export interface InferenceBaseRequest { * * If this field is not explicitly set, then all models will run by default. */ - models?: Hume.expressionMeasurement.Models; - transcription?: Hume.expressionMeasurement.Transcription; + models?: Hume.expressionMeasurement.batch.Models; + transcription?: Hume.expressionMeasurement.batch.Transcription; /** * URLs to the media files to be processed. Each must be a valid public URL to a media file (see recommended input filetypes) or an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files. * diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/InferenceJob.ts b/src/api/resources/expressionMeasurement/resources/batch/types/InferenceJob.ts index 2be70fee..a01d28cd 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/InferenceJob.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/InferenceJob.ts @@ -4,7 +4,7 @@ import * as Hume from "../../../../../index"; -export interface InferenceJob extends Hume.expressionMeasurement.JobInference { +export interface InferenceJob extends Hume.expressionMeasurement.batch.JobInference { /** * Denotes the job type. 
* diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/InferencePrediction.ts b/src/api/resources/expressionMeasurement/resources/batch/types/InferencePrediction.ts index c0b7280c..184b7bad 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/InferencePrediction.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/InferencePrediction.ts @@ -7,5 +7,5 @@ import * as Hume from "../../../../../index"; export interface InferencePrediction { /** A file path relative to the top level source URL or file. */ file: string; - models: Hume.expressionMeasurement.ModelsPredictions; + models: Hume.expressionMeasurement.batch.ModelsPredictions; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/InferenceRequest.ts b/src/api/resources/expressionMeasurement/resources/batch/types/InferenceRequest.ts index 96561280..67550a52 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/InferenceRequest.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/InferenceRequest.ts @@ -5,8 +5,8 @@ import * as Hume from "../../../../../index"; export interface InferenceRequest { - models?: Hume.expressionMeasurement.Models; - transcription?: Hume.expressionMeasurement.Transcription; + models?: Hume.expressionMeasurement.batch.Models; + transcription?: Hume.expressionMeasurement.batch.Transcription; /** * URLs to the media files to be processed. Each must be a valid public URL to a media file (see recommended input filetypes) or an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files. * @@ -19,5 +19,5 @@ export interface InferenceRequest { callbackUrl?: string; /** Whether to send an email notification to the user upon job completion/failure. */ notify?: boolean; - files: Hume.expressionMeasurement.File_[]; + files: Hume.expressionMeasurement.batch.File_[]; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/InferenceResults.ts b/src/api/resources/expressionMeasurement/resources/batch/types/InferenceResults.ts index 03430757..73636b53 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/InferenceResults.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/InferenceResults.ts @@ -5,6 +5,6 @@ import * as Hume from "../../../../../index"; export interface InferenceResults { - predictions: Hume.expressionMeasurement.InferencePrediction[]; - errors: Hume.expressionMeasurement.Error_[]; + predictions: Hume.expressionMeasurement.batch.InferencePrediction[]; + errors: Hume.expressionMeasurement.batch.Error_[]; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/InferenceSourcePredictResult.ts b/src/api/resources/expressionMeasurement/resources/batch/types/InferenceSourcePredictResult.ts index d5509cff..46e3e2f4 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/InferenceSourcePredictResult.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/InferenceSourcePredictResult.ts @@ -5,8 +5,8 @@ import * as Hume from "../../../../../index"; export interface InferenceSourcePredictResult { - source: Hume.expressionMeasurement.Source; - results?: Hume.expressionMeasurement.InferenceResults; + source: Hume.expressionMeasurement.batch.Source; + results?: Hume.expressionMeasurement.batch.InferenceResults; /** An error message. 
*/ error?: string; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/JobEmbeddingGeneration.ts b/src/api/resources/expressionMeasurement/resources/batch/types/JobEmbeddingGeneration.ts index 75e4fd93..6478755b 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/JobEmbeddingGeneration.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/JobEmbeddingGeneration.ts @@ -8,6 +8,6 @@ export interface JobEmbeddingGeneration { /** The ID associated with this job. */ jobId: string; userId: string; - request: Hume.expressionMeasurement.EmbeddingGenerationBaseRequest; - state: Hume.expressionMeasurement.StateEmbeddingGeneration; + request: Hume.expressionMeasurement.batch.EmbeddingGenerationBaseRequest; + state: Hume.expressionMeasurement.batch.StateEmbeddingGeneration; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/JobInference.ts b/src/api/resources/expressionMeasurement/resources/batch/types/JobInference.ts index 03506581..ee791bd5 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/JobInference.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/JobInference.ts @@ -8,7 +8,7 @@ export interface JobInference { /** The ID associated with this job. */ jobId: string; /** The request that initiated the job. */ - request: Hume.expressionMeasurement.InferenceRequest; + request: Hume.expressionMeasurement.batch.InferenceRequest; /** The current state of the job. */ - state: Hume.expressionMeasurement.StateInference; + state: Hume.expressionMeasurement.batch.StateInference; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/JobTlInference.ts b/src/api/resources/expressionMeasurement/resources/batch/types/JobTlInference.ts index 4d8c16d8..deff708c 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/JobTlInference.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/JobTlInference.ts @@ -8,6 +8,6 @@ export interface JobTlInference { /** The ID associated with this job. */ jobId: string; userId: string; - request: Hume.expressionMeasurement.TlInferenceBaseRequest; - state: Hume.expressionMeasurement.StateTlInference; + request: Hume.expressionMeasurement.batch.TlInferenceBaseRequest; + state: Hume.expressionMeasurement.batch.StateTlInference; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/JobTraining.ts b/src/api/resources/expressionMeasurement/resources/batch/types/JobTraining.ts index 2a767f61..4b84a16e 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/JobTraining.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/JobTraining.ts @@ -8,6 +8,6 @@ export interface JobTraining { /** The ID associated with this job. 
*/ jobId: string; userId: string; - request: Hume.expressionMeasurement.TrainingBaseRequest; - state: Hume.expressionMeasurement.StateTraining; + request: Hume.expressionMeasurement.batch.TrainingBaseRequest; + state: Hume.expressionMeasurement.batch.StateTraining; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/Language.ts b/src/api/resources/expressionMeasurement/resources/batch/types/Language.ts index d7f3a891..944ef38c 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/Language.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/Language.ts @@ -10,9 +10,9 @@ import * as Hume from "../../../../../index"; * Recommended input filetypes: `.txt`, `.mp3`, `.wav`, `.mp4` */ export interface Language { - granularity?: Hume.expressionMeasurement.Granularity; - sentiment?: Hume.expressionMeasurement.Unconfigurable; - toxicity?: Hume.expressionMeasurement.Unconfigurable; + granularity?: Hume.expressionMeasurement.batch.Granularity; + sentiment?: Hume.expressionMeasurement.batch.Unconfigurable; + toxicity?: Hume.expressionMeasurement.batch.Unconfigurable; /** Whether to return identifiers for speakers over time. If `true`, unique identifiers will be assigned to spoken words to differentiate different speakers. If `false`, all speakers will be tagged with an `unknown` ID. */ identifySpeakers?: boolean; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/LanguagePrediction.ts b/src/api/resources/expressionMeasurement/resources/batch/types/LanguagePrediction.ts index 2625916b..255d94c5 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/LanguagePrediction.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/LanguagePrediction.ts @@ -7,20 +7,20 @@ import * as Hume from "../../../../../index"; export interface LanguagePrediction { /** A segment of text (like a word or a sentence). */ text: string; - position: Hume.expressionMeasurement.PositionInterval; - time?: Hume.expressionMeasurement.TimeInterval; + position: Hume.expressionMeasurement.batch.PositionInterval; + time?: Hume.expressionMeasurement.batch.TimeInterval; /** Value between `0.0` and `1.0` that indicates our transcription model's relative confidence in this text. */ confidence?: number; /** Value between `0.0` and `1.0` that indicates our transcription model's relative confidence that this text was spoken by this speaker. */ speakerConfidence?: number; /** A high-dimensional embedding in emotion space. */ - emotions: Hume.expressionMeasurement.EmotionScore[]; + emotions: Hume.expressionMeasurement.batch.EmotionScore[]; /** * Sentiment predictions returned as a distribution. This model predicts the probability that a given text could be interpreted as having each sentiment level from `1` (negative) to `9` (positive). * * Compared to returning one estimate of sentiment, this enables a more nuanced analysis of a text's meaning. For example, a text with very neutral sentiment would have an average rating of `5`. But also a text that could be interpreted as having very positive sentiment or very negative sentiment would also have an average rating of `5`. The average sentiment is less informative than the distribution over sentiment, so this API returns a value for each sentiment level. 
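// --- Usage sketch (illustrative aside, not part of this diff) ---
// Walking a finished job's predictions down to the LanguagePrediction type
// defined here, using the relocated `batch` types. The generic return type of
// getJobPredictions is elided in this diff; the traversal below follows the
// interfaces it defines (InferenceSourcePredictResult -> InferenceResults ->
// InferencePrediction -> ModelsPredictions -> grouped language predictions).
import { HumeClient } from "hume";

async function logLanguageResults(client: HumeClient, jobId: string): Promise<void> {
    const sourceResults = await client.expressionMeasurement.batch.getJobPredictions(jobId);
    for (const sourceResult of sourceResults) {
        // InferenceSourcePredictResult: `results` is absent when `error` is set.
        for (const filePrediction of sourceResult.results?.predictions ?? []) {
            const grouped = filePrediction.models.language?.groupedPredictions ?? [];
            for (const speaker of grouped) {
                for (const prediction of speaker.predictions) {
                    // Each LanguagePrediction carries a text span plus its emotion embedding.
                    console.log(filePrediction.file, speaker.id, prediction.text, prediction.emotions.length);
                }
            }
        }
    }
}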
*/ - sentiment?: Hume.expressionMeasurement.SentimentScore[]; + sentiment?: Hume.expressionMeasurement.batch.SentimentScore[]; /** Toxicity predictions returned as probabilities that the text can be classified into the following categories: `toxic`, `severe_toxic`, `obscene`, `threat`, `insult`, and `identity_hate`. */ - toxicity?: Hume.expressionMeasurement.ToxicityScore[]; + toxicity?: Hume.expressionMeasurement.batch.ToxicityScore[]; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/Models.ts b/src/api/resources/expressionMeasurement/resources/batch/types/Models.ts index 03a3f73c..8bb54775 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/Models.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/Models.ts @@ -8,10 +8,10 @@ import * as Hume from "../../../../../index"; * The models used for inference. */ export interface Models { - face?: Hume.expressionMeasurement.Face; - burst?: Hume.expressionMeasurement.Unconfigurable; - prosody?: Hume.expressionMeasurement.Prosody; - language?: Hume.expressionMeasurement.Language; - ner?: Hume.expressionMeasurement.Ner; - facemesh?: Hume.expressionMeasurement.Unconfigurable; + face?: Hume.expressionMeasurement.batch.Face; + burst?: Hume.expressionMeasurement.batch.Unconfigurable; + prosody?: Hume.expressionMeasurement.batch.Prosody; + language?: Hume.expressionMeasurement.batch.Language; + ner?: Hume.expressionMeasurement.batch.Ner; + facemesh?: Hume.expressionMeasurement.batch.Unconfigurable; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/ModelsPredictions.ts b/src/api/resources/expressionMeasurement/resources/batch/types/ModelsPredictions.ts index 70ff5349..e3d1c84e 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/ModelsPredictions.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/ModelsPredictions.ts @@ -5,10 +5,10 @@ import * as Hume from "../../../../../index"; export interface ModelsPredictions { - face?: Hume.expressionMeasurement.PredictionsOptionalNullFacePrediction; - burst?: Hume.expressionMeasurement.PredictionsOptionalNullBurstPrediction; - prosody?: Hume.expressionMeasurement.PredictionsOptionalTranscriptionMetadataProsodyPrediction; - language?: Hume.expressionMeasurement.PredictionsOptionalTranscriptionMetadataLanguagePrediction; - ner?: Hume.expressionMeasurement.PredictionsOptionalTranscriptionMetadataNerPrediction; - facemesh?: Hume.expressionMeasurement.PredictionsOptionalNullFacemeshPrediction; + face?: Hume.expressionMeasurement.batch.PredictionsOptionalNullFacePrediction; + burst?: Hume.expressionMeasurement.batch.PredictionsOptionalNullBurstPrediction; + prosody?: Hume.expressionMeasurement.batch.PredictionsOptionalTranscriptionMetadataProsodyPrediction; + language?: Hume.expressionMeasurement.batch.PredictionsOptionalTranscriptionMetadataLanguagePrediction; + ner?: Hume.expressionMeasurement.batch.PredictionsOptionalTranscriptionMetadataNerPrediction; + facemesh?: Hume.expressionMeasurement.batch.PredictionsOptionalNullFacemeshPrediction; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/NerPrediction.ts b/src/api/resources/expressionMeasurement/resources/batch/types/NerPrediction.ts index a8cd0113..7965d5bb 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/NerPrediction.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/NerPrediction.ts @@ -7,7 +7,7 @@ import * as Hume from "../../../../../index"; export 
interface NerPrediction { /** The recognized topic or entity. */ entity: string; - position: Hume.expressionMeasurement.PositionInterval; + position: Hume.expressionMeasurement.batch.PositionInterval; /** Our NER model's relative confidence in the recognized topic or entity. */ entityConfidence: number; /** A measure of how often the entity is linked to by other entities. */ @@ -16,11 +16,11 @@ export interface NerPrediction { uri: string; /** The specific word to which the emotion predictions are linked. */ linkWord: string; - time?: Hume.expressionMeasurement.TimeInterval; + time?: Hume.expressionMeasurement.batch.TimeInterval; /** Value between `0.0` and `1.0` that indicates our transcription model's relative confidence in this text. */ confidence?: number; /** Value between `0.0` and `1.0` that indicates our transcription model's relative confidence that this text was spoken by this speaker. */ speakerConfidence?: number; /** A high-dimensional embedding in emotion space. */ - emotions: Hume.expressionMeasurement.EmotionScore[]; + emotions: Hume.expressionMeasurement.batch.EmotionScore[]; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalNullBurstPrediction.ts b/src/api/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalNullBurstPrediction.ts index 755bf8cb..84a4a62e 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalNullBurstPrediction.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalNullBurstPrediction.ts @@ -5,6 +5,6 @@ import * as Hume from "../../../../../index"; export interface PredictionsOptionalNullBurstPrediction { - metadata?: Hume.expressionMeasurement.Null; - groupedPredictions: Hume.expressionMeasurement.GroupedPredictionsBurstPrediction[]; + metadata?: Hume.expressionMeasurement.batch.Null; + groupedPredictions: Hume.expressionMeasurement.batch.GroupedPredictionsBurstPrediction[]; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalNullFacePrediction.ts b/src/api/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalNullFacePrediction.ts index 08f4bb23..0cfb22bb 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalNullFacePrediction.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalNullFacePrediction.ts @@ -5,6 +5,6 @@ import * as Hume from "../../../../../index"; export interface PredictionsOptionalNullFacePrediction { - metadata?: Hume.expressionMeasurement.Null; - groupedPredictions: Hume.expressionMeasurement.GroupedPredictionsFacePrediction[]; + metadata?: Hume.expressionMeasurement.batch.Null; + groupedPredictions: Hume.expressionMeasurement.batch.GroupedPredictionsFacePrediction[]; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalNullFacemeshPrediction.ts b/src/api/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalNullFacemeshPrediction.ts index b8ccf9d4..28584ca5 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalNullFacemeshPrediction.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalNullFacemeshPrediction.ts @@ -5,6 +5,6 @@ import * as Hume from "../../../../../index"; export interface PredictionsOptionalNullFacemeshPrediction { - metadata?: Hume.expressionMeasurement.Null; - groupedPredictions: 
Hume.expressionMeasurement.GroupedPredictionsFacemeshPrediction[]; + metadata?: Hume.expressionMeasurement.batch.Null; + groupedPredictions: Hume.expressionMeasurement.batch.GroupedPredictionsFacemeshPrediction[]; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalTranscriptionMetadataLanguagePrediction.ts b/src/api/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalTranscriptionMetadataLanguagePrediction.ts index 895e27f1..ef5b65f5 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalTranscriptionMetadataLanguagePrediction.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalTranscriptionMetadataLanguagePrediction.ts @@ -5,6 +5,6 @@ import * as Hume from "../../../../../index"; export interface PredictionsOptionalTranscriptionMetadataLanguagePrediction { - metadata?: Hume.expressionMeasurement.TranscriptionMetadata; - groupedPredictions: Hume.expressionMeasurement.GroupedPredictionsLanguagePrediction[]; + metadata?: Hume.expressionMeasurement.batch.TranscriptionMetadata; + groupedPredictions: Hume.expressionMeasurement.batch.GroupedPredictionsLanguagePrediction[]; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalTranscriptionMetadataNerPrediction.ts b/src/api/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalTranscriptionMetadataNerPrediction.ts index c5b8311f..de78237c 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalTranscriptionMetadataNerPrediction.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalTranscriptionMetadataNerPrediction.ts @@ -5,6 +5,6 @@ import * as Hume from "../../../../../index"; export interface PredictionsOptionalTranscriptionMetadataNerPrediction { - metadata?: Hume.expressionMeasurement.TranscriptionMetadata; - groupedPredictions: Hume.expressionMeasurement.GroupedPredictionsNerPrediction[]; + metadata?: Hume.expressionMeasurement.batch.TranscriptionMetadata; + groupedPredictions: Hume.expressionMeasurement.batch.GroupedPredictionsNerPrediction[]; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalTranscriptionMetadataProsodyPrediction.ts b/src/api/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalTranscriptionMetadataProsodyPrediction.ts index 96296c37..efbf0a62 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalTranscriptionMetadataProsodyPrediction.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalTranscriptionMetadataProsodyPrediction.ts @@ -5,6 +5,6 @@ import * as Hume from "../../../../../index"; export interface PredictionsOptionalTranscriptionMetadataProsodyPrediction { - metadata?: Hume.expressionMeasurement.TranscriptionMetadata; - groupedPredictions: Hume.expressionMeasurement.GroupedPredictionsProsodyPrediction[]; + metadata?: Hume.expressionMeasurement.batch.TranscriptionMetadata; + groupedPredictions: Hume.expressionMeasurement.batch.GroupedPredictionsProsodyPrediction[]; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/Prosody.ts b/src/api/resources/expressionMeasurement/resources/batch/types/Prosody.ts index 1b96a29a..0621ec56 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/Prosody.ts +++ 
b/src/api/resources/expressionMeasurement/resources/batch/types/Prosody.ts @@ -10,8 +10,8 @@ import * as Hume from "../../../../../index"; * Recommended input file types: `.wav`, `.mp3`, `.mp4` */ export interface Prosody { - granularity?: Hume.expressionMeasurement.Granularity; - window?: Hume.expressionMeasurement.Window; + granularity?: Hume.expressionMeasurement.batch.Granularity; + window?: Hume.expressionMeasurement.batch.Window; /** Whether to return identifiers for speakers over time. If `true`, unique identifiers will be assigned to spoken words to differentiate different speakers. If `false`, all speakers will be tagged with an `unknown` ID. */ identifySpeakers?: boolean; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/ProsodyPrediction.ts b/src/api/resources/expressionMeasurement/resources/batch/types/ProsodyPrediction.ts index d941f01b..5cfba9c0 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/ProsodyPrediction.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/ProsodyPrediction.ts @@ -7,11 +7,11 @@ import * as Hume from "../../../../../index"; export interface ProsodyPrediction { /** A segment of text (like a word or a sentence). */ text?: string; - time: Hume.expressionMeasurement.TimeInterval; + time: Hume.expressionMeasurement.batch.TimeInterval; /** Value between `0.0` and `1.0` that indicates our transcription model's relative confidence in this text. */ confidence?: number; /** Value between `0.0` and `1.0` that indicates our transcription model's relative confidence that this text was spoken by this speaker. */ speakerConfidence?: number; /** A high-dimensional embedding in emotion space. */ - emotions: Hume.expressionMeasurement.EmotionScore[]; + emotions: Hume.expressionMeasurement.batch.EmotionScore[]; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/QueuedState.ts b/src/api/resources/expressionMeasurement/resources/batch/types/QueuedState.ts index 6e262e3c..d2487b82 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/QueuedState.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/QueuedState.ts @@ -4,4 +4,4 @@ import * as Hume from "../../../../../index"; -export interface QueuedState extends Hume.expressionMeasurement.Queued {} +export interface QueuedState extends Hume.expressionMeasurement.batch.Queued {} diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/Source.ts b/src/api/resources/expressionMeasurement/resources/batch/types/Source.ts index 473a4288..467ada41 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/Source.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/Source.ts @@ -5,20 +5,20 @@ import * as Hume from "../../../../../index"; export type Source = - | Hume.expressionMeasurement.Source.Url - | Hume.expressionMeasurement.Source.File_ - | Hume.expressionMeasurement.Source.Text; + | Hume.expressionMeasurement.batch.Source.Url + | Hume.expressionMeasurement.batch.Source.File_ + | Hume.expressionMeasurement.batch.Source.Text; export declare namespace Source { - interface Url extends Hume.expressionMeasurement.SourceUrl { + interface Url extends Hume.expressionMeasurement.batch.SourceUrl { type: "url"; } - interface File_ extends Hume.expressionMeasurement.SourceFile { + interface File_ extends Hume.expressionMeasurement.batch.SourceFile { type: "file"; } - interface Text extends Hume.expressionMeasurement.SourceTextSource { + interface Text 
extends Hume.expressionMeasurement.batch.SourceTextSource { type: "text"; } } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/SourceFile.ts b/src/api/resources/expressionMeasurement/resources/batch/types/SourceFile.ts index 8e35a08a..f2d79cb3 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/SourceFile.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/SourceFile.ts @@ -4,4 +4,4 @@ import * as Hume from "../../../../../index"; -export interface SourceFile extends Hume.expressionMeasurement.File_ {} +export interface SourceFile extends Hume.expressionMeasurement.batch.File_ {} diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/SourceUrl.ts b/src/api/resources/expressionMeasurement/resources/batch/types/SourceUrl.ts index 53d6d8c1..af1b8a6d 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/SourceUrl.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/SourceUrl.ts @@ -4,4 +4,4 @@ import * as Hume from "../../../../../index"; -export interface SourceUrl extends Hume.expressionMeasurement.Url {} +export interface SourceUrl extends Hume.expressionMeasurement.batch.Url {} diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGeneration.ts b/src/api/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGeneration.ts index d7561d78..1312eec6 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGeneration.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGeneration.ts @@ -5,25 +5,25 @@ import * as Hume from "../../../../../index"; export type StateEmbeddingGeneration = - | Hume.expressionMeasurement.StateEmbeddingGeneration.Queued - | Hume.expressionMeasurement.StateEmbeddingGeneration.InProgress - | Hume.expressionMeasurement.StateEmbeddingGeneration.Completed - | Hume.expressionMeasurement.StateEmbeddingGeneration.Failed; + | Hume.expressionMeasurement.batch.StateEmbeddingGeneration.Queued + | Hume.expressionMeasurement.batch.StateEmbeddingGeneration.InProgress + | Hume.expressionMeasurement.batch.StateEmbeddingGeneration.Completed + | Hume.expressionMeasurement.batch.StateEmbeddingGeneration.Failed; export declare namespace StateEmbeddingGeneration { - interface Queued extends Hume.expressionMeasurement.StateEmbeddingGenerationQueued { + interface Queued extends Hume.expressionMeasurement.batch.StateEmbeddingGenerationQueued { status: "QUEUED"; } - interface InProgress extends Hume.expressionMeasurement.StateEmbeddingGenerationInProgress { + interface InProgress extends Hume.expressionMeasurement.batch.StateEmbeddingGenerationInProgress { status: "IN_PROGRESS"; } - interface Completed extends Hume.expressionMeasurement.StateEmbeddingGenerationCompletedEmbeddingGeneration { + interface Completed extends Hume.expressionMeasurement.batch.StateEmbeddingGenerationCompletedEmbeddingGeneration { status: "COMPLETED"; } - interface Failed extends Hume.expressionMeasurement.StateEmbeddingGenerationFailed { + interface Failed extends Hume.expressionMeasurement.batch.StateEmbeddingGenerationFailed { status: "FAILED"; } } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationCompletedEmbeddingGeneration.ts b/src/api/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationCompletedEmbeddingGeneration.ts index 25cfcb59..b4ba5d10 100644 --- 
a/src/api/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationCompletedEmbeddingGeneration.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationCompletedEmbeddingGeneration.ts @@ -5,4 +5,4 @@ import * as Hume from "../../../../../index"; export interface StateEmbeddingGenerationCompletedEmbeddingGeneration - extends Hume.expressionMeasurement.CompletedEmbeddingGeneration {} + extends Hume.expressionMeasurement.batch.CompletedEmbeddingGeneration {} diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationFailed.ts b/src/api/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationFailed.ts index c422e588..26ccfb6c 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationFailed.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationFailed.ts @@ -4,4 +4,4 @@ import * as Hume from "../../../../../index"; -export interface StateEmbeddingGenerationFailed extends Hume.expressionMeasurement.Failed {} +export interface StateEmbeddingGenerationFailed extends Hume.expressionMeasurement.batch.Failed {} diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationInProgress.ts b/src/api/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationInProgress.ts index 36d95b34..18226d47 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationInProgress.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationInProgress.ts @@ -4,4 +4,4 @@ import * as Hume from "../../../../../index"; -export interface StateEmbeddingGenerationInProgress extends Hume.expressionMeasurement.InProgress {} +export interface StateEmbeddingGenerationInProgress extends Hume.expressionMeasurement.batch.InProgress {} diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationQueued.ts b/src/api/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationQueued.ts index 7cc22a16..d40ed204 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationQueued.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationQueued.ts @@ -4,4 +4,4 @@ import * as Hume from "../../../../../index"; -export interface StateEmbeddingGenerationQueued extends Hume.expressionMeasurement.Queued {} +export interface StateEmbeddingGenerationQueued extends Hume.expressionMeasurement.batch.Queued {} diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/StateInference.ts b/src/api/resources/expressionMeasurement/resources/batch/types/StateInference.ts index 0bf0cb4c..50cd0820 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/StateInference.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/StateInference.ts @@ -5,25 +5,25 @@ import * as Hume from "../../../../../index"; export type StateInference = - | Hume.expressionMeasurement.StateInference.Queued - | Hume.expressionMeasurement.StateInference.InProgress - | Hume.expressionMeasurement.StateInference.Completed - | Hume.expressionMeasurement.StateInference.Failed; + | Hume.expressionMeasurement.batch.StateInference.Queued + | Hume.expressionMeasurement.batch.StateInference.InProgress + | Hume.expressionMeasurement.batch.StateInference.Completed + | 
Hume.expressionMeasurement.batch.StateInference.Failed; export declare namespace StateInference { - interface Queued extends Hume.expressionMeasurement.QueuedState { + interface Queued extends Hume.expressionMeasurement.batch.QueuedState { status: "QUEUED"; } - interface InProgress extends Hume.expressionMeasurement.InProgressState { + interface InProgress extends Hume.expressionMeasurement.batch.InProgressState { status: "IN_PROGRESS"; } - interface Completed extends Hume.expressionMeasurement.CompletedState { + interface Completed extends Hume.expressionMeasurement.batch.CompletedState { status: "COMPLETED"; } - interface Failed extends Hume.expressionMeasurement.FailedState { + interface Failed extends Hume.expressionMeasurement.batch.FailedState { status: "FAILED"; } } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/StateTlInference.ts b/src/api/resources/expressionMeasurement/resources/batch/types/StateTlInference.ts index a3a9016c..3a0e9f6d 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/StateTlInference.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/StateTlInference.ts @@ -5,25 +5,25 @@ import * as Hume from "../../../../../index"; export type StateTlInference = - | Hume.expressionMeasurement.StateTlInference.Queued - | Hume.expressionMeasurement.StateTlInference.InProgress - | Hume.expressionMeasurement.StateTlInference.Completed - | Hume.expressionMeasurement.StateTlInference.Failed; + | Hume.expressionMeasurement.batch.StateTlInference.Queued + | Hume.expressionMeasurement.batch.StateTlInference.InProgress + | Hume.expressionMeasurement.batch.StateTlInference.Completed + | Hume.expressionMeasurement.batch.StateTlInference.Failed; export declare namespace StateTlInference { - interface Queued extends Hume.expressionMeasurement.StateTlInferenceQueued { + interface Queued extends Hume.expressionMeasurement.batch.StateTlInferenceQueued { status: "QUEUED"; } - interface InProgress extends Hume.expressionMeasurement.StateTlInferenceInProgress { + interface InProgress extends Hume.expressionMeasurement.batch.StateTlInferenceInProgress { status: "IN_PROGRESS"; } - interface Completed extends Hume.expressionMeasurement.StateTlInferenceCompletedTlInference { + interface Completed extends Hume.expressionMeasurement.batch.StateTlInferenceCompletedTlInference { status: "COMPLETED"; } - interface Failed extends Hume.expressionMeasurement.StateTlInferenceFailed { + interface Failed extends Hume.expressionMeasurement.batch.StateTlInferenceFailed { status: "FAILED"; } } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/StateTlInferenceCompletedTlInference.ts b/src/api/resources/expressionMeasurement/resources/batch/types/StateTlInferenceCompletedTlInference.ts index e1a51867..92036474 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/StateTlInferenceCompletedTlInference.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/StateTlInferenceCompletedTlInference.ts @@ -4,4 +4,4 @@ import * as Hume from "../../../../../index"; -export interface StateTlInferenceCompletedTlInference extends Hume.expressionMeasurement.CompletedTlInference {} +export interface StateTlInferenceCompletedTlInference extends Hume.expressionMeasurement.batch.CompletedTlInference {} diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/StateTlInferenceFailed.ts b/src/api/resources/expressionMeasurement/resources/batch/types/StateTlInferenceFailed.ts index 
ebdca312..1416d477 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/StateTlInferenceFailed.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/StateTlInferenceFailed.ts @@ -4,4 +4,4 @@ import * as Hume from "../../../../../index"; -export interface StateTlInferenceFailed extends Hume.expressionMeasurement.Failed {} +export interface StateTlInferenceFailed extends Hume.expressionMeasurement.batch.Failed {} diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/StateTlInferenceInProgress.ts b/src/api/resources/expressionMeasurement/resources/batch/types/StateTlInferenceInProgress.ts index 0527d3b7..32e2176a 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/StateTlInferenceInProgress.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/StateTlInferenceInProgress.ts @@ -4,4 +4,4 @@ import * as Hume from "../../../../../index"; -export interface StateTlInferenceInProgress extends Hume.expressionMeasurement.InProgress {} +export interface StateTlInferenceInProgress extends Hume.expressionMeasurement.batch.InProgress {} diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/StateTlInferenceQueued.ts b/src/api/resources/expressionMeasurement/resources/batch/types/StateTlInferenceQueued.ts index b61f8470..d74855dc 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/StateTlInferenceQueued.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/StateTlInferenceQueued.ts @@ -4,4 +4,4 @@ import * as Hume from "../../../../../index"; -export interface StateTlInferenceQueued extends Hume.expressionMeasurement.Queued {} +export interface StateTlInferenceQueued extends Hume.expressionMeasurement.batch.Queued {} diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/StateTraining.ts b/src/api/resources/expressionMeasurement/resources/batch/types/StateTraining.ts index 799ce8ca..cf6e2a10 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/StateTraining.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/StateTraining.ts @@ -5,25 +5,25 @@ import * as Hume from "../../../../../index"; export type StateTraining = - | Hume.expressionMeasurement.StateTraining.Queued - | Hume.expressionMeasurement.StateTraining.InProgress - | Hume.expressionMeasurement.StateTraining.Completed - | Hume.expressionMeasurement.StateTraining.Failed; + | Hume.expressionMeasurement.batch.StateTraining.Queued + | Hume.expressionMeasurement.batch.StateTraining.InProgress + | Hume.expressionMeasurement.batch.StateTraining.Completed + | Hume.expressionMeasurement.batch.StateTraining.Failed; export declare namespace StateTraining { - interface Queued extends Hume.expressionMeasurement.StateTrainingQueued { + interface Queued extends Hume.expressionMeasurement.batch.StateTrainingQueued { status: "QUEUED"; } - interface InProgress extends Hume.expressionMeasurement.StateTrainingInProgress { + interface InProgress extends Hume.expressionMeasurement.batch.StateTrainingInProgress { status: "IN_PROGRESS"; } - interface Completed extends Hume.expressionMeasurement.StateTrainingCompletedTraining { + interface Completed extends Hume.expressionMeasurement.batch.StateTrainingCompletedTraining { status: "COMPLETED"; } - interface Failed extends Hume.expressionMeasurement.StateTrainingFailed { + interface Failed extends Hume.expressionMeasurement.batch.StateTrainingFailed { status: "FAILED"; } } diff --git 
a/src/api/resources/expressionMeasurement/resources/batch/types/StateTrainingCompletedTraining.ts b/src/api/resources/expressionMeasurement/resources/batch/types/StateTrainingCompletedTraining.ts index ec508318..8cf5a3b3 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/StateTrainingCompletedTraining.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/StateTrainingCompletedTraining.ts @@ -4,4 +4,4 @@ import * as Hume from "../../../../../index"; -export interface StateTrainingCompletedTraining extends Hume.expressionMeasurement.CompletedTraining {} +export interface StateTrainingCompletedTraining extends Hume.expressionMeasurement.batch.CompletedTraining {} diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/StateTrainingFailed.ts b/src/api/resources/expressionMeasurement/resources/batch/types/StateTrainingFailed.ts index 2378392c..64737b9a 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/StateTrainingFailed.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/StateTrainingFailed.ts @@ -4,4 +4,4 @@ import * as Hume from "../../../../../index"; -export interface StateTrainingFailed extends Hume.expressionMeasurement.Failed {} +export interface StateTrainingFailed extends Hume.expressionMeasurement.batch.Failed {} diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/StateTrainingInProgress.ts b/src/api/resources/expressionMeasurement/resources/batch/types/StateTrainingInProgress.ts index ca56215c..8c3a6f9d 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/StateTrainingInProgress.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/StateTrainingInProgress.ts @@ -4,4 +4,4 @@ import * as Hume from "../../../../../index"; -export interface StateTrainingInProgress extends Hume.expressionMeasurement.InProgress {} +export interface StateTrainingInProgress extends Hume.expressionMeasurement.batch.InProgress {} diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/StateTrainingQueued.ts b/src/api/resources/expressionMeasurement/resources/batch/types/StateTrainingQueued.ts index 13a8c8a8..2d448cd6 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/StateTrainingQueued.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/StateTrainingQueued.ts @@ -4,4 +4,4 @@ import * as Hume from "../../../../../index"; -export interface StateTrainingQueued extends Hume.expressionMeasurement.Queued {} +export interface StateTrainingQueued extends Hume.expressionMeasurement.batch.Queued {} diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/Task.ts b/src/api/resources/expressionMeasurement/resources/batch/types/Task.ts index 7687bc4b..55961e09 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/Task.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/Task.ts @@ -4,14 +4,16 @@ import * as Hume from "../../../../../index"; -export type Task = Hume.expressionMeasurement.Task.Classification | Hume.expressionMeasurement.Task.Regression; +export type Task = + | Hume.expressionMeasurement.batch.Task.Classification + | Hume.expressionMeasurement.batch.Task.Regression; export declare namespace Task { - interface Classification extends Hume.expressionMeasurement.TaskClassification { + interface Classification extends Hume.expressionMeasurement.batch.TaskClassification { type: "classification"; } - interface Regression extends 
Hume.expressionMeasurement.TaskRegression { + interface Regression extends Hume.expressionMeasurement.batch.TaskRegression { type: "regression"; } } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/TlInferenceBaseRequest.ts b/src/api/resources/expressionMeasurement/resources/batch/types/TlInferenceBaseRequest.ts index 986ed92f..2019fb75 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/TlInferenceBaseRequest.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/TlInferenceBaseRequest.ts @@ -5,7 +5,7 @@ import * as Hume from "../../../../../index"; export interface TlInferenceBaseRequest { - customModel: Hume.expressionMeasurement.CustomModel; + customModel: Hume.expressionMeasurement.batch.CustomModel; /** * URLs to the media files to be processed. Each must be a valid public URL to a media file (see recommended input filetypes) or an archive (`.zip`, `.tar.gz`, `.tar.bz2`, `.tar.xz`) of media files. * diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/TlInferencePrediction.ts b/src/api/resources/expressionMeasurement/resources/batch/types/TlInferencePrediction.ts index 0a91d216..54ec8aeb 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/TlInferencePrediction.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/TlInferencePrediction.ts @@ -8,5 +8,5 @@ export interface TlInferencePrediction { /** A file path relative to the top level source URL or file. */ file: string; fileType: string; - customModels: Record; + customModels: Record; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/TlInferenceResults.ts b/src/api/resources/expressionMeasurement/resources/batch/types/TlInferenceResults.ts index dd370100..54d6eebf 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/TlInferenceResults.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/TlInferenceResults.ts @@ -5,6 +5,6 @@ import * as Hume from "../../../../../index"; export interface TlInferenceResults { - predictions: Hume.expressionMeasurement.TlInferencePrediction[]; - errors: Hume.expressionMeasurement.Error_[]; + predictions: Hume.expressionMeasurement.batch.TlInferencePrediction[]; + errors: Hume.expressionMeasurement.batch.Error_[]; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/TlInferenceSourcePredictResult.ts b/src/api/resources/expressionMeasurement/resources/batch/types/TlInferenceSourcePredictResult.ts index 136c17ea..4f88d903 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/TlInferenceSourcePredictResult.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/TlInferenceSourcePredictResult.ts @@ -5,8 +5,8 @@ import * as Hume from "../../../../../index"; export interface TlInferenceSourcePredictResult { - source: Hume.expressionMeasurement.Source; - results?: Hume.expressionMeasurement.TlInferenceResults; + source: Hume.expressionMeasurement.batch.Source; + results?: Hume.expressionMeasurement.batch.TlInferenceResults; /** An error message. 
*/ error?: string; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/TrainingBaseRequest.ts b/src/api/resources/expressionMeasurement/resources/batch/types/TrainingBaseRequest.ts index 9820e515..640ba266 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/TrainingBaseRequest.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/TrainingBaseRequest.ts @@ -5,12 +5,12 @@ import * as Hume from "../../../../../index"; export interface TrainingBaseRequest { - customModel: Hume.expressionMeasurement.CustomModelRequest; - dataset: Hume.expressionMeasurement.Dataset; + customModel: Hume.expressionMeasurement.batch.CustomModelRequest; + dataset: Hume.expressionMeasurement.batch.Dataset; targetFeature?: string; - task?: Hume.expressionMeasurement.Task; - evaluation?: Hume.expressionMeasurement.EvaluationArgs; - alternatives?: Hume.expressionMeasurement.Alternative[]; + task?: Hume.expressionMeasurement.batch.Task; + evaluation?: Hume.expressionMeasurement.batch.EvaluationArgs; + alternatives?: Hume.expressionMeasurement.batch.Alternative[]; callbackUrl?: string; notify?: boolean; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/Transcription.ts b/src/api/resources/expressionMeasurement/resources/batch/types/Transcription.ts index b7a82398..b21490d9 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/Transcription.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/Transcription.ts @@ -45,7 +45,7 @@ export interface Transcription { * - Turkish: `tr` * - Ukrainian: `uk` */ - language?: Hume.expressionMeasurement.Bcp47Tag; + language?: Hume.expressionMeasurement.batch.Bcp47Tag; /** Whether to return identifiers for speakers over time. If `true`, unique identifiers will be assigned to spoken words to differentiate different speakers. If `false`, all speakers will be tagged with an `unknown` ID. */ identifySpeakers?: boolean; /** Transcript confidence threshold. Transcripts generated with a confidence less than this threshold will be considered invalid and not used as an input for model inference. */ diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/TranscriptionMetadata.ts b/src/api/resources/expressionMeasurement/resources/batch/types/TranscriptionMetadata.ts index cd1a333d..9631e222 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/TranscriptionMetadata.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/TranscriptionMetadata.ts @@ -10,5 +10,5 @@ import * as Hume from "../../../../../index"; export interface TranscriptionMetadata { /** Value between `0.0` and `1.0` indicating our transcription model's relative confidence in the transcription of your media file. 
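// A hedged illustration of the renamed type paths introduced by this change:
// batch types are now referenced under the `batch` sub-namespace instead of
// directly under `expressionMeasurement`. The import path mirrors the one the
// surrounding generated files use; "da" is taken from the Bcp47Tag enum shown
// in this diff, and Task.Classification is an open record plus its
// "classification" discriminant. Names and values are illustrative only.
import * as Hume from "../../../../../index";

const task: Hume.expressionMeasurement.batch.Task = { type: "classification" };
const transcriptionLanguage: Hume.expressionMeasurement.batch.Bcp47Tag = "da";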
*/ confidence: number; - detectedLanguage?: Hume.expressionMeasurement.Bcp47Tag; + detectedLanguage?: Hume.expressionMeasurement.batch.Bcp47Tag; } diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/UnionJob.ts b/src/api/resources/expressionMeasurement/resources/batch/types/UnionJob.ts index 49b0f821..5cfeb7da 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/UnionJob.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/UnionJob.ts @@ -4,4 +4,4 @@ import * as Hume from "../../../../../index"; -export type UnionJob = Hume.expressionMeasurement.InferenceJob; +export type UnionJob = Hume.expressionMeasurement.batch.InferenceJob; diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/UnionPredictResult.ts b/src/api/resources/expressionMeasurement/resources/batch/types/UnionPredictResult.ts index 503acaf8..3a79c39b 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/UnionPredictResult.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/UnionPredictResult.ts @@ -4,4 +4,4 @@ import * as Hume from "../../../../../index"; -export type UnionPredictResult = Hume.expressionMeasurement.InferenceSourcePredictResult; +export type UnionPredictResult = Hume.expressionMeasurement.batch.InferenceSourcePredictResult; diff --git a/src/api/resources/expressionMeasurement/resources/batch/types/ValidationArgs.ts b/src/api/resources/expressionMeasurement/resources/batch/types/ValidationArgs.ts index 490971fa..05940ae8 100644 --- a/src/api/resources/expressionMeasurement/resources/batch/types/ValidationArgs.ts +++ b/src/api/resources/expressionMeasurement/resources/batch/types/ValidationArgs.ts @@ -5,5 +5,5 @@ import * as Hume from "../../../../../index"; export interface ValidationArgs { - positiveLabel?: Hume.expressionMeasurement.Target; + positiveLabel?: Hume.expressionMeasurement.batch.Target; } diff --git a/src/api/resources/expressionMeasurement/resources/index.ts b/src/api/resources/expressionMeasurement/resources/index.ts index f7cee85c..99457488 100644 --- a/src/api/resources/expressionMeasurement/resources/index.ts +++ b/src/api/resources/expressionMeasurement/resources/index.ts @@ -1,5 +1,2 @@ export * as batch from "./batch"; -export * from "./batch/types"; export * as stream from "./stream"; -export * from "./stream/types"; -export * from "./batch/client/requests"; diff --git a/src/api/resources/expressionMeasurement/resources/stream/types/Config.ts b/src/api/resources/expressionMeasurement/resources/stream/types/Config.ts index 588757de..8a38526b 100644 --- a/src/api/resources/expressionMeasurement/resources/stream/types/Config.ts +++ b/src/api/resources/expressionMeasurement/resources/stream/types/Config.ts @@ -21,7 +21,7 @@ export interface Config { * * Note: Using the `reset_stream` parameter does not have any effect on face identification. A single face identifier cache is maintained over a full session whether `reset_stream` is used or not. */ - face?: Hume.expressionMeasurement.StreamModelsEndpointPayloadModelsFace; + face?: Hume.expressionMeasurement.stream.StreamModelsEndpointPayloadModelsFace; /** * Configuration for the facemesh emotion model. * @@ -31,7 +31,7 @@ export interface Config { */ facemesh?: Record; /** Configuration for the language emotion model. 
*/ - language?: Hume.expressionMeasurement.StreamModelsEndpointPayloadModelsLanguage; + language?: Hume.expressionMeasurement.stream.StreamModelsEndpointPayloadModelsLanguage; /** * Configuration for the speech prosody emotion model. * diff --git a/src/api/resources/expressionMeasurement/resources/stream/types/EmotionEmbedding.ts b/src/api/resources/expressionMeasurement/resources/stream/types/EmotionEmbedding.ts index 1769b493..2e9052da 100644 --- a/src/api/resources/expressionMeasurement/resources/stream/types/EmotionEmbedding.ts +++ b/src/api/resources/expressionMeasurement/resources/stream/types/EmotionEmbedding.ts @@ -7,4 +7,4 @@ import * as Hume from "../../../../../index"; /** * A high-dimensional embedding in emotion space. */ -export type EmotionEmbedding = Hume.expressionMeasurement.EmotionEmbeddingItem[]; +export type EmotionEmbedding = Hume.expressionMeasurement.stream.EmotionEmbeddingItem[]; diff --git a/src/api/resources/expressionMeasurement/resources/stream/types/Sentiment.ts b/src/api/resources/expressionMeasurement/resources/stream/types/Sentiment.ts index 3ae09d6c..88537364 100644 --- a/src/api/resources/expressionMeasurement/resources/stream/types/Sentiment.ts +++ b/src/api/resources/expressionMeasurement/resources/stream/types/Sentiment.ts @@ -9,4 +9,4 @@ import * as Hume from "../../../../../index"; * * Compared to returning one estimate of sentiment, this enables a more nuanced analysis of a text's meaning. For example, a text with very neutral sentiment would have an average rating of 5. But also a text that could be interpreted as having very positive sentiment or very negative sentiment would also have an average rating of 5. The average sentiment is less informative than the distribution over sentiment, so this API returns a value for each sentiment level. */ -export type Sentiment = Hume.expressionMeasurement.SentimentItem[]; +export type Sentiment = Hume.expressionMeasurement.stream.SentimentItem[]; diff --git a/src/api/resources/expressionMeasurement/resources/stream/types/StreamErrorMessage.ts b/src/api/resources/expressionMeasurement/resources/stream/types/StreamErrorMessage.ts index ad1c1c2d..b474023a 100644 --- a/src/api/resources/expressionMeasurement/resources/stream/types/StreamErrorMessage.ts +++ b/src/api/resources/expressionMeasurement/resources/stream/types/StreamErrorMessage.ts @@ -15,5 +15,5 @@ export interface StreamErrorMessage { /** If a payload ID was passed in the request, the same payload ID will be sent back in the response body. */ payloadId?: string; /** If the job_details flag was set in the request, details about the current streaming job will be returned in the response body. */ - jobDetails?: Hume.expressionMeasurement.JobDetails; + jobDetails?: Hume.expressionMeasurement.stream.JobDetails; } diff --git a/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsBurst.ts b/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsBurst.ts index 16ff57a8..1895925e 100644 --- a/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsBurst.ts +++ b/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsBurst.ts @@ -8,5 +8,5 @@ import * as Hume from "../../../../../index"; * Response for the vocal burst emotion model. 
*/ export interface StreamModelPredictionsBurst { - predictions?: Hume.expressionMeasurement.StreamModelPredictionsBurstPredictionsItem[]; + predictions?: Hume.expressionMeasurement.stream.StreamModelPredictionsBurstPredictionsItem[]; } diff --git a/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsBurstPredictionsItem.ts b/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsBurstPredictionsItem.ts index 8fc722e5..47f95081 100644 --- a/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsBurstPredictionsItem.ts +++ b/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsBurstPredictionsItem.ts @@ -5,6 +5,6 @@ import * as Hume from "../../../../../index"; export interface StreamModelPredictionsBurstPredictionsItem { - time?: Hume.expressionMeasurement.TimeRange; - emotions?: Hume.expressionMeasurement.EmotionEmbedding; + time?: Hume.expressionMeasurement.stream.TimeRange; + emotions?: Hume.expressionMeasurement.stream.EmotionEmbedding; } diff --git a/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFace.ts b/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFace.ts index 436b4473..bbb126b3 100644 --- a/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFace.ts +++ b/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFace.ts @@ -8,5 +8,5 @@ import * as Hume from "../../../../../index"; * Response for the facial expression emotion model. */ export interface StreamModelPredictionsFace { - predictions?: Hume.expressionMeasurement.StreamModelPredictionsFacePredictionsItem[]; + predictions?: Hume.expressionMeasurement.stream.StreamModelPredictionsFacePredictionsItem[]; } diff --git a/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFacePredictionsItem.ts b/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFacePredictionsItem.ts index 5faf4c0b..fe5703be 100644 --- a/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFacePredictionsItem.ts +++ b/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFacePredictionsItem.ts @@ -9,12 +9,12 @@ export interface StreamModelPredictionsFacePredictionsItem { frame?: number; /** Time in seconds when face detection occurred. */ time?: number; - bbox?: Hume.expressionMeasurement.StreamBoundingBox; + bbox?: Hume.expressionMeasurement.stream.StreamBoundingBox; /** The predicted probability that a detected face was actually a face. */ prob?: number; /** Identifier for a face. Not that this defaults to `unknown` unless face identification is enabled in the face model configuration. 
*/ faceId?: string; - emotions?: Hume.expressionMeasurement.EmotionEmbedding; - facs?: Hume.expressionMeasurement.EmotionEmbedding; - descriptions?: Hume.expressionMeasurement.EmotionEmbedding; + emotions?: Hume.expressionMeasurement.stream.EmotionEmbedding; + facs?: Hume.expressionMeasurement.stream.EmotionEmbedding; + descriptions?: Hume.expressionMeasurement.stream.EmotionEmbedding; } diff --git a/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFacemesh.ts b/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFacemesh.ts index e131b809..3d1a2f55 100644 --- a/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFacemesh.ts +++ b/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFacemesh.ts @@ -8,5 +8,5 @@ import * as Hume from "../../../../../index"; * Response for the facemesh emotion model. */ export interface StreamModelPredictionsFacemesh { - predictions?: Hume.expressionMeasurement.StreamModelPredictionsFacemeshPredictionsItem[]; + predictions?: Hume.expressionMeasurement.stream.StreamModelPredictionsFacemeshPredictionsItem[]; } diff --git a/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFacemeshPredictionsItem.ts b/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFacemeshPredictionsItem.ts index 484d57ea..1a76930c 100644 --- a/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFacemeshPredictionsItem.ts +++ b/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFacemeshPredictionsItem.ts @@ -5,5 +5,5 @@ import * as Hume from "../../../../../index"; export interface StreamModelPredictionsFacemeshPredictionsItem { - emotions?: Hume.expressionMeasurement.EmotionEmbedding; + emotions?: Hume.expressionMeasurement.stream.EmotionEmbedding; } diff --git a/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsLanguage.ts b/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsLanguage.ts index fe3b0770..b3eff197 100644 --- a/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsLanguage.ts +++ b/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsLanguage.ts @@ -8,5 +8,5 @@ import * as Hume from "../../../../../index"; * Response for the language emotion model. */ export interface StreamModelPredictionsLanguage { - predictions?: Hume.expressionMeasurement.StreamModelPredictionsLanguagePredictionsItem[]; + predictions?: Hume.expressionMeasurement.stream.StreamModelPredictionsLanguagePredictionsItem[]; } diff --git a/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsLanguagePredictionsItem.ts b/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsLanguagePredictionsItem.ts index cda959d9..234f0dd9 100644 --- a/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsLanguagePredictionsItem.ts +++ b/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsLanguagePredictionsItem.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../index"; export interface StreamModelPredictionsLanguagePredictionsItem { /** A segment of text (like a word or a sentence). 
*/ text?: string; - position?: Hume.expressionMeasurement.TextPosition; - emotions?: Hume.expressionMeasurement.EmotionEmbedding; - sentiment?: Hume.expressionMeasurement.Sentiment; - toxicity?: Hume.expressionMeasurement.Toxicity; + position?: Hume.expressionMeasurement.stream.TextPosition; + emotions?: Hume.expressionMeasurement.stream.EmotionEmbedding; + sentiment?: Hume.expressionMeasurement.stream.Sentiment; + toxicity?: Hume.expressionMeasurement.stream.Toxicity; } diff --git a/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsProsody.ts b/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsProsody.ts index 188d6342..58a3c68d 100644 --- a/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsProsody.ts +++ b/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsProsody.ts @@ -8,5 +8,5 @@ import * as Hume from "../../../../../index"; * Response for the speech prosody emotion model. */ export interface StreamModelPredictionsProsody { - predictions?: Hume.expressionMeasurement.StreamModelPredictionsProsodyPredictionsItem[]; + predictions?: Hume.expressionMeasurement.stream.StreamModelPredictionsProsodyPredictionsItem[]; } diff --git a/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsProsodyPredictionsItem.ts b/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsProsodyPredictionsItem.ts index fdf20142..92b4d67c 100644 --- a/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsProsodyPredictionsItem.ts +++ b/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsProsodyPredictionsItem.ts @@ -5,6 +5,6 @@ import * as Hume from "../../../../../index"; export interface StreamModelPredictionsProsodyPredictionsItem { - time?: Hume.expressionMeasurement.TimeRange; - emotions?: Hume.expressionMeasurement.EmotionEmbedding; + time?: Hume.expressionMeasurement.stream.TimeRange; + emotions?: Hume.expressionMeasurement.stream.EmotionEmbedding; } diff --git a/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelsEndpointPayload.ts b/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelsEndpointPayload.ts index 16aa5d64..b9988395 100644 --- a/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelsEndpointPayload.ts +++ b/src/api/resources/expressionMeasurement/resources/stream/types/StreamModelsEndpointPayload.ts @@ -10,7 +10,7 @@ import * as Hume from "../../../../../index"; export interface StreamModelsEndpointPayload { data?: string; /** Configuration used to specify which models should be used and with what settings. */ - models?: Hume.expressionMeasurement.Config; + models?: Hume.expressionMeasurement.stream.Config; /** * Length in milliseconds of streaming sliding window. * diff --git a/src/api/resources/expressionMeasurement/resources/stream/types/StreamWarningMessage.ts b/src/api/resources/expressionMeasurement/resources/stream/types/StreamWarningMessage.ts index 6e059126..e91e1294 100644 --- a/src/api/resources/expressionMeasurement/resources/stream/types/StreamWarningMessage.ts +++ b/src/api/resources/expressionMeasurement/resources/stream/types/StreamWarningMessage.ts @@ -15,5 +15,5 @@ export interface StreamWarningMessage { /** If a payload ID was passed in the request, the same payload ID will be sent back in the response body. 
*/ payloadId?: string; /** If the job_details flag was set in the request, details about the current streaming job will be returned in the response body. */ - jobDetails?: Hume.expressionMeasurement.StreamWarningMessageJobDetails; + jobDetails?: Hume.expressionMeasurement.stream.StreamWarningMessageJobDetails; } diff --git a/src/api/resources/expressionMeasurement/resources/stream/types/SubscribeEvent.ts b/src/api/resources/expressionMeasurement/resources/stream/types/SubscribeEvent.ts index a5771b5a..eca296c5 100644 --- a/src/api/resources/expressionMeasurement/resources/stream/types/SubscribeEvent.ts +++ b/src/api/resources/expressionMeasurement/resources/stream/types/SubscribeEvent.ts @@ -7,10 +7,10 @@ import * as Hume from "../../../../../index"; export type SubscribeEvent = /** * Model predictions */ - | Hume.expressionMeasurement.Config + | Hume.expressionMeasurement.stream.Config /** * Error message */ - | Hume.expressionMeasurement.StreamErrorMessage + | Hume.expressionMeasurement.stream.StreamErrorMessage /** * Warning message */ - | Hume.expressionMeasurement.StreamWarningMessage; + | Hume.expressionMeasurement.stream.StreamWarningMessage; diff --git a/src/api/resources/expressionMeasurement/resources/stream/types/Toxicity.ts b/src/api/resources/expressionMeasurement/resources/stream/types/Toxicity.ts index 22848ef4..5a44c9c1 100644 --- a/src/api/resources/expressionMeasurement/resources/stream/types/Toxicity.ts +++ b/src/api/resources/expressionMeasurement/resources/stream/types/Toxicity.ts @@ -7,4 +7,4 @@ import * as Hume from "../../../../../index"; /** * Toxicity predictions returned as probabilities that the text can be classified into the following categories: toxic, severe_toxic, obscene, threat, insult, and identity_hate. */ -export type Toxicity = Hume.expressionMeasurement.ToxicityItem[]; +export type Toxicity = Hume.expressionMeasurement.stream.ToxicityItem[]; diff --git a/src/serialization/resources/empathicVoice/resources/configs/client/requests/PostedConfig.ts b/src/serialization/resources/empathicVoice/resources/configs/client/requests/PostedConfig.ts index 70b2e8de..888d5f41 100644 --- a/src/serialization/resources/empathicVoice/resources/configs/client/requests/PostedConfig.ts +++ b/src/serialization/resources/empathicVoice/resources/configs/client/requests/PostedConfig.ts @@ -5,7 +5,7 @@ import * as serializers from "../../../../../../index"; import * as Hume from "../../../../../../../api/index"; import * as core from "../../../../../../../core"; -import { PostedPromptSpec } from "../../../../types/PostedPromptSpec"; +import { PostedConfigPromptSpec } from "../../../../types/PostedConfigPromptSpec"; import { PostedVoice } from "../../../../types/PostedVoice"; import { PostedLanguageModel } from "../../../../types/PostedLanguageModel"; import { PostedEllmModel } from "../../../../types/PostedEllmModel"; @@ -18,9 +18,10 @@ export const PostedConfig: core.serialization.Schema< serializers.empathicVoice.PostedConfig.Raw, Hume.empathicVoice.PostedConfig > = core.serialization.object({ + eviVersion: core.serialization.property("evi_version", core.serialization.string()), name: core.serialization.string(), versionDescription: core.serialization.property("version_description", core.serialization.string().optional()), - prompt: PostedPromptSpec.optional(), + prompt: PostedConfigPromptSpec.optional(), voice: PostedVoice.optional(), languageModel: core.serialization.property("language_model", PostedLanguageModel.optional()), ellmModel: 
core.serialization.property("ellm_model", PostedEllmModel.optional()), @@ -35,9 +36,10 @@ export const PostedConfig: core.serialization.Schema< export declare namespace PostedConfig { interface Raw { + evi_version: string; name: string; version_description?: string | null; - prompt?: PostedPromptSpec.Raw | null; + prompt?: PostedConfigPromptSpec.Raw | null; voice?: PostedVoice.Raw | null; language_model?: PostedLanguageModel.Raw | null; ellm_model?: PostedEllmModel.Raw | null; diff --git a/src/serialization/resources/empathicVoice/resources/configs/client/requests/PostedConfigVersion.ts b/src/serialization/resources/empathicVoice/resources/configs/client/requests/PostedConfigVersion.ts index bf8504b0..40482bbe 100644 --- a/src/serialization/resources/empathicVoice/resources/configs/client/requests/PostedConfigVersion.ts +++ b/src/serialization/resources/empathicVoice/resources/configs/client/requests/PostedConfigVersion.ts @@ -5,7 +5,7 @@ import * as serializers from "../../../../../../index"; import * as Hume from "../../../../../../../api/index"; import * as core from "../../../../../../../core"; -import { PostedPromptSpec } from "../../../../types/PostedPromptSpec"; +import { PostedConfigPromptSpec } from "../../../../types/PostedConfigPromptSpec"; import { PostedVoice } from "../../../../types/PostedVoice"; import { PostedLanguageModel } from "../../../../types/PostedLanguageModel"; import { PostedEllmModel } from "../../../../types/PostedEllmModel"; @@ -18,8 +18,9 @@ export const PostedConfigVersion: core.serialization.Schema< serializers.empathicVoice.PostedConfigVersion.Raw, Hume.empathicVoice.PostedConfigVersion > = core.serialization.object({ + eviVersion: core.serialization.property("evi_version", core.serialization.string()), versionDescription: core.serialization.property("version_description", core.serialization.string().optional()), - prompt: PostedPromptSpec.optional(), + prompt: PostedConfigPromptSpec.optional(), voice: PostedVoice.optional(), languageModel: core.serialization.property("language_model", PostedLanguageModel.optional()), ellmModel: core.serialization.property("ellm_model", PostedEllmModel.optional()), @@ -34,8 +35,9 @@ export const PostedConfigVersion: core.serialization.Schema< export declare namespace PostedConfigVersion { interface Raw { + evi_version: string; version_description?: string | null; - prompt?: PostedPromptSpec.Raw | null; + prompt?: PostedConfigPromptSpec.Raw | null; voice?: PostedVoice.Raw | null; language_model?: PostedLanguageModel.Raw | null; ellm_model?: PostedEllmModel.Raw | null; diff --git a/src/serialization/resources/empathicVoice/resources/customVoices/client/index.ts b/src/serialization/resources/empathicVoice/resources/customVoices/client/index.ts new file mode 100644 index 00000000..415726b7 --- /dev/null +++ b/src/serialization/resources/empathicVoice/resources/customVoices/client/index.ts @@ -0,0 +1 @@ +export * from "./requests"; diff --git a/src/serialization/resources/empathicVoice/types/PostedCustomVoiceName.ts b/src/serialization/resources/empathicVoice/resources/customVoices/client/requests/PostedCustomVoiceName.ts similarity index 60% rename from src/serialization/resources/empathicVoice/types/PostedCustomVoiceName.ts rename to src/serialization/resources/empathicVoice/resources/customVoices/client/requests/PostedCustomVoiceName.ts index 330a9732..8d770b3a 100644 --- a/src/serialization/resources/empathicVoice/types/PostedCustomVoiceName.ts +++ 
b/src/serialization/resources/empathicVoice/resources/customVoices/client/requests/PostedCustomVoiceName.ts @@ -2,11 +2,11 @@ * This file was auto-generated by Fern from our API Definition. */ -import * as serializers from "../../../index"; -import * as Hume from "../../../../api/index"; -import * as core from "../../../../core"; +import * as serializers from "../../../../../../index"; +import * as Hume from "../../../../../../../api/index"; +import * as core from "../../../../../../../core"; -export const PostedCustomVoiceName: core.serialization.ObjectSchema< +export const PostedCustomVoiceName: core.serialization.Schema< serializers.empathicVoice.PostedCustomVoiceName.Raw, Hume.empathicVoice.PostedCustomVoiceName > = core.serialization.object({ diff --git a/src/serialization/resources/empathicVoice/resources/customVoices/client/requests/index.ts b/src/serialization/resources/empathicVoice/resources/customVoices/client/requests/index.ts new file mode 100644 index 00000000..41dd2c73 --- /dev/null +++ b/src/serialization/resources/empathicVoice/resources/customVoices/client/requests/index.ts @@ -0,0 +1 @@ +export { PostedCustomVoiceName } from "./PostedCustomVoiceName"; diff --git a/src/serialization/resources/empathicVoice/resources/customVoices/index.ts b/src/serialization/resources/empathicVoice/resources/customVoices/index.ts new file mode 100644 index 00000000..5ec76921 --- /dev/null +++ b/src/serialization/resources/empathicVoice/resources/customVoices/index.ts @@ -0,0 +1 @@ +export * from "./client"; diff --git a/src/serialization/resources/empathicVoice/resources/index.ts b/src/serialization/resources/empathicVoice/resources/index.ts index 48f1eddb..89364bcd 100644 --- a/src/serialization/resources/empathicVoice/resources/index.ts +++ b/src/serialization/resources/empathicVoice/resources/index.ts @@ -4,5 +4,7 @@ export * as tools from "./tools"; export * as prompts from "./prompts"; export * from "./tools/client/requests"; export * from "./prompts/client/requests"; +export * as customVoices from "./customVoices"; +export * from "./customVoices/client/requests"; export * as configs from "./configs"; export * from "./configs/client/requests"; diff --git a/src/serialization/resources/empathicVoice/types/ErrorResponse.ts b/src/serialization/resources/empathicVoice/types/ErrorResponse.ts new file mode 100644 index 00000000..d4fc3e5a --- /dev/null +++ b/src/serialization/resources/empathicVoice/types/ErrorResponse.ts @@ -0,0 +1,22 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../index"; +import * as Hume from "../../../../api/index"; +import * as core from "../../../../core"; + +export const ErrorResponse: core.serialization.ObjectSchema< + serializers.empathicVoice.ErrorResponse.Raw, + Hume.empathicVoice.ErrorResponse +> = core.serialization.object({ + error: core.serialization.string().optional(), + message: core.serialization.string().optional(), +}); + +export declare namespace ErrorResponse { + interface Raw { + error?: string | null; + message?: string | null; + } +} diff --git a/src/serialization/resources/empathicVoice/types/PostedConfigPromptSpec.ts b/src/serialization/resources/empathicVoice/types/PostedConfigPromptSpec.ts new file mode 100644 index 00000000..37894222 --- /dev/null +++ b/src/serialization/resources/empathicVoice/types/PostedConfigPromptSpec.ts @@ -0,0 +1,24 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../index"; +import * as Hume from "../../../../api/index"; +import * as core from "../../../../core"; + +export const PostedConfigPromptSpec: core.serialization.ObjectSchema< + serializers.empathicVoice.PostedConfigPromptSpec.Raw, + Hume.empathicVoice.PostedConfigPromptSpec +> = core.serialization.object({ + id: core.serialization.string().optional(), + version: core.serialization.number().optional(), + text: core.serialization.string().optional(), +}); + +export declare namespace PostedConfigPromptSpec { + interface Raw { + id?: string | null; + version?: number | null; + text?: string | null; + } +} diff --git a/src/serialization/resources/empathicVoice/types/PostedCustomVoice.ts b/src/serialization/resources/empathicVoice/types/PostedCustomVoice.ts index 87f4dee5..8d8e8b02 100644 --- a/src/serialization/resources/empathicVoice/types/PostedCustomVoice.ts +++ b/src/serialization/resources/empathicVoice/types/PostedCustomVoice.ts @@ -5,26 +5,27 @@ import * as serializers from "../../../index"; import * as Hume from "../../../../api/index"; import * as core from "../../../../core"; +import { PostedCustomVoiceBaseVoice } from "./PostedCustomVoiceBaseVoice"; +import { PostedCustomVoiceParameters } from "./PostedCustomVoiceParameters"; export const PostedCustomVoice: core.serialization.ObjectSchema< serializers.empathicVoice.PostedCustomVoice.Raw, Hume.empathicVoice.PostedCustomVoice > = core.serialization.object({ name: core.serialization.string(), - baseVoice: core.serialization.property("base_voice", core.serialization.string()), - speechRateMultiplier: core.serialization.property("speech_rate_multiplier", core.serialization.number().optional()), - parameterModel: core.serialization.property("parameter_model", core.serialization.string()), - parameters: core.serialization - .record(core.serialization.string(), core.serialization.number().optional()) - .optional(), + baseVoice: core.serialization.property("base_voice", PostedCustomVoiceBaseVoice), + parameterModel: core.serialization.property( + "parameter_model", + core.serialization.stringLiteral("20240715-4parameter") + ), + parameters: PostedCustomVoiceParameters.optional(), }); export declare namespace PostedCustomVoice { interface Raw { name: string; - base_voice: string; - speech_rate_multiplier?: number | null; - parameter_model: string; - parameters?: Record | null; + base_voice: PostedCustomVoiceBaseVoice.Raw; + parameter_model: "20240715-4parameter"; + parameters?: PostedCustomVoiceParameters.Raw | null; } } diff --git a/src/serialization/resources/empathicVoice/types/PostedCustomVoiceBaseVoice.ts b/src/serialization/resources/empathicVoice/types/PostedCustomVoiceBaseVoice.ts new file mode 100644 index 00000000..652a8217 --- /dev/null +++ b/src/serialization/resources/empathicVoice/types/PostedCustomVoiceBaseVoice.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../index"; +import * as Hume from "../../../../api/index"; +import * as core from "../../../../core"; + +export const PostedCustomVoiceBaseVoice: core.serialization.Schema< + serializers.empathicVoice.PostedCustomVoiceBaseVoice.Raw, + Hume.empathicVoice.PostedCustomVoiceBaseVoice +> = core.serialization.enum_(["ITO", "KORA", "DACHER", "AURA", "FINN", "STELLA", "WHIMSY"]); + +export declare namespace PostedCustomVoiceBaseVoice { + type Raw = "ITO" | "KORA" | "DACHER" | "AURA" | "FINN" | "STELLA" | "WHIMSY"; +} diff --git a/src/serialization/resources/empathicVoice/types/PostedCustomVoiceParameters.ts b/src/serialization/resources/empathicVoice/types/PostedCustomVoiceParameters.ts new file mode 100644 index 00000000..90a722f9 --- /dev/null +++ b/src/serialization/resources/empathicVoice/types/PostedCustomVoiceParameters.ts @@ -0,0 +1,26 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../index"; +import * as Hume from "../../../../api/index"; +import * as core from "../../../../core"; + +export const PostedCustomVoiceParameters: core.serialization.ObjectSchema< + serializers.empathicVoice.PostedCustomVoiceParameters.Raw, + Hume.empathicVoice.PostedCustomVoiceParameters +> = core.serialization.object({ + gender: core.serialization.number().optional(), + huskiness: core.serialization.number().optional(), + nasality: core.serialization.number().optional(), + pitch: core.serialization.number().optional(), +}); + +export declare namespace PostedCustomVoiceParameters { + interface Raw { + gender?: number | null; + huskiness?: number | null; + nasality?: number | null; + pitch?: number | null; + } +} diff --git a/src/serialization/resources/empathicVoice/types/PostedPromptSpec.ts b/src/serialization/resources/empathicVoice/types/PostedPromptSpec.ts index b632f85c..e3b4f221 100644 --- a/src/serialization/resources/empathicVoice/types/PostedPromptSpec.ts +++ b/src/serialization/resources/empathicVoice/types/PostedPromptSpec.ts @@ -10,13 +10,11 @@ export const PostedPromptSpec: core.serialization.ObjectSchema< serializers.empathicVoice.PostedPromptSpec.Raw, Hume.empathicVoice.PostedPromptSpec > = core.serialization.object({ - id: core.serialization.string(), - version: core.serialization.number().optional(), + version: core.serialization.unknown().optional(), }); export declare namespace PostedPromptSpec { interface Raw { - id: string; - version?: number | null; + version?: unknown | null; } } diff --git a/src/serialization/resources/empathicVoice/types/PostedVoice.ts b/src/serialization/resources/empathicVoice/types/PostedVoice.ts index e91101d0..41cf0d67 100644 --- a/src/serialization/resources/empathicVoice/types/PostedVoice.ts +++ b/src/serialization/resources/empathicVoice/types/PostedVoice.ts @@ -5,19 +5,21 @@ import * as serializers from "../../../index"; import * as Hume from "../../../../api/index"; import * as core from "../../../../core"; -import { PostedVoiceName } from "./PostedVoiceName"; +import { PostedCustomVoice } from "./PostedCustomVoice"; export const PostedVoice: core.serialization.ObjectSchema< serializers.empathicVoice.PostedVoice.Raw, Hume.empathicVoice.PostedVoice > = core.serialization.object({ provider: core.serialization.stringLiteral("HUME_AI"), - name: PostedVoiceName.optional(), + name: core.serialization.string().optional(), + customVoice: core.serialization.property("custom_voice", PostedCustomVoice.optional()), }); export declare namespace PostedVoice { 
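// A hedged sketch of the voice payload shape implied by these serializers,
// assuming the client-facing `Hume.empathicVoice` types mirror the camelCase
// property names used above. The variable name and parameter values are
// illustrative; the "KORA" base voice and the "20240715-4parameter" literal
// come from the schemas in this diff.
import * as Hume from "../../../../api/index";

const voiceRequest: Hume.empathicVoice.PostedVoice = {
    provider: "HUME_AI",
    customVoice: {
        name: "MY_CUSTOM_VOICE",
        baseVoice: "KORA",
        parameterModel: "20240715-4parameter",
        // gender, huskiness, nasality, and pitch are all optional numbers
        parameters: { pitch: 0.2, nasality: -0.3 },
    },
};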
interface Raw { provider: "HUME_AI"; - name?: PostedVoiceName.Raw | null; + name?: string | null; + custom_voice?: PostedCustomVoice.Raw | null; } } diff --git a/src/serialization/resources/empathicVoice/types/PostedVoiceName.ts b/src/serialization/resources/empathicVoice/types/PostedVoiceName.ts deleted file mode 100644 index 28e08286..00000000 --- a/src/serialization/resources/empathicVoice/types/PostedVoiceName.ts +++ /dev/null @@ -1,16 +0,0 @@ -/** - * This file was auto-generated by Fern from our API Definition. - */ - -import * as serializers from "../../../index"; -import * as Hume from "../../../../api/index"; -import * as core from "../../../../core"; - -export const PostedVoiceName: core.serialization.Schema< - serializers.empathicVoice.PostedVoiceName.Raw, - Hume.empathicVoice.PostedVoiceName -> = core.serialization.enum_(["ITO", "DACHER", "KORA"]); - -export declare namespace PostedVoiceName { - type Raw = "ITO" | "DACHER" | "KORA"; -} diff --git a/src/serialization/resources/empathicVoice/types/ReturnConfig.ts b/src/serialization/resources/empathicVoice/types/ReturnConfig.ts index 2919ffe4..399d8e72 100644 --- a/src/serialization/resources/empathicVoice/types/ReturnConfig.ts +++ b/src/serialization/resources/empathicVoice/types/ReturnConfig.ts @@ -20,6 +20,7 @@ export const ReturnConfig: core.serialization.ObjectSchema< > = core.serialization.object({ id: core.serialization.string().optional(), version: core.serialization.number().optional(), + eviVersion: core.serialization.property("evi_version", core.serialization.string().optional()), versionDescription: core.serialization.property("version_description", core.serialization.string().optional()), name: core.serialization.string().optional(), createdOn: core.serialization.property("created_on", core.serialization.number().optional()), @@ -41,6 +42,7 @@ export declare namespace ReturnConfig { interface Raw { id?: string | null; version?: number | null; + evi_version?: string | null; version_description?: string | null; name?: string | null; created_on?: number | null; diff --git a/src/serialization/resources/empathicVoice/types/ReturnCustomVoice.ts b/src/serialization/resources/empathicVoice/types/ReturnCustomVoice.ts index fb331118..ab22a948 100644 --- a/src/serialization/resources/empathicVoice/types/ReturnCustomVoice.ts +++ b/src/serialization/resources/empathicVoice/types/ReturnCustomVoice.ts @@ -5,6 +5,8 @@ import * as serializers from "../../../index"; import * as Hume from "../../../../api/index"; import * as core from "../../../../core"; +import { ReturnCustomVoiceBaseVoice } from "./ReturnCustomVoiceBaseVoice"; +import { ReturnCustomVoiceParameters } from "./ReturnCustomVoiceParameters"; export const ReturnCustomVoice: core.serialization.ObjectSchema< serializers.empathicVoice.ReturnCustomVoice.Raw, @@ -15,10 +17,12 @@ export const ReturnCustomVoice: core.serialization.ObjectSchema< name: core.serialization.string(), createdOn: core.serialization.property("created_on", core.serialization.number()), modifiedOn: core.serialization.property("modified_on", core.serialization.number()), - baseVoice: core.serialization.property("base_voice", core.serialization.string()), - speechRateMultiplier: core.serialization.property("speech_rate_multiplier", core.serialization.number().optional()), - parameterModel: core.serialization.property("parameter_model", core.serialization.string()), - parameters: core.serialization.record(core.serialization.string(), core.serialization.number()), + baseVoice: 
core.serialization.property("base_voice", ReturnCustomVoiceBaseVoice), + parameterModel: core.serialization.property( + "parameter_model", + core.serialization.stringLiteral("20240715-4parameter") + ), + parameters: ReturnCustomVoiceParameters, }); export declare namespace ReturnCustomVoice { @@ -28,9 +32,8 @@ export declare namespace ReturnCustomVoice { name: string; created_on: number; modified_on: number; - base_voice: string; - speech_rate_multiplier?: number | null; - parameter_model: string; - parameters: Record; + base_voice: ReturnCustomVoiceBaseVoice.Raw; + parameter_model: "20240715-4parameter"; + parameters: ReturnCustomVoiceParameters.Raw; } } diff --git a/src/serialization/resources/empathicVoice/types/ReturnCustomVoiceBaseVoice.ts b/src/serialization/resources/empathicVoice/types/ReturnCustomVoiceBaseVoice.ts new file mode 100644 index 00000000..891cc9e9 --- /dev/null +++ b/src/serialization/resources/empathicVoice/types/ReturnCustomVoiceBaseVoice.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../index"; +import * as Hume from "../../../../api/index"; +import * as core from "../../../../core"; + +export const ReturnCustomVoiceBaseVoice: core.serialization.Schema< + serializers.empathicVoice.ReturnCustomVoiceBaseVoice.Raw, + Hume.empathicVoice.ReturnCustomVoiceBaseVoice +> = core.serialization.enum_(["ITO", "KORA", "DACHER", "AURA", "FINN", "STELLA", "WHIMSY"]); + +export declare namespace ReturnCustomVoiceBaseVoice { + type Raw = "ITO" | "KORA" | "DACHER" | "AURA" | "FINN" | "STELLA" | "WHIMSY"; +} diff --git a/src/serialization/resources/empathicVoice/types/ReturnCustomVoiceParameters.ts b/src/serialization/resources/empathicVoice/types/ReturnCustomVoiceParameters.ts new file mode 100644 index 00000000..f5a76ad9 --- /dev/null +++ b/src/serialization/resources/empathicVoice/types/ReturnCustomVoiceParameters.ts @@ -0,0 +1,26 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../index"; +import * as Hume from "../../../../api/index"; +import * as core from "../../../../core"; + +export const ReturnCustomVoiceParameters: core.serialization.ObjectSchema< + serializers.empathicVoice.ReturnCustomVoiceParameters.Raw, + Hume.empathicVoice.ReturnCustomVoiceParameters +> = core.serialization.object({ + gender: core.serialization.number().optional(), + huskiness: core.serialization.number().optional(), + nasality: core.serialization.number().optional(), + pitch: core.serialization.number().optional(), +}); + +export declare namespace ReturnCustomVoiceParameters { + interface Raw { + gender?: number | null; + huskiness?: number | null; + nasality?: number | null; + pitch?: number | null; + } +} diff --git a/src/serialization/resources/empathicVoice/types/ReturnVoice.ts b/src/serialization/resources/empathicVoice/types/ReturnVoice.ts index f8d69cc4..a0918928 100644 --- a/src/serialization/resources/empathicVoice/types/ReturnVoice.ts +++ b/src/serialization/resources/empathicVoice/types/ReturnVoice.ts @@ -5,19 +5,21 @@ import * as serializers from "../../../index"; import * as Hume from "../../../../api/index"; import * as core from "../../../../core"; -import { ReturnVoiceName } from "./ReturnVoiceName"; +import { ReturnCustomVoice } from "./ReturnCustomVoice"; export const ReturnVoice: core.serialization.ObjectSchema< serializers.empathicVoice.ReturnVoice.Raw, Hume.empathicVoice.ReturnVoice > = core.serialization.object({ provider: core.serialization.stringLiteral("HUME_AI"), - name: ReturnVoiceName.optional(), + name: core.serialization.string().optional(), + customVoice: core.serialization.property("custom_voice", ReturnCustomVoice), }); export declare namespace ReturnVoice { interface Raw { provider: "HUME_AI"; - name?: ReturnVoiceName.Raw | null; + name?: string | null; + custom_voice: ReturnCustomVoice.Raw; } } diff --git a/src/serialization/resources/empathicVoice/types/ReturnVoiceName.ts b/src/serialization/resources/empathicVoice/types/ReturnVoiceName.ts deleted file mode 100644 index 680b4bf2..00000000 --- a/src/serialization/resources/empathicVoice/types/ReturnVoiceName.ts +++ /dev/null @@ -1,16 +0,0 @@ -/** - * This file was auto-generated by Fern from our API Definition. 
- */ - -import * as serializers from "../../../index"; -import * as Hume from "../../../../api/index"; -import * as core from "../../../../core"; - -export const ReturnVoiceName: core.serialization.Schema< - serializers.empathicVoice.ReturnVoiceName.Raw, - Hume.empathicVoice.ReturnVoiceName -> = core.serialization.enum_(["ITO", "DACHER", "KORA"]); - -export declare namespace ReturnVoiceName { - type Raw = "ITO" | "DACHER" | "KORA"; -} diff --git a/src/serialization/resources/empathicVoice/types/index.ts b/src/serialization/resources/empathicVoice/types/index.ts index 2768f85f..c9338062 100644 --- a/src/serialization/resources/empathicVoice/types/index.ts +++ b/src/serialization/resources/empathicVoice/types/index.ts @@ -1,24 +1,28 @@ +export * from "./ErrorResponse"; export * from "./ReturnUserDefinedToolToolType"; export * from "./ReturnUserDefinedToolVersionType"; export * from "./ReturnUserDefinedTool"; export * from "./ReturnPromptVersionType"; export * from "./ReturnPrompt"; +export * from "./PostedCustomVoiceBaseVoice"; +export * from "./PostedCustomVoiceParameters"; export * from "./PostedCustomVoice"; +export * from "./ReturnCustomVoiceBaseVoice"; +export * from "./ReturnCustomVoiceParameters"; export * from "./ReturnCustomVoice"; export * from "./PostedBuiltinToolName"; export * from "./PostedBuiltinTool"; +export * from "./PostedConfigPromptSpec"; export * from "./PostedEllmModel"; export * from "./PostedEventMessageSpec"; export * from "./PostedEventMessageSpecs"; export * from "./PostedLanguageModelModelProvider"; export * from "./PostedLanguageModel"; -export * from "./PostedPromptSpec"; export * from "./PostedTimeoutSpec"; export * from "./PostedTimeoutSpecsInactivity"; export * from "./PostedTimeoutSpecsMaxDuration"; export * from "./PostedTimeoutSpecs"; export * from "./PostedUserDefinedToolSpec"; -export * from "./PostedVoiceName"; export * from "./PostedVoice"; export * from "./ReturnBuiltinToolToolType"; export * from "./ReturnBuiltinTool"; @@ -30,9 +34,7 @@ export * from "./ReturnLanguageModelModelProvider"; export * from "./ReturnLanguageModel"; export * from "./ReturnTimeoutSpec"; export * from "./ReturnTimeoutSpecs"; -export * from "./ReturnVoiceName"; export * from "./ReturnVoice"; -export * from "./PostedCustomVoiceName"; export * from "./ReturnPagedUserDefinedTools"; export * from "./ReturnPagedPrompts"; export * from "./ReturnPagedCustomVoices"; @@ -56,6 +58,7 @@ export * from "./ReturnPagedChatGroups"; export * from "./ReturnChatGroupPagedChats"; export * from "./ReturnChatGroupPagedEventsPaginationDirection"; export * from "./ReturnChatGroupPagedEvents"; +export * from "./PostedPromptSpec"; export * from "./AssistantInput"; export * from "./AudioConfiguration"; export * from "./AudioInput"; diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/client/getJobPredictions.ts b/src/serialization/resources/expressionMeasurement/resources/batch/client/getJobPredictions.ts index f899db26..989a940b 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/client/getJobPredictions.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/client/getJobPredictions.ts @@ -9,7 +9,7 @@ import { UnionPredictResult } from "../types/UnionPredictResult"; export const Response: core.serialization.Schema< serializers.expressionMeasurement.batch.getJobPredictions.Response.Raw, - Hume.expressionMeasurement.UnionPredictResult[] + Hume.expressionMeasurement.batch.UnionPredictResult[] > = 
core.serialization.list(UnionPredictResult); export declare namespace Response { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/client/listJobs.ts b/src/serialization/resources/expressionMeasurement/resources/batch/client/listJobs.ts index 5e6c2834..caf27cd1 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/client/listJobs.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/client/listJobs.ts @@ -9,7 +9,7 @@ import { UnionJob } from "../types/UnionJob"; export const Response: core.serialization.Schema< serializers.expressionMeasurement.batch.listJobs.Response.Raw, - Hume.expressionMeasurement.UnionJob[] + Hume.expressionMeasurement.batch.UnionJob[] > = core.serialization.list(UnionJob); export declare namespace Response { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/Alternative.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/Alternative.ts index fc04816e..29e9fbf7 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/Alternative.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/Alternative.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const Alternative: core.serialization.Schema< - serializers.expressionMeasurement.Alternative.Raw, - Hume.expressionMeasurement.Alternative + serializers.expressionMeasurement.batch.Alternative.Raw, + Hume.expressionMeasurement.batch.Alternative > = core.serialization.stringLiteral("language_only"); export declare namespace Alternative { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/Bcp47Tag.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/Bcp47Tag.ts index 3aba055e..40cfd8e5 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/Bcp47Tag.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/Bcp47Tag.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const Bcp47Tag: core.serialization.Schema< - serializers.expressionMeasurement.Bcp47Tag.Raw, - Hume.expressionMeasurement.Bcp47Tag + serializers.expressionMeasurement.batch.Bcp47Tag.Raw, + Hume.expressionMeasurement.batch.Bcp47Tag > = core.serialization.enum_([ "zh", "da", diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/BoundingBox.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/BoundingBox.ts index d43e4604..d044db28 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/BoundingBox.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/BoundingBox.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const BoundingBox: core.serialization.ObjectSchema< - serializers.expressionMeasurement.BoundingBox.Raw, - Hume.expressionMeasurement.BoundingBox + serializers.expressionMeasurement.batch.BoundingBox.Raw, + Hume.expressionMeasurement.batch.BoundingBox > = core.serialization.object({ x: core.serialization.number(), y: core.serialization.number(), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/BurstPrediction.ts 
b/src/serialization/resources/expressionMeasurement/resources/batch/types/BurstPrediction.ts index 1a43ba3a..95ff8b93 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/BurstPrediction.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/BurstPrediction.ts @@ -10,8 +10,8 @@ import { EmotionScore } from "./EmotionScore"; import { DescriptionsScore } from "./DescriptionsScore"; export const BurstPrediction: core.serialization.ObjectSchema< - serializers.expressionMeasurement.BurstPrediction.Raw, - Hume.expressionMeasurement.BurstPrediction + serializers.expressionMeasurement.batch.BurstPrediction.Raw, + Hume.expressionMeasurement.batch.BurstPrediction > = core.serialization.object({ time: TimeInterval, emotions: core.serialization.list(EmotionScore), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/Classification.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/Classification.ts index 22a73c40..46771a23 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/Classification.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/Classification.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const Classification: core.serialization.Schema< - serializers.expressionMeasurement.Classification.Raw, - Hume.expressionMeasurement.Classification + serializers.expressionMeasurement.batch.Classification.Raw, + Hume.expressionMeasurement.batch.Classification > = core.serialization.record(core.serialization.string(), core.serialization.unknown()); export declare namespace Classification { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/CompletedEmbeddingGeneration.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/CompletedEmbeddingGeneration.ts index a13ec1be..50f7420f 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/CompletedEmbeddingGeneration.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/CompletedEmbeddingGeneration.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const CompletedEmbeddingGeneration: core.serialization.ObjectSchema< - serializers.expressionMeasurement.CompletedEmbeddingGeneration.Raw, - Hume.expressionMeasurement.CompletedEmbeddingGeneration + serializers.expressionMeasurement.batch.CompletedEmbeddingGeneration.Raw, + Hume.expressionMeasurement.batch.CompletedEmbeddingGeneration > = core.serialization.object({ createdTimestampMs: core.serialization.property("created_timestamp_ms", core.serialization.number()), startedTimestampMs: core.serialization.property("started_timestamp_ms", core.serialization.number()), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/CompletedInference.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/CompletedInference.ts index 6843f796..ff4a716a 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/CompletedInference.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/CompletedInference.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const CompletedInference: core.serialization.ObjectSchema< - 
serializers.expressionMeasurement.CompletedInference.Raw, - Hume.expressionMeasurement.CompletedInference + serializers.expressionMeasurement.batch.CompletedInference.Raw, + Hume.expressionMeasurement.batch.CompletedInference > = core.serialization.object({ createdTimestampMs: core.serialization.property("created_timestamp_ms", core.serialization.number()), startedTimestampMs: core.serialization.property("started_timestamp_ms", core.serialization.number()), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/CompletedState.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/CompletedState.ts index 48db6b4e..614645c4 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/CompletedState.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/CompletedState.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { CompletedInference } from "./CompletedInference"; export const CompletedState: core.serialization.ObjectSchema< - serializers.expressionMeasurement.CompletedState.Raw, - Hume.expressionMeasurement.CompletedState + serializers.expressionMeasurement.batch.CompletedState.Raw, + Hume.expressionMeasurement.batch.CompletedState > = core.serialization.object({}).extend(CompletedInference); export declare namespace CompletedState { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/CompletedTlInference.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/CompletedTlInference.ts index 6facbe23..e0ce3713 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/CompletedTlInference.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/CompletedTlInference.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const CompletedTlInference: core.serialization.ObjectSchema< - serializers.expressionMeasurement.CompletedTlInference.Raw, - Hume.expressionMeasurement.CompletedTlInference + serializers.expressionMeasurement.batch.CompletedTlInference.Raw, + Hume.expressionMeasurement.batch.CompletedTlInference > = core.serialization.object({ createdTimestampMs: core.serialization.property("created_timestamp_ms", core.serialization.number()), startedTimestampMs: core.serialization.property("started_timestamp_ms", core.serialization.number()), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/CompletedTraining.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/CompletedTraining.ts index a41c8533..bff7ab2f 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/CompletedTraining.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/CompletedTraining.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { TrainingCustomModel } from "./TrainingCustomModel"; export const CompletedTraining: core.serialization.ObjectSchema< - serializers.expressionMeasurement.CompletedTraining.Raw, - Hume.expressionMeasurement.CompletedTraining + serializers.expressionMeasurement.batch.CompletedTraining.Raw, + Hume.expressionMeasurement.batch.CompletedTraining > = core.serialization.object({ createdTimestampMs: core.serialization.property("created_timestamp_ms", core.serialization.number()), startedTimestampMs: core.serialization.property("started_timestamp_ms", 
core.serialization.number()), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/CustomModel.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/CustomModel.ts index 402973b0..b3342f4a 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/CustomModel.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/CustomModel.ts @@ -9,8 +9,8 @@ import { CustomModelId } from "./CustomModelId"; import { CustomModelVersionId } from "./CustomModelVersionId"; export const CustomModel: core.serialization.Schema< - serializers.expressionMeasurement.CustomModel.Raw, - Hume.expressionMeasurement.CustomModel + serializers.expressionMeasurement.batch.CustomModel.Raw, + Hume.expressionMeasurement.batch.CustomModel > = core.serialization.undiscriminatedUnion([CustomModelId, CustomModelVersionId]); export declare namespace CustomModel { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/CustomModelId.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/CustomModelId.ts index b8fff279..7ab78bd4 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/CustomModelId.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/CustomModelId.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const CustomModelId: core.serialization.ObjectSchema< - serializers.expressionMeasurement.CustomModelId.Raw, - Hume.expressionMeasurement.CustomModelId + serializers.expressionMeasurement.batch.CustomModelId.Raw, + Hume.expressionMeasurement.batch.CustomModelId > = core.serialization.object({ id: core.serialization.string(), }); diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/CustomModelPrediction.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/CustomModelPrediction.ts index 4e34a834..171f5a91 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/CustomModelPrediction.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/CustomModelPrediction.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const CustomModelPrediction: core.serialization.ObjectSchema< - serializers.expressionMeasurement.CustomModelPrediction.Raw, - Hume.expressionMeasurement.CustomModelPrediction + serializers.expressionMeasurement.batch.CustomModelPrediction.Raw, + Hume.expressionMeasurement.batch.CustomModelPrediction > = core.serialization.object({ output: core.serialization.record(core.serialization.string(), core.serialization.number()), error: core.serialization.string(), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/CustomModelRequest.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/CustomModelRequest.ts index 82ed6dc5..2beaff95 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/CustomModelRequest.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/CustomModelRequest.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { Tag } from "./Tag"; export const CustomModelRequest: core.serialization.ObjectSchema< - serializers.expressionMeasurement.CustomModelRequest.Raw, - Hume.expressionMeasurement.CustomModelRequest + 
serializers.expressionMeasurement.batch.CustomModelRequest.Raw, + Hume.expressionMeasurement.batch.CustomModelRequest > = core.serialization.object({ name: core.serialization.string(), description: core.serialization.string().optional(), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/CustomModelVersionId.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/CustomModelVersionId.ts index 026b86e1..c577ef97 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/CustomModelVersionId.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/CustomModelVersionId.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const CustomModelVersionId: core.serialization.ObjectSchema< - serializers.expressionMeasurement.CustomModelVersionId.Raw, - Hume.expressionMeasurement.CustomModelVersionId + serializers.expressionMeasurement.batch.CustomModelVersionId.Raw, + Hume.expressionMeasurement.batch.CustomModelVersionId > = core.serialization.object({ versionId: core.serialization.property("version_id", core.serialization.string()), }); diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/CustomModelsInferenceJob.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/CustomModelsInferenceJob.ts index 5da0b9c9..07cbdc58 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/CustomModelsInferenceJob.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/CustomModelsInferenceJob.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { JobTlInference } from "./JobTlInference"; export const CustomModelsInferenceJob: core.serialization.ObjectSchema< - serializers.expressionMeasurement.CustomModelsInferenceJob.Raw, - Hume.expressionMeasurement.CustomModelsInferenceJob + serializers.expressionMeasurement.batch.CustomModelsInferenceJob.Raw, + Hume.expressionMeasurement.batch.CustomModelsInferenceJob > = core.serialization .object({ type: core.serialization.string(), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/CustomModelsTrainingJob.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/CustomModelsTrainingJob.ts index 94463ceb..eb83f839 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/CustomModelsTrainingJob.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/CustomModelsTrainingJob.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { JobTraining } from "./JobTraining"; export const CustomModelsTrainingJob: core.serialization.ObjectSchema< - serializers.expressionMeasurement.CustomModelsTrainingJob.Raw, - Hume.expressionMeasurement.CustomModelsTrainingJob + serializers.expressionMeasurement.batch.CustomModelsTrainingJob.Raw, + Hume.expressionMeasurement.batch.CustomModelsTrainingJob > = core.serialization .object({ type: core.serialization.string(), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/Dataset.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/Dataset.ts index ba9f7a15..5ce71776 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/Dataset.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/Dataset.ts @@ -9,8 +9,8 @@ import { 
DatasetId } from "./DatasetId"; import { DatasetVersionId } from "./DatasetVersionId"; export const Dataset: core.serialization.Schema< - serializers.expressionMeasurement.Dataset.Raw, - Hume.expressionMeasurement.Dataset + serializers.expressionMeasurement.batch.Dataset.Raw, + Hume.expressionMeasurement.batch.Dataset > = core.serialization.undiscriminatedUnion([DatasetId, DatasetVersionId]); export declare namespace Dataset { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/DatasetId.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/DatasetId.ts index fd7894b1..720afb22 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/DatasetId.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/DatasetId.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const DatasetId: core.serialization.ObjectSchema< - serializers.expressionMeasurement.DatasetId.Raw, - Hume.expressionMeasurement.DatasetId + serializers.expressionMeasurement.batch.DatasetId.Raw, + Hume.expressionMeasurement.batch.DatasetId > = core.serialization.object({ id: core.serialization.string(), }); diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/DatasetVersionId.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/DatasetVersionId.ts index 1401d9ff..166d4b60 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/DatasetVersionId.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/DatasetVersionId.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const DatasetVersionId: core.serialization.ObjectSchema< - serializers.expressionMeasurement.DatasetVersionId.Raw, - Hume.expressionMeasurement.DatasetVersionId + serializers.expressionMeasurement.batch.DatasetVersionId.Raw, + Hume.expressionMeasurement.batch.DatasetVersionId > = core.serialization.object({ versionId: core.serialization.property("version_id", core.serialization.string()), }); diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/DescriptionsScore.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/DescriptionsScore.ts index 00840040..ee2f58fd 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/DescriptionsScore.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/DescriptionsScore.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const DescriptionsScore: core.serialization.ObjectSchema< - serializers.expressionMeasurement.DescriptionsScore.Raw, - Hume.expressionMeasurement.DescriptionsScore + serializers.expressionMeasurement.batch.DescriptionsScore.Raw, + Hume.expressionMeasurement.batch.DescriptionsScore > = core.serialization.object({ name: core.serialization.string(), score: core.serialization.number(), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/Direction.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/Direction.ts index e157ecb2..20321b4b 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/Direction.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/Direction.ts @@ 
-7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const Direction: core.serialization.Schema< - serializers.expressionMeasurement.Direction.Raw, - Hume.expressionMeasurement.Direction + serializers.expressionMeasurement.batch.Direction.Raw, + Hume.expressionMeasurement.batch.Direction > = core.serialization.enum_(["asc", "desc"]); export declare namespace Direction { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/EmbeddingGenerationBaseRequest.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/EmbeddingGenerationBaseRequest.ts index ec6290db..96c5b3ec 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/EmbeddingGenerationBaseRequest.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/EmbeddingGenerationBaseRequest.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { RegistryFileDetail } from "./RegistryFileDetail"; export const EmbeddingGenerationBaseRequest: core.serialization.ObjectSchema< - serializers.expressionMeasurement.EmbeddingGenerationBaseRequest.Raw, - Hume.expressionMeasurement.EmbeddingGenerationBaseRequest + serializers.expressionMeasurement.batch.EmbeddingGenerationBaseRequest.Raw, + Hume.expressionMeasurement.batch.EmbeddingGenerationBaseRequest > = core.serialization.object({ registryFileDetails: core.serialization.property( "registry_file_details", diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/EmbeddingGenerationJob.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/EmbeddingGenerationJob.ts index 2ef28daa..c06d72f2 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/EmbeddingGenerationJob.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/EmbeddingGenerationJob.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { JobEmbeddingGeneration } from "./JobEmbeddingGeneration"; export const EmbeddingGenerationJob: core.serialization.ObjectSchema< - serializers.expressionMeasurement.EmbeddingGenerationJob.Raw, - Hume.expressionMeasurement.EmbeddingGenerationJob + serializers.expressionMeasurement.batch.EmbeddingGenerationJob.Raw, + Hume.expressionMeasurement.batch.EmbeddingGenerationJob > = core.serialization .object({ type: core.serialization.string(), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/EmotionScore.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/EmotionScore.ts index 47a38795..e0db2666 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/EmotionScore.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/EmotionScore.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const EmotionScore: core.serialization.ObjectSchema< - serializers.expressionMeasurement.EmotionScore.Raw, - Hume.expressionMeasurement.EmotionScore + serializers.expressionMeasurement.batch.EmotionScore.Raw, + Hume.expressionMeasurement.batch.EmotionScore > = core.serialization.object({ name: core.serialization.string(), score: core.serialization.number(), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/Error_.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/Error_.ts index 
3c125e45..cdb71c22 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/Error_.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/Error_.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const Error_: core.serialization.ObjectSchema< - serializers.expressionMeasurement.Error_.Raw, - Hume.expressionMeasurement.Error_ + serializers.expressionMeasurement.batch.Error_.Raw, + Hume.expressionMeasurement.batch.Error_ > = core.serialization.object({ message: core.serialization.string(), file: core.serialization.string(), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/EvaluationArgs.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/EvaluationArgs.ts index d5947a52..efa0e57b 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/EvaluationArgs.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/EvaluationArgs.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { ValidationArgs } from "./ValidationArgs"; export const EvaluationArgs: core.serialization.ObjectSchema< - serializers.expressionMeasurement.EvaluationArgs.Raw, - Hume.expressionMeasurement.EvaluationArgs + serializers.expressionMeasurement.batch.EvaluationArgs.Raw, + Hume.expressionMeasurement.batch.EvaluationArgs > = core.serialization.object({ validation: ValidationArgs.optional(), }); diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/Face.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/Face.ts index 8bdce2ab..1e351117 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/Face.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/Face.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { Unconfigurable } from "./Unconfigurable"; export const Face: core.serialization.ObjectSchema< - serializers.expressionMeasurement.Face.Raw, - Hume.expressionMeasurement.Face + serializers.expressionMeasurement.batch.Face.Raw, + Hume.expressionMeasurement.batch.Face > = core.serialization.object({ fpsPred: core.serialization.property("fps_pred", core.serialization.number().optional()), probThreshold: core.serialization.property("prob_threshold", core.serialization.number().optional()), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/FacePrediction.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/FacePrediction.ts index cf5db3d5..1634d147 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/FacePrediction.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/FacePrediction.ts @@ -11,8 +11,8 @@ import { FacsScore } from "./FacsScore"; import { DescriptionsScore } from "./DescriptionsScore"; export const FacePrediction: core.serialization.ObjectSchema< - serializers.expressionMeasurement.FacePrediction.Raw, - Hume.expressionMeasurement.FacePrediction + serializers.expressionMeasurement.batch.FacePrediction.Raw, + Hume.expressionMeasurement.batch.FacePrediction > = core.serialization.object({ frame: core.serialization.number(), time: core.serialization.number(), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/FacemeshPrediction.ts 
b/src/serialization/resources/expressionMeasurement/resources/batch/types/FacemeshPrediction.ts index d9505e85..a25b6c83 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/FacemeshPrediction.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/FacemeshPrediction.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { EmotionScore } from "./EmotionScore"; export const FacemeshPrediction: core.serialization.ObjectSchema< - serializers.expressionMeasurement.FacemeshPrediction.Raw, - Hume.expressionMeasurement.FacemeshPrediction + serializers.expressionMeasurement.batch.FacemeshPrediction.Raw, + Hume.expressionMeasurement.batch.FacemeshPrediction > = core.serialization.object({ emotions: core.serialization.list(EmotionScore), }); diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/FacsScore.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/FacsScore.ts index 900be4e3..de244669 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/FacsScore.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/FacsScore.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const FacsScore: core.serialization.ObjectSchema< - serializers.expressionMeasurement.FacsScore.Raw, - Hume.expressionMeasurement.FacsScore + serializers.expressionMeasurement.batch.FacsScore.Raw, + Hume.expressionMeasurement.batch.FacsScore > = core.serialization.object({ name: core.serialization.string(), score: core.serialization.number(), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/Failed.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/Failed.ts index f3cf7505..fc7a9080 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/Failed.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/Failed.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const Failed: core.serialization.ObjectSchema< - serializers.expressionMeasurement.Failed.Raw, - Hume.expressionMeasurement.Failed + serializers.expressionMeasurement.batch.Failed.Raw, + Hume.expressionMeasurement.batch.Failed > = core.serialization.object({ createdTimestampMs: core.serialization.property("created_timestamp_ms", core.serialization.number()), startedTimestampMs: core.serialization.property("started_timestamp_ms", core.serialization.number()), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/FailedState.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/FailedState.ts index 18fee806..de12b286 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/FailedState.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/FailedState.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { Failed } from "./Failed"; export const FailedState: core.serialization.ObjectSchema< - serializers.expressionMeasurement.FailedState.Raw, - Hume.expressionMeasurement.FailedState + serializers.expressionMeasurement.batch.FailedState.Raw, + Hume.expressionMeasurement.batch.FailedState > = core.serialization.object({}).extend(Failed); export declare namespace FailedState { diff --git 
a/src/serialization/resources/expressionMeasurement/resources/batch/types/File_.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/File_.ts index e76e4961..c75d0ee3 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/File_.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/File_.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const File_: core.serialization.ObjectSchema< - serializers.expressionMeasurement.File_.Raw, - Hume.expressionMeasurement.File_ + serializers.expressionMeasurement.batch.File_.Raw, + Hume.expressionMeasurement.batch.File_ > = core.serialization.object({ filename: core.serialization.string().optional(), contentType: core.serialization.property("content_type", core.serialization.string().optional()), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/Granularity.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/Granularity.ts index 721ee11a..74b7311c 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/Granularity.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/Granularity.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const Granularity: core.serialization.Schema< - serializers.expressionMeasurement.Granularity.Raw, - Hume.expressionMeasurement.Granularity + serializers.expressionMeasurement.batch.Granularity.Raw, + Hume.expressionMeasurement.batch.Granularity > = core.serialization.enum_(["word", "sentence", "utterance", "conversational_turn"]); export declare namespace Granularity { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsBurstPrediction.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsBurstPrediction.ts index fbd6caed..1db6077c 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsBurstPrediction.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsBurstPrediction.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { BurstPrediction } from "./BurstPrediction"; export const GroupedPredictionsBurstPrediction: core.serialization.ObjectSchema< - serializers.expressionMeasurement.GroupedPredictionsBurstPrediction.Raw, - Hume.expressionMeasurement.GroupedPredictionsBurstPrediction + serializers.expressionMeasurement.batch.GroupedPredictionsBurstPrediction.Raw, + Hume.expressionMeasurement.batch.GroupedPredictionsBurstPrediction > = core.serialization.object({ id: core.serialization.string(), predictions: core.serialization.list(BurstPrediction), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsFacePrediction.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsFacePrediction.ts index b897c967..f4792b20 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsFacePrediction.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsFacePrediction.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { FacePrediction } from "./FacePrediction"; export const 
GroupedPredictionsFacePrediction: core.serialization.ObjectSchema< - serializers.expressionMeasurement.GroupedPredictionsFacePrediction.Raw, - Hume.expressionMeasurement.GroupedPredictionsFacePrediction + serializers.expressionMeasurement.batch.GroupedPredictionsFacePrediction.Raw, + Hume.expressionMeasurement.batch.GroupedPredictionsFacePrediction > = core.serialization.object({ id: core.serialization.string(), predictions: core.serialization.list(FacePrediction), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsFacemeshPrediction.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsFacemeshPrediction.ts index dde12b60..c482580a 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsFacemeshPrediction.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsFacemeshPrediction.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { FacemeshPrediction } from "./FacemeshPrediction"; export const GroupedPredictionsFacemeshPrediction: core.serialization.ObjectSchema< - serializers.expressionMeasurement.GroupedPredictionsFacemeshPrediction.Raw, - Hume.expressionMeasurement.GroupedPredictionsFacemeshPrediction + serializers.expressionMeasurement.batch.GroupedPredictionsFacemeshPrediction.Raw, + Hume.expressionMeasurement.batch.GroupedPredictionsFacemeshPrediction > = core.serialization.object({ id: core.serialization.string(), predictions: core.serialization.list(FacemeshPrediction), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsLanguagePrediction.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsLanguagePrediction.ts index 493fe152..cccf23e2 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsLanguagePrediction.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsLanguagePrediction.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { LanguagePrediction } from "./LanguagePrediction"; export const GroupedPredictionsLanguagePrediction: core.serialization.ObjectSchema< - serializers.expressionMeasurement.GroupedPredictionsLanguagePrediction.Raw, - Hume.expressionMeasurement.GroupedPredictionsLanguagePrediction + serializers.expressionMeasurement.batch.GroupedPredictionsLanguagePrediction.Raw, + Hume.expressionMeasurement.batch.GroupedPredictionsLanguagePrediction > = core.serialization.object({ id: core.serialization.string(), predictions: core.serialization.list(LanguagePrediction), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsNerPrediction.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsNerPrediction.ts index e8cf03f1..71bfd720 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsNerPrediction.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsNerPrediction.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { NerPrediction } from "./NerPrediction"; export const GroupedPredictionsNerPrediction: core.serialization.ObjectSchema< - serializers.expressionMeasurement.GroupedPredictionsNerPrediction.Raw, - 
Hume.expressionMeasurement.GroupedPredictionsNerPrediction + serializers.expressionMeasurement.batch.GroupedPredictionsNerPrediction.Raw, + Hume.expressionMeasurement.batch.GroupedPredictionsNerPrediction > = core.serialization.object({ id: core.serialization.string(), predictions: core.serialization.list(NerPrediction), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsProsodyPrediction.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsProsodyPrediction.ts index 95ebec51..0d8bde15 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsProsodyPrediction.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/GroupedPredictionsProsodyPrediction.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { ProsodyPrediction } from "./ProsodyPrediction"; export const GroupedPredictionsProsodyPrediction: core.serialization.ObjectSchema< - serializers.expressionMeasurement.GroupedPredictionsProsodyPrediction.Raw, - Hume.expressionMeasurement.GroupedPredictionsProsodyPrediction + serializers.expressionMeasurement.batch.GroupedPredictionsProsodyPrediction.Raw, + Hume.expressionMeasurement.batch.GroupedPredictionsProsodyPrediction > = core.serialization.object({ id: core.serialization.string(), predictions: core.serialization.list(ProsodyPrediction), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/InProgress.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/InProgress.ts index 24326481..916e3d75 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/InProgress.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/InProgress.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const InProgress: core.serialization.ObjectSchema< - serializers.expressionMeasurement.InProgress.Raw, - Hume.expressionMeasurement.InProgress + serializers.expressionMeasurement.batch.InProgress.Raw, + Hume.expressionMeasurement.batch.InProgress > = core.serialization.object({ createdTimestampMs: core.serialization.property("created_timestamp_ms", core.serialization.number()), startedTimestampMs: core.serialization.property("started_timestamp_ms", core.serialization.number()), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/InProgressState.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/InProgressState.ts index 9c332303..1e8e6ba6 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/InProgressState.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/InProgressState.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { InProgress } from "./InProgress"; export const InProgressState: core.serialization.ObjectSchema< - serializers.expressionMeasurement.InProgressState.Raw, - Hume.expressionMeasurement.InProgressState + serializers.expressionMeasurement.batch.InProgressState.Raw, + Hume.expressionMeasurement.batch.InProgressState > = core.serialization.object({}).extend(InProgress); export declare namespace InProgressState { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/InferenceBaseRequest.ts 
b/src/serialization/resources/expressionMeasurement/resources/batch/types/InferenceBaseRequest.ts index efc287a2..41179d97 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/InferenceBaseRequest.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/InferenceBaseRequest.ts @@ -9,8 +9,8 @@ import { Models } from "./Models"; import { Transcription } from "./Transcription"; export const InferenceBaseRequest: core.serialization.ObjectSchema< - serializers.expressionMeasurement.InferenceBaseRequest.Raw, - Hume.expressionMeasurement.InferenceBaseRequest + serializers.expressionMeasurement.batch.InferenceBaseRequest.Raw, + Hume.expressionMeasurement.batch.InferenceBaseRequest > = core.serialization.object({ models: Models.optional(), transcription: Transcription.optional(), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/InferenceJob.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/InferenceJob.ts index 8649c5c8..bd37fa12 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/InferenceJob.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/InferenceJob.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { JobInference } from "./JobInference"; export const InferenceJob: core.serialization.ObjectSchema< - serializers.expressionMeasurement.InferenceJob.Raw, - Hume.expressionMeasurement.InferenceJob + serializers.expressionMeasurement.batch.InferenceJob.Raw, + Hume.expressionMeasurement.batch.InferenceJob > = core.serialization .object({ type: core.serialization.string(), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/InferencePrediction.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/InferencePrediction.ts index 88fc4dbb..005ae86b 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/InferencePrediction.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/InferencePrediction.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { ModelsPredictions } from "./ModelsPredictions"; export const InferencePrediction: core.serialization.ObjectSchema< - serializers.expressionMeasurement.InferencePrediction.Raw, - Hume.expressionMeasurement.InferencePrediction + serializers.expressionMeasurement.batch.InferencePrediction.Raw, + Hume.expressionMeasurement.batch.InferencePrediction > = core.serialization.object({ file: core.serialization.string(), models: ModelsPredictions, diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/InferenceRequest.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/InferenceRequest.ts index ffda8587..30cd2a80 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/InferenceRequest.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/InferenceRequest.ts @@ -10,8 +10,8 @@ import { Transcription } from "./Transcription"; import { File_ } from "./File_"; export const InferenceRequest: core.serialization.ObjectSchema< - serializers.expressionMeasurement.InferenceRequest.Raw, - Hume.expressionMeasurement.InferenceRequest + serializers.expressionMeasurement.batch.InferenceRequest.Raw, + Hume.expressionMeasurement.batch.InferenceRequest > = core.serialization.object({ models: Models.optional(), transcription: 
Transcription.optional(), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/InferenceResults.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/InferenceResults.ts index c68b9fbc..9badbe75 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/InferenceResults.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/InferenceResults.ts @@ -9,8 +9,8 @@ import { InferencePrediction } from "./InferencePrediction"; import { Error_ } from "./Error_"; export const InferenceResults: core.serialization.ObjectSchema< - serializers.expressionMeasurement.InferenceResults.Raw, - Hume.expressionMeasurement.InferenceResults + serializers.expressionMeasurement.batch.InferenceResults.Raw, + Hume.expressionMeasurement.batch.InferenceResults > = core.serialization.object({ predictions: core.serialization.list(InferencePrediction), errors: core.serialization.list(Error_), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/InferenceSourcePredictResult.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/InferenceSourcePredictResult.ts index 0ab14e2c..e3dd2963 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/InferenceSourcePredictResult.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/InferenceSourcePredictResult.ts @@ -9,8 +9,8 @@ import { Source } from "./Source"; import { InferenceResults } from "./InferenceResults"; export const InferenceSourcePredictResult: core.serialization.ObjectSchema< - serializers.expressionMeasurement.InferenceSourcePredictResult.Raw, - Hume.expressionMeasurement.InferenceSourcePredictResult + serializers.expressionMeasurement.batch.InferenceSourcePredictResult.Raw, + Hume.expressionMeasurement.batch.InferenceSourcePredictResult > = core.serialization.object({ source: Source, results: InferenceResults.optional(), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/JobEmbeddingGeneration.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/JobEmbeddingGeneration.ts index cb7a2bb3..e8aae68c 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/JobEmbeddingGeneration.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/JobEmbeddingGeneration.ts @@ -9,8 +9,8 @@ import { EmbeddingGenerationBaseRequest } from "./EmbeddingGenerationBaseRequest import { StateEmbeddingGeneration } from "./StateEmbeddingGeneration"; export const JobEmbeddingGeneration: core.serialization.ObjectSchema< - serializers.expressionMeasurement.JobEmbeddingGeneration.Raw, - Hume.expressionMeasurement.JobEmbeddingGeneration + serializers.expressionMeasurement.batch.JobEmbeddingGeneration.Raw, + Hume.expressionMeasurement.batch.JobEmbeddingGeneration > = core.serialization.object({ jobId: core.serialization.property("job_id", core.serialization.string()), userId: core.serialization.property("user_id", core.serialization.string()), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/JobId.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/JobId.ts index 5b8e6850..4546c6db 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/JobId.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/JobId.ts @@ -7,8 +7,8 @@ import * as Hume from 
"../../../../../../api/index"; import * as core from "../../../../../../core"; export const JobId: core.serialization.ObjectSchema< - serializers.expressionMeasurement.JobId.Raw, - Hume.expressionMeasurement.JobId + serializers.expressionMeasurement.batch.JobId.Raw, + Hume.expressionMeasurement.batch.JobId > = core.serialization.object({ jobId: core.serialization.property("job_id", core.serialization.string()), }); diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/JobInference.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/JobInference.ts index e691b618..2bb1d60d 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/JobInference.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/JobInference.ts @@ -9,8 +9,8 @@ import { InferenceRequest } from "./InferenceRequest"; import { StateInference } from "./StateInference"; export const JobInference: core.serialization.ObjectSchema< - serializers.expressionMeasurement.JobInference.Raw, - Hume.expressionMeasurement.JobInference + serializers.expressionMeasurement.batch.JobInference.Raw, + Hume.expressionMeasurement.batch.JobInference > = core.serialization.object({ jobId: core.serialization.property("job_id", core.serialization.string()), request: InferenceRequest, diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/JobTlInference.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/JobTlInference.ts index e24d8e42..dfe3f598 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/JobTlInference.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/JobTlInference.ts @@ -9,8 +9,8 @@ import { TlInferenceBaseRequest } from "./TlInferenceBaseRequest"; import { StateTlInference } from "./StateTlInference"; export const JobTlInference: core.serialization.ObjectSchema< - serializers.expressionMeasurement.JobTlInference.Raw, - Hume.expressionMeasurement.JobTlInference + serializers.expressionMeasurement.batch.JobTlInference.Raw, + Hume.expressionMeasurement.batch.JobTlInference > = core.serialization.object({ jobId: core.serialization.property("job_id", core.serialization.string()), userId: core.serialization.property("user_id", core.serialization.string()), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/JobTraining.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/JobTraining.ts index f6f704b4..ea035e65 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/JobTraining.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/JobTraining.ts @@ -9,8 +9,8 @@ import { TrainingBaseRequest } from "./TrainingBaseRequest"; import { StateTraining } from "./StateTraining"; export const JobTraining: core.serialization.ObjectSchema< - serializers.expressionMeasurement.JobTraining.Raw, - Hume.expressionMeasurement.JobTraining + serializers.expressionMeasurement.batch.JobTraining.Raw, + Hume.expressionMeasurement.batch.JobTraining > = core.serialization.object({ jobId: core.serialization.property("job_id", core.serialization.string()), userId: core.serialization.property("user_id", core.serialization.string()), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/Language.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/Language.ts index 63f5232d..3200d736 
100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/Language.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/Language.ts @@ -9,8 +9,8 @@ import { Granularity } from "./Granularity"; import { Unconfigurable } from "./Unconfigurable"; export const Language: core.serialization.ObjectSchema< - serializers.expressionMeasurement.Language.Raw, - Hume.expressionMeasurement.Language + serializers.expressionMeasurement.batch.Language.Raw, + Hume.expressionMeasurement.batch.Language > = core.serialization.object({ granularity: Granularity.optional(), sentiment: Unconfigurable.optional(), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/LanguagePrediction.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/LanguagePrediction.ts index 440fc6dd..88a7e4f8 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/LanguagePrediction.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/LanguagePrediction.ts @@ -12,8 +12,8 @@ import { SentimentScore } from "./SentimentScore"; import { ToxicityScore } from "./ToxicityScore"; export const LanguagePrediction: core.serialization.ObjectSchema< - serializers.expressionMeasurement.LanguagePrediction.Raw, - Hume.expressionMeasurement.LanguagePrediction + serializers.expressionMeasurement.batch.LanguagePrediction.Raw, + Hume.expressionMeasurement.batch.LanguagePrediction > = core.serialization.object({ text: core.serialization.string(), position: PositionInterval, diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/Models.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/Models.ts index 3e5b95ad..65005fbd 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/Models.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/Models.ts @@ -12,8 +12,8 @@ import { Language } from "./Language"; import { Ner } from "./Ner"; export const Models: core.serialization.ObjectSchema< - serializers.expressionMeasurement.Models.Raw, - Hume.expressionMeasurement.Models + serializers.expressionMeasurement.batch.Models.Raw, + Hume.expressionMeasurement.batch.Models > = core.serialization.object({ face: Face.optional(), burst: Unconfigurable.optional(), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/ModelsPredictions.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/ModelsPredictions.ts index acd4476b..b484e90d 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/ModelsPredictions.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/ModelsPredictions.ts @@ -13,8 +13,8 @@ import { PredictionsOptionalTranscriptionMetadataNerPrediction } from "./Predict import { PredictionsOptionalNullFacemeshPrediction } from "./PredictionsOptionalNullFacemeshPrediction"; export const ModelsPredictions: core.serialization.ObjectSchema< - serializers.expressionMeasurement.ModelsPredictions.Raw, - Hume.expressionMeasurement.ModelsPredictions + serializers.expressionMeasurement.batch.ModelsPredictions.Raw, + Hume.expressionMeasurement.batch.ModelsPredictions > = core.serialization.object({ face: PredictionsOptionalNullFacePrediction.optional(), burst: PredictionsOptionalNullBurstPrediction.optional(), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/Ner.ts 
b/src/serialization/resources/expressionMeasurement/resources/batch/types/Ner.ts index 73ea0ea1..d49592c4 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/Ner.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/Ner.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const Ner: core.serialization.ObjectSchema< - serializers.expressionMeasurement.Ner.Raw, - Hume.expressionMeasurement.Ner + serializers.expressionMeasurement.batch.Ner.Raw, + Hume.expressionMeasurement.batch.Ner > = core.serialization.object({ identifySpeakers: core.serialization.property("identify_speakers", core.serialization.boolean().optional()), }); diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/NerPrediction.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/NerPrediction.ts index d7e2f38a..aead465c 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/NerPrediction.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/NerPrediction.ts @@ -10,8 +10,8 @@ import { TimeInterval } from "./TimeInterval"; import { EmotionScore } from "./EmotionScore"; export const NerPrediction: core.serialization.ObjectSchema< - serializers.expressionMeasurement.NerPrediction.Raw, - Hume.expressionMeasurement.NerPrediction + serializers.expressionMeasurement.batch.NerPrediction.Raw, + Hume.expressionMeasurement.batch.NerPrediction > = core.serialization.object({ entity: core.serialization.string(), position: PositionInterval, diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/Null.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/Null.ts index 9cfd2a87..036dfebc 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/Null.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/Null.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const Null: core.serialization.Schema< - serializers.expressionMeasurement.Null.Raw, - Hume.expressionMeasurement.Null + serializers.expressionMeasurement.batch.Null.Raw, + Hume.expressionMeasurement.batch.Null > = core.serialization.record(core.serialization.string(), core.serialization.unknown()); export declare namespace Null { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/PositionInterval.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/PositionInterval.ts index 0c9b16c0..de1aad8a 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/PositionInterval.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/PositionInterval.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const PositionInterval: core.serialization.ObjectSchema< - serializers.expressionMeasurement.PositionInterval.Raw, - Hume.expressionMeasurement.PositionInterval + serializers.expressionMeasurement.batch.PositionInterval.Raw, + Hume.expressionMeasurement.batch.PositionInterval > = core.serialization.object({ begin: core.serialization.number(), end: core.serialization.number(), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalNullBurstPrediction.ts 
b/src/serialization/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalNullBurstPrediction.ts index d59b4ced..fb2d5eb0 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalNullBurstPrediction.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalNullBurstPrediction.ts @@ -9,8 +9,8 @@ import { Null } from "./Null"; import { GroupedPredictionsBurstPrediction } from "./GroupedPredictionsBurstPrediction"; export const PredictionsOptionalNullBurstPrediction: core.serialization.ObjectSchema< - serializers.expressionMeasurement.PredictionsOptionalNullBurstPrediction.Raw, - Hume.expressionMeasurement.PredictionsOptionalNullBurstPrediction + serializers.expressionMeasurement.batch.PredictionsOptionalNullBurstPrediction.Raw, + Hume.expressionMeasurement.batch.PredictionsOptionalNullBurstPrediction > = core.serialization.object({ metadata: Null.optional(), groupedPredictions: core.serialization.property( diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalNullFacePrediction.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalNullFacePrediction.ts index b6be4af6..7e563bf5 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalNullFacePrediction.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalNullFacePrediction.ts @@ -9,8 +9,8 @@ import { Null } from "./Null"; import { GroupedPredictionsFacePrediction } from "./GroupedPredictionsFacePrediction"; export const PredictionsOptionalNullFacePrediction: core.serialization.ObjectSchema< - serializers.expressionMeasurement.PredictionsOptionalNullFacePrediction.Raw, - Hume.expressionMeasurement.PredictionsOptionalNullFacePrediction + serializers.expressionMeasurement.batch.PredictionsOptionalNullFacePrediction.Raw, + Hume.expressionMeasurement.batch.PredictionsOptionalNullFacePrediction > = core.serialization.object({ metadata: Null.optional(), groupedPredictions: core.serialization.property( diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalNullFacemeshPrediction.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalNullFacemeshPrediction.ts index f838ecb2..e584626f 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalNullFacemeshPrediction.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalNullFacemeshPrediction.ts @@ -9,8 +9,8 @@ import { Null } from "./Null"; import { GroupedPredictionsFacemeshPrediction } from "./GroupedPredictionsFacemeshPrediction"; export const PredictionsOptionalNullFacemeshPrediction: core.serialization.ObjectSchema< - serializers.expressionMeasurement.PredictionsOptionalNullFacemeshPrediction.Raw, - Hume.expressionMeasurement.PredictionsOptionalNullFacemeshPrediction + serializers.expressionMeasurement.batch.PredictionsOptionalNullFacemeshPrediction.Raw, + Hume.expressionMeasurement.batch.PredictionsOptionalNullFacemeshPrediction > = core.serialization.object({ metadata: Null.optional(), groupedPredictions: core.serialization.property( diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalTranscriptionMetadataLanguagePrediction.ts 
b/src/serialization/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalTranscriptionMetadataLanguagePrediction.ts index f555d0e4..c3ebd421 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalTranscriptionMetadataLanguagePrediction.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalTranscriptionMetadataLanguagePrediction.ts @@ -9,8 +9,8 @@ import { TranscriptionMetadata } from "./TranscriptionMetadata"; import { GroupedPredictionsLanguagePrediction } from "./GroupedPredictionsLanguagePrediction"; export const PredictionsOptionalTranscriptionMetadataLanguagePrediction: core.serialization.ObjectSchema< - serializers.expressionMeasurement.PredictionsOptionalTranscriptionMetadataLanguagePrediction.Raw, - Hume.expressionMeasurement.PredictionsOptionalTranscriptionMetadataLanguagePrediction + serializers.expressionMeasurement.batch.PredictionsOptionalTranscriptionMetadataLanguagePrediction.Raw, + Hume.expressionMeasurement.batch.PredictionsOptionalTranscriptionMetadataLanguagePrediction > = core.serialization.object({ metadata: TranscriptionMetadata.optional(), groupedPredictions: core.serialization.property( diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalTranscriptionMetadataNerPrediction.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalTranscriptionMetadataNerPrediction.ts index 1cc28f6e..9ba2bce7 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalTranscriptionMetadataNerPrediction.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalTranscriptionMetadataNerPrediction.ts @@ -9,8 +9,8 @@ import { TranscriptionMetadata } from "./TranscriptionMetadata"; import { GroupedPredictionsNerPrediction } from "./GroupedPredictionsNerPrediction"; export const PredictionsOptionalTranscriptionMetadataNerPrediction: core.serialization.ObjectSchema< - serializers.expressionMeasurement.PredictionsOptionalTranscriptionMetadataNerPrediction.Raw, - Hume.expressionMeasurement.PredictionsOptionalTranscriptionMetadataNerPrediction + serializers.expressionMeasurement.batch.PredictionsOptionalTranscriptionMetadataNerPrediction.Raw, + Hume.expressionMeasurement.batch.PredictionsOptionalTranscriptionMetadataNerPrediction > = core.serialization.object({ metadata: TranscriptionMetadata.optional(), groupedPredictions: core.serialization.property( diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalTranscriptionMetadataProsodyPrediction.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalTranscriptionMetadataProsodyPrediction.ts index 7c98cc11..66968444 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalTranscriptionMetadataProsodyPrediction.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/PredictionsOptionalTranscriptionMetadataProsodyPrediction.ts @@ -9,8 +9,8 @@ import { TranscriptionMetadata } from "./TranscriptionMetadata"; import { GroupedPredictionsProsodyPrediction } from "./GroupedPredictionsProsodyPrediction"; export const PredictionsOptionalTranscriptionMetadataProsodyPrediction: core.serialization.ObjectSchema< - serializers.expressionMeasurement.PredictionsOptionalTranscriptionMetadataProsodyPrediction.Raw, - 
Hume.expressionMeasurement.PredictionsOptionalTranscriptionMetadataProsodyPrediction + serializers.expressionMeasurement.batch.PredictionsOptionalTranscriptionMetadataProsodyPrediction.Raw, + Hume.expressionMeasurement.batch.PredictionsOptionalTranscriptionMetadataProsodyPrediction > = core.serialization.object({ metadata: TranscriptionMetadata.optional(), groupedPredictions: core.serialization.property( diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/Prosody.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/Prosody.ts index d1acad3d..a139a4f5 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/Prosody.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/Prosody.ts @@ -9,8 +9,8 @@ import { Granularity } from "./Granularity"; import { Window } from "./Window"; export const Prosody: core.serialization.ObjectSchema< - serializers.expressionMeasurement.Prosody.Raw, - Hume.expressionMeasurement.Prosody + serializers.expressionMeasurement.batch.Prosody.Raw, + Hume.expressionMeasurement.batch.Prosody > = core.serialization.object({ granularity: Granularity.optional(), window: Window.optional(), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/ProsodyPrediction.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/ProsodyPrediction.ts index f05f9658..606effea 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/ProsodyPrediction.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/ProsodyPrediction.ts @@ -9,8 +9,8 @@ import { TimeInterval } from "./TimeInterval"; import { EmotionScore } from "./EmotionScore"; export const ProsodyPrediction: core.serialization.ObjectSchema< - serializers.expressionMeasurement.ProsodyPrediction.Raw, - Hume.expressionMeasurement.ProsodyPrediction + serializers.expressionMeasurement.batch.ProsodyPrediction.Raw, + Hume.expressionMeasurement.batch.ProsodyPrediction > = core.serialization.object({ text: core.serialization.string().optional(), time: TimeInterval, diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/Queued.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/Queued.ts index 33bed74c..49f276d4 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/Queued.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/Queued.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const Queued: core.serialization.ObjectSchema< - serializers.expressionMeasurement.Queued.Raw, - Hume.expressionMeasurement.Queued + serializers.expressionMeasurement.batch.Queued.Raw, + Hume.expressionMeasurement.batch.Queued > = core.serialization.object({ createdTimestampMs: core.serialization.property("created_timestamp_ms", core.serialization.number()), }); diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/QueuedState.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/QueuedState.ts index 0437bb41..7ab18cdd 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/QueuedState.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/QueuedState.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { Queued } from "./Queued"; export const 
QueuedState: core.serialization.ObjectSchema< - serializers.expressionMeasurement.QueuedState.Raw, - Hume.expressionMeasurement.QueuedState + serializers.expressionMeasurement.batch.QueuedState.Raw, + Hume.expressionMeasurement.batch.QueuedState > = core.serialization.object({}).extend(Queued); export declare namespace QueuedState { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/RegistryFileDetail.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/RegistryFileDetail.ts index 2306eb8d..b3775e43 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/RegistryFileDetail.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/RegistryFileDetail.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const RegistryFileDetail: core.serialization.ObjectSchema< - serializers.expressionMeasurement.RegistryFileDetail.Raw, - Hume.expressionMeasurement.RegistryFileDetail + serializers.expressionMeasurement.batch.RegistryFileDetail.Raw, + Hume.expressionMeasurement.batch.RegistryFileDetail > = core.serialization.object({ fileId: core.serialization.property("file_id", core.serialization.string()), fileUrl: core.serialization.property("file_url", core.serialization.string()), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/Regression.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/Regression.ts index 60b60632..1decf7f2 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/Regression.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/Regression.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const Regression: core.serialization.Schema< - serializers.expressionMeasurement.Regression.Raw, - Hume.expressionMeasurement.Regression + serializers.expressionMeasurement.batch.Regression.Raw, + Hume.expressionMeasurement.batch.Regression > = core.serialization.record(core.serialization.string(), core.serialization.unknown()); export declare namespace Regression { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/SentimentScore.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/SentimentScore.ts index eaeaed15..2ff1e905 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/SentimentScore.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/SentimentScore.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const SentimentScore: core.serialization.ObjectSchema< - serializers.expressionMeasurement.SentimentScore.Raw, - Hume.expressionMeasurement.SentimentScore + serializers.expressionMeasurement.batch.SentimentScore.Raw, + Hume.expressionMeasurement.batch.SentimentScore > = core.serialization.object({ name: core.serialization.string(), score: core.serialization.number(), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/SortBy.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/SortBy.ts index ad6d51d3..91246d33 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/SortBy.ts +++ 
b/src/serialization/resources/expressionMeasurement/resources/batch/types/SortBy.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const SortBy: core.serialization.Schema< - serializers.expressionMeasurement.SortBy.Raw, - Hume.expressionMeasurement.SortBy + serializers.expressionMeasurement.batch.SortBy.Raw, + Hume.expressionMeasurement.batch.SortBy > = core.serialization.enum_(["created", "started", "ended"]); export declare namespace SortBy { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/Source.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/Source.ts index 993f1bd3..80c8d3bc 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/Source.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/Source.ts @@ -10,15 +10,15 @@ import { SourceFile } from "./SourceFile"; import { SourceTextSource } from "./SourceTextSource"; export const Source: core.serialization.Schema< - serializers.expressionMeasurement.Source.Raw, - Hume.expressionMeasurement.Source + serializers.expressionMeasurement.batch.Source.Raw, + Hume.expressionMeasurement.batch.Source > = core.serialization .union("type", { url: SourceUrl, file: SourceFile, text: SourceTextSource, }) - .transform({ + .transform({ transform: (value) => value, untransform: (value) => value, }); diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/SourceFile.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/SourceFile.ts index eafd6ed5..682c265e 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/SourceFile.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/SourceFile.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { File_ } from "./File_"; export const SourceFile: core.serialization.ObjectSchema< - serializers.expressionMeasurement.SourceFile.Raw, - Hume.expressionMeasurement.SourceFile + serializers.expressionMeasurement.batch.SourceFile.Raw, + Hume.expressionMeasurement.batch.SourceFile > = core.serialization.object({}).extend(File_); export declare namespace SourceFile { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/SourceTextSource.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/SourceTextSource.ts index a4de3feb..5d012b4d 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/SourceTextSource.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/SourceTextSource.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const SourceTextSource: core.serialization.ObjectSchema< - serializers.expressionMeasurement.SourceTextSource.Raw, - Hume.expressionMeasurement.SourceTextSource + serializers.expressionMeasurement.batch.SourceTextSource.Raw, + Hume.expressionMeasurement.batch.SourceTextSource > = core.serialization.object({}); export declare namespace SourceTextSource { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/SourceUrl.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/SourceUrl.ts index 89c04c4e..c64bb7aa 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/SourceUrl.ts +++ 
b/src/serialization/resources/expressionMeasurement/resources/batch/types/SourceUrl.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { Url } from "./Url"; export const SourceUrl: core.serialization.ObjectSchema< - serializers.expressionMeasurement.SourceUrl.Raw, - Hume.expressionMeasurement.SourceUrl + serializers.expressionMeasurement.batch.SourceUrl.Raw, + Hume.expressionMeasurement.batch.SourceUrl > = core.serialization.object({}).extend(Url); export declare namespace SourceUrl { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGeneration.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGeneration.ts index e70b0dae..c800ea7c 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGeneration.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGeneration.ts @@ -11,8 +11,8 @@ import { StateEmbeddingGenerationCompletedEmbeddingGeneration } from "./StateEmb import { StateEmbeddingGenerationFailed } from "./StateEmbeddingGenerationFailed"; export const StateEmbeddingGeneration: core.serialization.Schema< - serializers.expressionMeasurement.StateEmbeddingGeneration.Raw, - Hume.expressionMeasurement.StateEmbeddingGeneration + serializers.expressionMeasurement.batch.StateEmbeddingGeneration.Raw, + Hume.expressionMeasurement.batch.StateEmbeddingGeneration > = core.serialization .union("status", { QUEUED: StateEmbeddingGenerationQueued, @@ -20,7 +20,7 @@ export const StateEmbeddingGeneration: core.serialization.Schema< COMPLETED: StateEmbeddingGenerationCompletedEmbeddingGeneration, FAILED: StateEmbeddingGenerationFailed, }) - .transform({ + .transform({ transform: (value) => value, untransform: (value) => value, }); diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationCompletedEmbeddingGeneration.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationCompletedEmbeddingGeneration.ts index 1a88ecbd..47e9894a 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationCompletedEmbeddingGeneration.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationCompletedEmbeddingGeneration.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { CompletedEmbeddingGeneration } from "./CompletedEmbeddingGeneration"; export const StateEmbeddingGenerationCompletedEmbeddingGeneration: core.serialization.ObjectSchema< - serializers.expressionMeasurement.StateEmbeddingGenerationCompletedEmbeddingGeneration.Raw, - Hume.expressionMeasurement.StateEmbeddingGenerationCompletedEmbeddingGeneration + serializers.expressionMeasurement.batch.StateEmbeddingGenerationCompletedEmbeddingGeneration.Raw, + Hume.expressionMeasurement.batch.StateEmbeddingGenerationCompletedEmbeddingGeneration > = core.serialization.object({}).extend(CompletedEmbeddingGeneration); export declare namespace StateEmbeddingGenerationCompletedEmbeddingGeneration { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationFailed.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationFailed.ts index a025e1f2..e98b79e7 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationFailed.ts +++ 
b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationFailed.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { Failed } from "./Failed"; export const StateEmbeddingGenerationFailed: core.serialization.ObjectSchema< - serializers.expressionMeasurement.StateEmbeddingGenerationFailed.Raw, - Hume.expressionMeasurement.StateEmbeddingGenerationFailed + serializers.expressionMeasurement.batch.StateEmbeddingGenerationFailed.Raw, + Hume.expressionMeasurement.batch.StateEmbeddingGenerationFailed > = core.serialization.object({}).extend(Failed); export declare namespace StateEmbeddingGenerationFailed { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationInProgress.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationInProgress.ts index cb218124..76bc9970 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationInProgress.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationInProgress.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { InProgress } from "./InProgress"; export const StateEmbeddingGenerationInProgress: core.serialization.ObjectSchema< - serializers.expressionMeasurement.StateEmbeddingGenerationInProgress.Raw, - Hume.expressionMeasurement.StateEmbeddingGenerationInProgress + serializers.expressionMeasurement.batch.StateEmbeddingGenerationInProgress.Raw, + Hume.expressionMeasurement.batch.StateEmbeddingGenerationInProgress > = core.serialization.object({}).extend(InProgress); export declare namespace StateEmbeddingGenerationInProgress { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationQueued.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationQueued.ts index a4c12db1..b29e029f 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationQueued.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateEmbeddingGenerationQueued.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { Queued } from "./Queued"; export const StateEmbeddingGenerationQueued: core.serialization.ObjectSchema< - serializers.expressionMeasurement.StateEmbeddingGenerationQueued.Raw, - Hume.expressionMeasurement.StateEmbeddingGenerationQueued + serializers.expressionMeasurement.batch.StateEmbeddingGenerationQueued.Raw, + Hume.expressionMeasurement.batch.StateEmbeddingGenerationQueued > = core.serialization.object({}).extend(Queued); export declare namespace StateEmbeddingGenerationQueued { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateInference.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateInference.ts index 1784b7e3..8a40608f 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateInference.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateInference.ts @@ -11,8 +11,8 @@ import { CompletedState } from "./CompletedState"; import { FailedState } from "./FailedState"; export const StateInference: core.serialization.Schema< - serializers.expressionMeasurement.StateInference.Raw, - Hume.expressionMeasurement.StateInference + 
serializers.expressionMeasurement.batch.StateInference.Raw, + Hume.expressionMeasurement.batch.StateInference > = core.serialization .union("status", { QUEUED: QueuedState, @@ -20,7 +20,7 @@ export const StateInference: core.serialization.Schema< COMPLETED: CompletedState, FAILED: FailedState, }) - .transform({ + .transform({ transform: (value) => value, untransform: (value) => value, }); diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTlInference.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTlInference.ts index 4e602ff3..dc2261a0 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTlInference.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTlInference.ts @@ -11,8 +11,8 @@ import { StateTlInferenceCompletedTlInference } from "./StateTlInferenceComplete import { StateTlInferenceFailed } from "./StateTlInferenceFailed"; export const StateTlInference: core.serialization.Schema< - serializers.expressionMeasurement.StateTlInference.Raw, - Hume.expressionMeasurement.StateTlInference + serializers.expressionMeasurement.batch.StateTlInference.Raw, + Hume.expressionMeasurement.batch.StateTlInference > = core.serialization .union("status", { QUEUED: StateTlInferenceQueued, @@ -20,7 +20,7 @@ export const StateTlInference: core.serialization.Schema< COMPLETED: StateTlInferenceCompletedTlInference, FAILED: StateTlInferenceFailed, }) - .transform({ + .transform({ transform: (value) => value, untransform: (value) => value, }); diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTlInferenceCompletedTlInference.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTlInferenceCompletedTlInference.ts index ba3e9f5e..6fd64a9c 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTlInferenceCompletedTlInference.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTlInferenceCompletedTlInference.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { CompletedTlInference } from "./CompletedTlInference"; export const StateTlInferenceCompletedTlInference: core.serialization.ObjectSchema< - serializers.expressionMeasurement.StateTlInferenceCompletedTlInference.Raw, - Hume.expressionMeasurement.StateTlInferenceCompletedTlInference + serializers.expressionMeasurement.batch.StateTlInferenceCompletedTlInference.Raw, + Hume.expressionMeasurement.batch.StateTlInferenceCompletedTlInference > = core.serialization.object({}).extend(CompletedTlInference); export declare namespace StateTlInferenceCompletedTlInference { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTlInferenceFailed.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTlInferenceFailed.ts index d2958dd6..9b9528a3 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTlInferenceFailed.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTlInferenceFailed.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { Failed } from "./Failed"; export const StateTlInferenceFailed: core.serialization.ObjectSchema< - serializers.expressionMeasurement.StateTlInferenceFailed.Raw, - Hume.expressionMeasurement.StateTlInferenceFailed + serializers.expressionMeasurement.batch.StateTlInferenceFailed.Raw, + 
Hume.expressionMeasurement.batch.StateTlInferenceFailed > = core.serialization.object({}).extend(Failed); export declare namespace StateTlInferenceFailed { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTlInferenceInProgress.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTlInferenceInProgress.ts index de50cb00..c4c05868 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTlInferenceInProgress.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTlInferenceInProgress.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { InProgress } from "./InProgress"; export const StateTlInferenceInProgress: core.serialization.ObjectSchema< - serializers.expressionMeasurement.StateTlInferenceInProgress.Raw, - Hume.expressionMeasurement.StateTlInferenceInProgress + serializers.expressionMeasurement.batch.StateTlInferenceInProgress.Raw, + Hume.expressionMeasurement.batch.StateTlInferenceInProgress > = core.serialization.object({}).extend(InProgress); export declare namespace StateTlInferenceInProgress { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTlInferenceQueued.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTlInferenceQueued.ts index 7cc91352..88fc418a 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTlInferenceQueued.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTlInferenceQueued.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { Queued } from "./Queued"; export const StateTlInferenceQueued: core.serialization.ObjectSchema< - serializers.expressionMeasurement.StateTlInferenceQueued.Raw, - Hume.expressionMeasurement.StateTlInferenceQueued + serializers.expressionMeasurement.batch.StateTlInferenceQueued.Raw, + Hume.expressionMeasurement.batch.StateTlInferenceQueued > = core.serialization.object({}).extend(Queued); export declare namespace StateTlInferenceQueued { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTraining.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTraining.ts index 63a01675..bcdf8666 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTraining.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTraining.ts @@ -11,8 +11,8 @@ import { StateTrainingCompletedTraining } from "./StateTrainingCompletedTraining import { StateTrainingFailed } from "./StateTrainingFailed"; export const StateTraining: core.serialization.Schema< - serializers.expressionMeasurement.StateTraining.Raw, - Hume.expressionMeasurement.StateTraining + serializers.expressionMeasurement.batch.StateTraining.Raw, + Hume.expressionMeasurement.batch.StateTraining > = core.serialization .union("status", { QUEUED: StateTrainingQueued, @@ -20,7 +20,7 @@ export const StateTraining: core.serialization.Schema< COMPLETED: StateTrainingCompletedTraining, FAILED: StateTrainingFailed, }) - .transform({ + .transform({ transform: (value) => value, untransform: (value) => value, }); diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTrainingCompletedTraining.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTrainingCompletedTraining.ts index 52ac42ae..42c201a3 100644 --- 
a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTrainingCompletedTraining.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTrainingCompletedTraining.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { CompletedTraining } from "./CompletedTraining"; export const StateTrainingCompletedTraining: core.serialization.ObjectSchema< - serializers.expressionMeasurement.StateTrainingCompletedTraining.Raw, - Hume.expressionMeasurement.StateTrainingCompletedTraining + serializers.expressionMeasurement.batch.StateTrainingCompletedTraining.Raw, + Hume.expressionMeasurement.batch.StateTrainingCompletedTraining > = core.serialization.object({}).extend(CompletedTraining); export declare namespace StateTrainingCompletedTraining { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTrainingFailed.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTrainingFailed.ts index c1d35e44..9f88b081 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTrainingFailed.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTrainingFailed.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { Failed } from "./Failed"; export const StateTrainingFailed: core.serialization.ObjectSchema< - serializers.expressionMeasurement.StateTrainingFailed.Raw, - Hume.expressionMeasurement.StateTrainingFailed + serializers.expressionMeasurement.batch.StateTrainingFailed.Raw, + Hume.expressionMeasurement.batch.StateTrainingFailed > = core.serialization.object({}).extend(Failed); export declare namespace StateTrainingFailed { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTrainingInProgress.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTrainingInProgress.ts index 3a2581cf..1af8c486 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTrainingInProgress.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTrainingInProgress.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { InProgress } from "./InProgress"; export const StateTrainingInProgress: core.serialization.ObjectSchema< - serializers.expressionMeasurement.StateTrainingInProgress.Raw, - Hume.expressionMeasurement.StateTrainingInProgress + serializers.expressionMeasurement.batch.StateTrainingInProgress.Raw, + Hume.expressionMeasurement.batch.StateTrainingInProgress > = core.serialization.object({}).extend(InProgress); export declare namespace StateTrainingInProgress { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTrainingQueued.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTrainingQueued.ts index f7a91676..15d20e93 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTrainingQueued.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/StateTrainingQueued.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { Queued } from "./Queued"; export const StateTrainingQueued: core.serialization.ObjectSchema< - serializers.expressionMeasurement.StateTrainingQueued.Raw, - Hume.expressionMeasurement.StateTrainingQueued + serializers.expressionMeasurement.batch.StateTrainingQueued.Raw, + 
Hume.expressionMeasurement.batch.StateTrainingQueued > = core.serialization.object({}).extend(Queued); export declare namespace StateTrainingQueued { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/Status.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/Status.ts index a9eba9c4..777f725a 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/Status.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/Status.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const Status: core.serialization.Schema< - serializers.expressionMeasurement.Status.Raw, - Hume.expressionMeasurement.Status + serializers.expressionMeasurement.batch.Status.Raw, + Hume.expressionMeasurement.batch.Status > = core.serialization.enum_(["QUEUED", "IN_PROGRESS", "COMPLETED", "FAILED"]); export declare namespace Status { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/Tag.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/Tag.ts index fddf00e2..ac2935b9 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/Tag.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/Tag.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const Tag: core.serialization.ObjectSchema< - serializers.expressionMeasurement.Tag.Raw, - Hume.expressionMeasurement.Tag + serializers.expressionMeasurement.batch.Tag.Raw, + Hume.expressionMeasurement.batch.Tag > = core.serialization.object({ key: core.serialization.string(), value: core.serialization.string(), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/Target.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/Target.ts index 86b16adb..0125c6c0 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/Target.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/Target.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const Target: core.serialization.Schema< - serializers.expressionMeasurement.Target.Raw, - Hume.expressionMeasurement.Target + serializers.expressionMeasurement.batch.Target.Raw, + Hume.expressionMeasurement.batch.Target > = core.serialization.undiscriminatedUnion([ core.serialization.number(), core.serialization.number(), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/Task.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/Task.ts index 25fd7a10..cfbafbea 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/Task.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/Task.ts @@ -9,14 +9,14 @@ import { TaskClassification } from "./TaskClassification"; import { TaskRegression } from "./TaskRegression"; export const Task: core.serialization.Schema< - serializers.expressionMeasurement.Task.Raw, - Hume.expressionMeasurement.Task + serializers.expressionMeasurement.batch.Task.Raw, + Hume.expressionMeasurement.batch.Task > = core.serialization .union("type", { classification: TaskClassification, regression: TaskRegression, }) - .transform({ + .transform({ transform: (value) => value, untransform: 
(value) => value, }); diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/TaskClassification.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/TaskClassification.ts index d332e885..980695a7 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/TaskClassification.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/TaskClassification.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const TaskClassification: core.serialization.ObjectSchema< - serializers.expressionMeasurement.TaskClassification.Raw, - Hume.expressionMeasurement.TaskClassification + serializers.expressionMeasurement.batch.TaskClassification.Raw, + Hume.expressionMeasurement.batch.TaskClassification > = core.serialization.object({}); export declare namespace TaskClassification { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/TaskRegression.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/TaskRegression.ts index 640f7e82..fd57f5fe 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/TaskRegression.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/TaskRegression.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const TaskRegression: core.serialization.ObjectSchema< - serializers.expressionMeasurement.TaskRegression.Raw, - Hume.expressionMeasurement.TaskRegression + serializers.expressionMeasurement.batch.TaskRegression.Raw, + Hume.expressionMeasurement.batch.TaskRegression > = core.serialization.object({}); export declare namespace TaskRegression { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/TextSource.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/TextSource.ts index e18aa4d1..a879b493 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/TextSource.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/TextSource.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const TextSource: core.serialization.Schema< - serializers.expressionMeasurement.TextSource.Raw, - Hume.expressionMeasurement.TextSource + serializers.expressionMeasurement.batch.TextSource.Raw, + Hume.expressionMeasurement.batch.TextSource > = core.serialization.record(core.serialization.string(), core.serialization.unknown()); export declare namespace TextSource { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/TimeInterval.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/TimeInterval.ts index 3b7df5b0..3fb47880 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/TimeInterval.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/TimeInterval.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const TimeInterval: core.serialization.ObjectSchema< - serializers.expressionMeasurement.TimeInterval.Raw, - Hume.expressionMeasurement.TimeInterval + serializers.expressionMeasurement.batch.TimeInterval.Raw, + Hume.expressionMeasurement.batch.TimeInterval > = 
core.serialization.object({ begin: core.serialization.number(), end: core.serialization.number(), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/TlInferenceBaseRequest.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/TlInferenceBaseRequest.ts index a258bf48..027ea8f2 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/TlInferenceBaseRequest.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/TlInferenceBaseRequest.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { CustomModel } from "./CustomModel"; export const TlInferenceBaseRequest: core.serialization.ObjectSchema< - serializers.expressionMeasurement.TlInferenceBaseRequest.Raw, - Hume.expressionMeasurement.TlInferenceBaseRequest + serializers.expressionMeasurement.batch.TlInferenceBaseRequest.Raw, + Hume.expressionMeasurement.batch.TlInferenceBaseRequest > = core.serialization.object({ customModel: core.serialization.property("custom_model", CustomModel), urls: core.serialization.list(core.serialization.string()).optional(), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/TlInferencePrediction.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/TlInferencePrediction.ts index d3c4b288..7dd79390 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/TlInferencePrediction.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/TlInferencePrediction.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { CustomModelPrediction } from "./CustomModelPrediction"; export const TlInferencePrediction: core.serialization.ObjectSchema< - serializers.expressionMeasurement.TlInferencePrediction.Raw, - Hume.expressionMeasurement.TlInferencePrediction + serializers.expressionMeasurement.batch.TlInferencePrediction.Raw, + Hume.expressionMeasurement.batch.TlInferencePrediction > = core.serialization.object({ file: core.serialization.string(), fileType: core.serialization.property("file_type", core.serialization.string()), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/TlInferenceResults.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/TlInferenceResults.ts index 8b16a702..b6a16bb1 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/TlInferenceResults.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/TlInferenceResults.ts @@ -9,8 +9,8 @@ import { TlInferencePrediction } from "./TlInferencePrediction"; import { Error_ } from "./Error_"; export const TlInferenceResults: core.serialization.ObjectSchema< - serializers.expressionMeasurement.TlInferenceResults.Raw, - Hume.expressionMeasurement.TlInferenceResults + serializers.expressionMeasurement.batch.TlInferenceResults.Raw, + Hume.expressionMeasurement.batch.TlInferenceResults > = core.serialization.object({ predictions: core.serialization.list(TlInferencePrediction), errors: core.serialization.list(Error_), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/TlInferenceSourcePredictResult.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/TlInferenceSourcePredictResult.ts index 41d7e44b..9c2792cb 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/TlInferenceSourcePredictResult.ts +++ 
b/src/serialization/resources/expressionMeasurement/resources/batch/types/TlInferenceSourcePredictResult.ts @@ -9,8 +9,8 @@ import { Source } from "./Source"; import { TlInferenceResults } from "./TlInferenceResults"; export const TlInferenceSourcePredictResult: core.serialization.ObjectSchema< - serializers.expressionMeasurement.TlInferenceSourcePredictResult.Raw, - Hume.expressionMeasurement.TlInferenceSourcePredictResult + serializers.expressionMeasurement.batch.TlInferenceSourcePredictResult.Raw, + Hume.expressionMeasurement.batch.TlInferenceSourcePredictResult > = core.serialization.object({ source: Source, results: TlInferenceResults.optional(), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/ToxicityScore.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/ToxicityScore.ts index 7dcbcf5f..84036e7b 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/ToxicityScore.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/ToxicityScore.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const ToxicityScore: core.serialization.ObjectSchema< - serializers.expressionMeasurement.ToxicityScore.Raw, - Hume.expressionMeasurement.ToxicityScore + serializers.expressionMeasurement.batch.ToxicityScore.Raw, + Hume.expressionMeasurement.batch.ToxicityScore > = core.serialization.object({ name: core.serialization.string(), score: core.serialization.number(), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/TrainingBaseRequest.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/TrainingBaseRequest.ts index 3f8cdfb2..92dfee54 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/TrainingBaseRequest.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/TrainingBaseRequest.ts @@ -12,8 +12,8 @@ import { EvaluationArgs } from "./EvaluationArgs"; import { Alternative } from "./Alternative"; export const TrainingBaseRequest: core.serialization.ObjectSchema< - serializers.expressionMeasurement.TrainingBaseRequest.Raw, - Hume.expressionMeasurement.TrainingBaseRequest + serializers.expressionMeasurement.batch.TrainingBaseRequest.Raw, + Hume.expressionMeasurement.batch.TrainingBaseRequest > = core.serialization.object({ customModel: core.serialization.property("custom_model", CustomModelRequest), dataset: Dataset, diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/TrainingCustomModel.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/TrainingCustomModel.ts index f56c018c..98b802ff 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/TrainingCustomModel.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/TrainingCustomModel.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const TrainingCustomModel: core.serialization.ObjectSchema< - serializers.expressionMeasurement.TrainingCustomModel.Raw, - Hume.expressionMeasurement.TrainingCustomModel + serializers.expressionMeasurement.batch.TrainingCustomModel.Raw, + Hume.expressionMeasurement.batch.TrainingCustomModel > = core.serialization.object({ id: core.serialization.string(), versionId: core.serialization.property("version_id", 
core.serialization.string().optional()), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/Transcription.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/Transcription.ts index 7b826003..3207d4d5 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/Transcription.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/Transcription.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { Bcp47Tag } from "./Bcp47Tag"; export const Transcription: core.serialization.ObjectSchema< - serializers.expressionMeasurement.Transcription.Raw, - Hume.expressionMeasurement.Transcription + serializers.expressionMeasurement.batch.Transcription.Raw, + Hume.expressionMeasurement.batch.Transcription > = core.serialization.object({ language: Bcp47Tag.optional(), identifySpeakers: core.serialization.property("identify_speakers", core.serialization.boolean().optional()), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/TranscriptionMetadata.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/TranscriptionMetadata.ts index e59fd7f0..09df43e1 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/TranscriptionMetadata.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/TranscriptionMetadata.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { Bcp47Tag } from "./Bcp47Tag"; export const TranscriptionMetadata: core.serialization.ObjectSchema< - serializers.expressionMeasurement.TranscriptionMetadata.Raw, - Hume.expressionMeasurement.TranscriptionMetadata + serializers.expressionMeasurement.batch.TranscriptionMetadata.Raw, + Hume.expressionMeasurement.batch.TranscriptionMetadata > = core.serialization.object({ confidence: core.serialization.number(), detectedLanguage: core.serialization.property("detected_language", Bcp47Tag.optional()), diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/Type.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/Type.ts index 3bf07324..dc528176 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/Type.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/Type.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const Type: core.serialization.Schema< - serializers.expressionMeasurement.Type.Raw, - Hume.expressionMeasurement.Type + serializers.expressionMeasurement.batch.Type.Raw, + Hume.expressionMeasurement.batch.Type > = core.serialization.enum_(["EMBEDDING_GENERATION", "INFERENCE", "TL_INFERENCE", "TRAINING"]); export declare namespace Type { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/Unconfigurable.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/Unconfigurable.ts index 902485b3..0a142a5a 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/Unconfigurable.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/Unconfigurable.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const Unconfigurable: core.serialization.Schema< - serializers.expressionMeasurement.Unconfigurable.Raw, - 
Hume.expressionMeasurement.Unconfigurable + serializers.expressionMeasurement.batch.Unconfigurable.Raw, + Hume.expressionMeasurement.batch.Unconfigurable > = core.serialization.record(core.serialization.string(), core.serialization.unknown()); export declare namespace Unconfigurable { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/UnionJob.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/UnionJob.ts index 188ca3e5..73b2532a 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/UnionJob.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/UnionJob.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { InferenceJob } from "./InferenceJob"; export const UnionJob: core.serialization.ObjectSchema< - serializers.expressionMeasurement.UnionJob.Raw, - Hume.expressionMeasurement.UnionJob + serializers.expressionMeasurement.batch.UnionJob.Raw, + Hume.expressionMeasurement.batch.UnionJob > = InferenceJob; export declare namespace UnionJob { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/UnionPredictResult.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/UnionPredictResult.ts index f69f34da..75442ea7 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/UnionPredictResult.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/UnionPredictResult.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { InferenceSourcePredictResult } from "./InferenceSourcePredictResult"; export const UnionPredictResult: core.serialization.ObjectSchema< - serializers.expressionMeasurement.UnionPredictResult.Raw, - Hume.expressionMeasurement.UnionPredictResult + serializers.expressionMeasurement.batch.UnionPredictResult.Raw, + Hume.expressionMeasurement.batch.UnionPredictResult > = InferenceSourcePredictResult; export declare namespace UnionPredictResult { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/Url.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/Url.ts index 513a7373..1d9b9825 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/Url.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/Url.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const Url: core.serialization.ObjectSchema< - serializers.expressionMeasurement.Url.Raw, - Hume.expressionMeasurement.Url + serializers.expressionMeasurement.batch.Url.Raw, + Hume.expressionMeasurement.batch.Url > = core.serialization.object({ url: core.serialization.string(), }); diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/ValidationArgs.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/ValidationArgs.ts index 46b1c56f..a07fc708 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/ValidationArgs.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/ValidationArgs.ts @@ -8,8 +8,8 @@ import * as core from "../../../../../../core"; import { Target } from "./Target"; export const ValidationArgs: core.serialization.ObjectSchema< - serializers.expressionMeasurement.ValidationArgs.Raw, - Hume.expressionMeasurement.ValidationArgs + 
serializers.expressionMeasurement.batch.ValidationArgs.Raw, + Hume.expressionMeasurement.batch.ValidationArgs > = core.serialization.object({ positiveLabel: core.serialization.property("positive_label", Target.optional()), }); diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/When.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/When.ts index 70a5c615..0822776a 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/When.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/When.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const When: core.serialization.Schema< - serializers.expressionMeasurement.When.Raw, - Hume.expressionMeasurement.When + serializers.expressionMeasurement.batch.When.Raw, + Hume.expressionMeasurement.batch.When > = core.serialization.enum_(["created_before", "created_after"]); export declare namespace When { diff --git a/src/serialization/resources/expressionMeasurement/resources/batch/types/Window.ts b/src/serialization/resources/expressionMeasurement/resources/batch/types/Window.ts index afa698bc..69bb62e5 100644 --- a/src/serialization/resources/expressionMeasurement/resources/batch/types/Window.ts +++ b/src/serialization/resources/expressionMeasurement/resources/batch/types/Window.ts @@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index"; import * as core from "../../../../../../core"; export const Window: core.serialization.ObjectSchema< - serializers.expressionMeasurement.Window.Raw, - Hume.expressionMeasurement.Window + serializers.expressionMeasurement.batch.Window.Raw, + Hume.expressionMeasurement.batch.Window > = core.serialization.object({ length: core.serialization.number().optional(), step: core.serialization.number().optional(), diff --git a/src/serialization/resources/expressionMeasurement/resources/index.ts b/src/serialization/resources/expressionMeasurement/resources/index.ts index c493c22a..99457488 100644 --- a/src/serialization/resources/expressionMeasurement/resources/index.ts +++ b/src/serialization/resources/expressionMeasurement/resources/index.ts @@ -1,4 +1,2 @@ export * as batch from "./batch"; -export * from "./batch/types"; export * as stream from "./stream"; -export * from "./stream/types"; diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/types/Config.ts b/src/serialization/resources/expressionMeasurement/resources/stream/types/Config.ts index bc8b7cf0..f3efda80 100644 --- a/src/serialization/resources/expressionMeasurement/resources/stream/types/Config.ts +++ b/src/serialization/resources/expressionMeasurement/resources/stream/types/Config.ts @@ -9,8 +9,8 @@ import { StreamModelsEndpointPayloadModelsFace } from "./StreamModelsEndpointPay import { StreamModelsEndpointPayloadModelsLanguage } from "./StreamModelsEndpointPayloadModelsLanguage"; export const Config: core.serialization.ObjectSchema< - serializers.expressionMeasurement.Config.Raw, - Hume.expressionMeasurement.Config + serializers.expressionMeasurement.stream.Config.Raw, + Hume.expressionMeasurement.stream.Config > = core.serialization.object({ burst: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(), face: StreamModelsEndpointPayloadModelsFace.optional(), diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/types/EmotionEmbedding.ts 
b/src/serialization/resources/expressionMeasurement/resources/stream/types/EmotionEmbedding.ts
index fffbf5eb..161f033c 100644
--- a/src/serialization/resources/expressionMeasurement/resources/stream/types/EmotionEmbedding.ts
+++ b/src/serialization/resources/expressionMeasurement/resources/stream/types/EmotionEmbedding.ts
@@ -8,8 +8,8 @@ import * as core from "../../../../../../core";
 import { EmotionEmbeddingItem } from "./EmotionEmbeddingItem";

 export const EmotionEmbedding: core.serialization.Schema<
-    serializers.expressionMeasurement.EmotionEmbedding.Raw,
-    Hume.expressionMeasurement.EmotionEmbedding
+    serializers.expressionMeasurement.stream.EmotionEmbedding.Raw,
+    Hume.expressionMeasurement.stream.EmotionEmbedding
 > = core.serialization.list(EmotionEmbeddingItem);

 export declare namespace EmotionEmbedding {
diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/types/EmotionEmbeddingItem.ts b/src/serialization/resources/expressionMeasurement/resources/stream/types/EmotionEmbeddingItem.ts
index 2a7f4161..86237291 100644
--- a/src/serialization/resources/expressionMeasurement/resources/stream/types/EmotionEmbeddingItem.ts
+++ b/src/serialization/resources/expressionMeasurement/resources/stream/types/EmotionEmbeddingItem.ts
@@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index";
 import * as core from "../../../../../../core";

 export const EmotionEmbeddingItem: core.serialization.ObjectSchema<
-    serializers.expressionMeasurement.EmotionEmbeddingItem.Raw,
-    Hume.expressionMeasurement.EmotionEmbeddingItem
+    serializers.expressionMeasurement.stream.EmotionEmbeddingItem.Raw,
+    Hume.expressionMeasurement.stream.EmotionEmbeddingItem
 > = core.serialization.object({
     name: core.serialization.string().optional(),
     score: core.serialization.number().optional(),
diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/types/JobDetails.ts b/src/serialization/resources/expressionMeasurement/resources/stream/types/JobDetails.ts
index aaa613ef..95ec0a86 100644
--- a/src/serialization/resources/expressionMeasurement/resources/stream/types/JobDetails.ts
+++ b/src/serialization/resources/expressionMeasurement/resources/stream/types/JobDetails.ts
@@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index";
 import * as core from "../../../../../../core";

 export const JobDetails: core.serialization.ObjectSchema<
-    serializers.expressionMeasurement.JobDetails.Raw,
-    Hume.expressionMeasurement.JobDetails
+    serializers.expressionMeasurement.stream.JobDetails.Raw,
+    Hume.expressionMeasurement.stream.JobDetails
 > = core.serialization.object({
     jobId: core.serialization.property("job_id", core.serialization.string().optional()),
 });
diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/types/Sentiment.ts b/src/serialization/resources/expressionMeasurement/resources/stream/types/Sentiment.ts
index 4d1cd81c..2ffef065 100644
--- a/src/serialization/resources/expressionMeasurement/resources/stream/types/Sentiment.ts
+++ b/src/serialization/resources/expressionMeasurement/resources/stream/types/Sentiment.ts
@@ -8,8 +8,8 @@ import * as core from "../../../../../../core";
 import { SentimentItem } from "./SentimentItem";

 export const Sentiment: core.serialization.Schema<
-    serializers.expressionMeasurement.Sentiment.Raw,
-    Hume.expressionMeasurement.Sentiment
+    serializers.expressionMeasurement.stream.Sentiment.Raw,
+    Hume.expressionMeasurement.stream.Sentiment
 > = core.serialization.list(SentimentItem);

 export declare namespace Sentiment {
diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/types/SentimentItem.ts b/src/serialization/resources/expressionMeasurement/resources/stream/types/SentimentItem.ts
index 3a5af8e4..3e3b4e1c 100644
--- a/src/serialization/resources/expressionMeasurement/resources/stream/types/SentimentItem.ts
+++ b/src/serialization/resources/expressionMeasurement/resources/stream/types/SentimentItem.ts
@@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index";
 import * as core from "../../../../../../core";

 export const SentimentItem: core.serialization.ObjectSchema<
-    serializers.expressionMeasurement.SentimentItem.Raw,
-    Hume.expressionMeasurement.SentimentItem
+    serializers.expressionMeasurement.stream.SentimentItem.Raw,
+    Hume.expressionMeasurement.stream.SentimentItem
 > = core.serialization.object({
     name: core.serialization.string().optional(),
     score: core.serialization.number().optional(),
diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamBoundingBox.ts b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamBoundingBox.ts
index 74fbcad1..99bba91c 100644
--- a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamBoundingBox.ts
+++ b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamBoundingBox.ts
@@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index";
 import * as core from "../../../../../../core";

 export const StreamBoundingBox: core.serialization.ObjectSchema<
-    serializers.expressionMeasurement.StreamBoundingBox.Raw,
-    Hume.expressionMeasurement.StreamBoundingBox
+    serializers.expressionMeasurement.stream.StreamBoundingBox.Raw,
+    Hume.expressionMeasurement.stream.StreamBoundingBox
 > = core.serialization.object({
     x: core.serialization.number().optional(),
     y: core.serialization.number().optional(),
diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamErrorMessage.ts b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamErrorMessage.ts
index cdabadb5..23ccacac 100644
--- a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamErrorMessage.ts
+++ b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamErrorMessage.ts
@@ -8,8 +8,8 @@ import * as core from "../../../../../../core";
 import { JobDetails } from "./JobDetails";

 export const StreamErrorMessage: core.serialization.ObjectSchema<
-    serializers.expressionMeasurement.StreamErrorMessage.Raw,
-    Hume.expressionMeasurement.StreamErrorMessage
+    serializers.expressionMeasurement.stream.StreamErrorMessage.Raw,
+    Hume.expressionMeasurement.stream.StreamErrorMessage
 > = core.serialization.object({
     error: core.serialization.string().optional(),
     code: core.serialization.string().optional(),
diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsBurst.ts b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsBurst.ts
index 663678d0..29433d04 100644
--- a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsBurst.ts
+++ b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsBurst.ts
@@ -8,8 +8,8 @@ import * as core from "../../../../../../core";
 import { StreamModelPredictionsBurstPredictionsItem } from "./StreamModelPredictionsBurstPredictionsItem";

 export const StreamModelPredictionsBurst: core.serialization.ObjectSchema<
-    serializers.expressionMeasurement.StreamModelPredictionsBurst.Raw,
-    Hume.expressionMeasurement.StreamModelPredictionsBurst
+    serializers.expressionMeasurement.stream.StreamModelPredictionsBurst.Raw,
+    Hume.expressionMeasurement.stream.StreamModelPredictionsBurst
 > = core.serialization.object({
     predictions: core.serialization.list(StreamModelPredictionsBurstPredictionsItem).optional(),
 });
diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsBurstPredictionsItem.ts b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsBurstPredictionsItem.ts
index 60316207..fa567454 100644
--- a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsBurstPredictionsItem.ts
+++ b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsBurstPredictionsItem.ts
@@ -10,8 +10,8 @@ import { EmotionEmbedding } from "./EmotionEmbedding";
 import { EmotionEmbeddingItem } from "./EmotionEmbeddingItem";

 export const StreamModelPredictionsBurstPredictionsItem: core.serialization.ObjectSchema<
-    serializers.expressionMeasurement.StreamModelPredictionsBurstPredictionsItem.Raw,
-    Hume.expressionMeasurement.StreamModelPredictionsBurstPredictionsItem
+    serializers.expressionMeasurement.stream.StreamModelPredictionsBurstPredictionsItem.Raw,
+    Hume.expressionMeasurement.stream.StreamModelPredictionsBurstPredictionsItem
 > = core.serialization.object({
     time: TimeRange.optional(),
     emotions: EmotionEmbedding.optional(),
diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFace.ts b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFace.ts
index 5db902b5..0117c5f5 100644
--- a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFace.ts
+++ b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFace.ts
@@ -8,8 +8,8 @@ import * as core from "../../../../../../core";
 import { StreamModelPredictionsFacePredictionsItem } from "./StreamModelPredictionsFacePredictionsItem";

 export const StreamModelPredictionsFace: core.serialization.ObjectSchema<
-    serializers.expressionMeasurement.StreamModelPredictionsFace.Raw,
-    Hume.expressionMeasurement.StreamModelPredictionsFace
+    serializers.expressionMeasurement.stream.StreamModelPredictionsFace.Raw,
+    Hume.expressionMeasurement.stream.StreamModelPredictionsFace
 > = core.serialization.object({
     predictions: core.serialization.list(StreamModelPredictionsFacePredictionsItem).optional(),
 });
diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFacePredictionsItem.ts b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFacePredictionsItem.ts
index 715d4ccb..2cee6816 100644
--- a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFacePredictionsItem.ts
+++ b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFacePredictionsItem.ts
@@ -10,8 +10,8 @@ import { EmotionEmbedding } from "./EmotionEmbedding";
 import { EmotionEmbeddingItem } from "./EmotionEmbeddingItem";

 export const StreamModelPredictionsFacePredictionsItem: core.serialization.ObjectSchema<
-    serializers.expressionMeasurement.StreamModelPredictionsFacePredictionsItem.Raw,
-    Hume.expressionMeasurement.StreamModelPredictionsFacePredictionsItem
+    serializers.expressionMeasurement.stream.StreamModelPredictionsFacePredictionsItem.Raw,
+    Hume.expressionMeasurement.stream.StreamModelPredictionsFacePredictionsItem
 > = core.serialization.object({
     frame: core.serialization.number().optional(),
     time: core.serialization.number().optional(),
diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFacemesh.ts b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFacemesh.ts
index 2f3427d3..cec0622f 100644
--- a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFacemesh.ts
+++ b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFacemesh.ts
@@ -8,8 +8,8 @@ import * as core from "../../../../../../core";
 import { StreamModelPredictionsFacemeshPredictionsItem } from "./StreamModelPredictionsFacemeshPredictionsItem";

 export const StreamModelPredictionsFacemesh: core.serialization.ObjectSchema<
-    serializers.expressionMeasurement.StreamModelPredictionsFacemesh.Raw,
-    Hume.expressionMeasurement.StreamModelPredictionsFacemesh
+    serializers.expressionMeasurement.stream.StreamModelPredictionsFacemesh.Raw,
+    Hume.expressionMeasurement.stream.StreamModelPredictionsFacemesh
 > = core.serialization.object({
     predictions: core.serialization.list(StreamModelPredictionsFacemeshPredictionsItem).optional(),
 });
diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFacemeshPredictionsItem.ts b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFacemeshPredictionsItem.ts
index a9afbc5b..bc95b108 100644
--- a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFacemeshPredictionsItem.ts
+++ b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsFacemeshPredictionsItem.ts
@@ -9,8 +9,8 @@ import { EmotionEmbedding } from "./EmotionEmbedding";
 import { EmotionEmbeddingItem } from "./EmotionEmbeddingItem";

 export const StreamModelPredictionsFacemeshPredictionsItem: core.serialization.ObjectSchema<
-    serializers.expressionMeasurement.StreamModelPredictionsFacemeshPredictionsItem.Raw,
-    Hume.expressionMeasurement.StreamModelPredictionsFacemeshPredictionsItem
+    serializers.expressionMeasurement.stream.StreamModelPredictionsFacemeshPredictionsItem.Raw,
+    Hume.expressionMeasurement.stream.StreamModelPredictionsFacemeshPredictionsItem
 > = core.serialization.object({
     emotions: EmotionEmbedding.optional(),
 });
diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsJobDetails.ts b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsJobDetails.ts
index 500f9b84..5e647333 100644
--- a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsJobDetails.ts
+++ b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsJobDetails.ts
@@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index";
 import * as core from "../../../../../../core";

 export const StreamModelPredictionsJobDetails: core.serialization.ObjectSchema<
-    serializers.expressionMeasurement.StreamModelPredictionsJobDetails.Raw,
-    Hume.expressionMeasurement.StreamModelPredictionsJobDetails
+    serializers.expressionMeasurement.stream.StreamModelPredictionsJobDetails.Raw,
+    Hume.expressionMeasurement.stream.StreamModelPredictionsJobDetails
 > = core.serialization.object({
     jobId: core.serialization.property("job_id", core.serialization.string().optional()),
 });
diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsLanguage.ts b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsLanguage.ts
index 63a44acf..9b509bb7 100644
--- a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsLanguage.ts
+++ b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsLanguage.ts
@@ -8,8 +8,8 @@ import * as core from "../../../../../../core";
 import { StreamModelPredictionsLanguagePredictionsItem } from "./StreamModelPredictionsLanguagePredictionsItem";

 export const StreamModelPredictionsLanguage: core.serialization.ObjectSchema<
-    serializers.expressionMeasurement.StreamModelPredictionsLanguage.Raw,
-    Hume.expressionMeasurement.StreamModelPredictionsLanguage
+    serializers.expressionMeasurement.stream.StreamModelPredictionsLanguage.Raw,
+    Hume.expressionMeasurement.stream.StreamModelPredictionsLanguage
 > = core.serialization.object({
     predictions: core.serialization.list(StreamModelPredictionsLanguagePredictionsItem).optional(),
 });
diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsLanguagePredictionsItem.ts b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsLanguagePredictionsItem.ts
index 8947cec5..4458f4fb 100644
--- a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsLanguagePredictionsItem.ts
+++ b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsLanguagePredictionsItem.ts
@@ -14,8 +14,8 @@ import { SentimentItem } from "./SentimentItem";
 import { ToxicityItem } from "./ToxicityItem";

 export const StreamModelPredictionsLanguagePredictionsItem: core.serialization.ObjectSchema<
-    serializers.expressionMeasurement.StreamModelPredictionsLanguagePredictionsItem.Raw,
-    Hume.expressionMeasurement.StreamModelPredictionsLanguagePredictionsItem
+    serializers.expressionMeasurement.stream.StreamModelPredictionsLanguagePredictionsItem.Raw,
+    Hume.expressionMeasurement.stream.StreamModelPredictionsLanguagePredictionsItem
 > = core.serialization.object({
     text: core.serialization.string().optional(),
     position: TextPosition.optional(),
diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsProsody.ts b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsProsody.ts
index 1fb7f8d5..3cee2f80 100644
--- a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsProsody.ts
+++ b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsProsody.ts
@@ -8,8 +8,8 @@ import * as core from "../../../../../../core";
 import { StreamModelPredictionsProsodyPredictionsItem } from "./StreamModelPredictionsProsodyPredictionsItem";

 export const StreamModelPredictionsProsody: core.serialization.ObjectSchema<
-    serializers.expressionMeasurement.StreamModelPredictionsProsody.Raw,
-    Hume.expressionMeasurement.StreamModelPredictionsProsody
+    serializers.expressionMeasurement.stream.StreamModelPredictionsProsody.Raw,
+    Hume.expressionMeasurement.stream.StreamModelPredictionsProsody
 > = core.serialization.object({
     predictions: core.serialization.list(StreamModelPredictionsProsodyPredictionsItem).optional(),
 });
diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsProsodyPredictionsItem.ts b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsProsodyPredictionsItem.ts
index ed976c25..69ba517f 100644
--- a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsProsodyPredictionsItem.ts
+++ b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelPredictionsProsodyPredictionsItem.ts
@@ -10,8 +10,8 @@ import { EmotionEmbedding } from "./EmotionEmbedding";
 import { EmotionEmbeddingItem } from "./EmotionEmbeddingItem";

 export const StreamModelPredictionsProsodyPredictionsItem: core.serialization.ObjectSchema<
-    serializers.expressionMeasurement.StreamModelPredictionsProsodyPredictionsItem.Raw,
-    Hume.expressionMeasurement.StreamModelPredictionsProsodyPredictionsItem
+    serializers.expressionMeasurement.stream.StreamModelPredictionsProsodyPredictionsItem.Raw,
+    Hume.expressionMeasurement.stream.StreamModelPredictionsProsodyPredictionsItem
 > = core.serialization.object({
     time: TimeRange.optional(),
     emotions: EmotionEmbedding.optional(),
diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelsEndpointPayload.ts b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelsEndpointPayload.ts
index e489fabf..ea14ab4d 100644
--- a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelsEndpointPayload.ts
+++ b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelsEndpointPayload.ts
@@ -8,8 +8,8 @@ import * as core from "../../../../../../core";
 import { Config } from "./Config";

 export const StreamModelsEndpointPayload: core.serialization.ObjectSchema<
-    serializers.expressionMeasurement.StreamModelsEndpointPayload.Raw,
-    Hume.expressionMeasurement.StreamModelsEndpointPayload
+    serializers.expressionMeasurement.stream.StreamModelsEndpointPayload.Raw,
+    Hume.expressionMeasurement.stream.StreamModelsEndpointPayload
 > = core.serialization.object({
     data: core.serialization.string().optional(),
     models: Config.optional(),
diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelsEndpointPayloadModelsFace.ts b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelsEndpointPayloadModelsFace.ts
index fa061453..5585adaf 100644
--- a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelsEndpointPayloadModelsFace.ts
+++ b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelsEndpointPayloadModelsFace.ts
@@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index";
 import * as core from "../../../../../../core";

 export const StreamModelsEndpointPayloadModelsFace: core.serialization.ObjectSchema<
-    serializers.expressionMeasurement.StreamModelsEndpointPayloadModelsFace.Raw,
-    Hume.expressionMeasurement.StreamModelsEndpointPayloadModelsFace
+    serializers.expressionMeasurement.stream.StreamModelsEndpointPayloadModelsFace.Raw,
+    Hume.expressionMeasurement.stream.StreamModelsEndpointPayloadModelsFace
 > = core.serialization.object({
     facs: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(),
     descriptions: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(),
diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelsEndpointPayloadModelsLanguage.ts b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelsEndpointPayloadModelsLanguage.ts
index 6b87cd51..cfa05d08 100644
--- a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelsEndpointPayloadModelsLanguage.ts
+++ b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamModelsEndpointPayloadModelsLanguage.ts
@@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index";
 import * as core from "../../../../../../core";

 export const StreamModelsEndpointPayloadModelsLanguage: core.serialization.ObjectSchema<
-    serializers.expressionMeasurement.StreamModelsEndpointPayloadModelsLanguage.Raw,
-    Hume.expressionMeasurement.StreamModelsEndpointPayloadModelsLanguage
+    serializers.expressionMeasurement.stream.StreamModelsEndpointPayloadModelsLanguage.Raw,
+    Hume.expressionMeasurement.stream.StreamModelsEndpointPayloadModelsLanguage
 > = core.serialization.object({
     sentiment: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(),
     toxicity: core.serialization.record(core.serialization.string(), core.serialization.unknown()).optional(),
diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamWarningMessage.ts b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamWarningMessage.ts
index 20a1b4e7..39a61ef1 100644
--- a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamWarningMessage.ts
+++ b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamWarningMessage.ts
@@ -8,8 +8,8 @@ import * as core from "../../../../../../core";
 import { StreamWarningMessageJobDetails } from "./StreamWarningMessageJobDetails";

 export const StreamWarningMessage: core.serialization.ObjectSchema<
-    serializers.expressionMeasurement.StreamWarningMessage.Raw,
-    Hume.expressionMeasurement.StreamWarningMessage
+    serializers.expressionMeasurement.stream.StreamWarningMessage.Raw,
+    Hume.expressionMeasurement.stream.StreamWarningMessage
 > = core.serialization.object({
     warning: core.serialization.string().optional(),
     code: core.serialization.string().optional(),
diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamWarningMessageJobDetails.ts b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamWarningMessageJobDetails.ts
index 793fa5a2..d5ced472 100644
--- a/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamWarningMessageJobDetails.ts
+++ b/src/serialization/resources/expressionMeasurement/resources/stream/types/StreamWarningMessageJobDetails.ts
@@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index";
 import * as core from "../../../../../../core";

 export const StreamWarningMessageJobDetails: core.serialization.ObjectSchema<
-    serializers.expressionMeasurement.StreamWarningMessageJobDetails.Raw,
-    Hume.expressionMeasurement.StreamWarningMessageJobDetails
+    serializers.expressionMeasurement.stream.StreamWarningMessageJobDetails.Raw,
+    Hume.expressionMeasurement.stream.StreamWarningMessageJobDetails
 > = core.serialization.object({
     jobId: core.serialization.property("job_id", core.serialization.string().optional()),
 });
diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/types/SubscribeEvent.ts b/src/serialization/resources/expressionMeasurement/resources/stream/types/SubscribeEvent.ts
index 303a11df..2b3a5e8f 100644
--- a/src/serialization/resources/expressionMeasurement/resources/stream/types/SubscribeEvent.ts
+++ b/src/serialization/resources/expressionMeasurement/resources/stream/types/SubscribeEvent.ts
@@ -10,8 +10,8 @@ import { StreamErrorMessage } from "./StreamErrorMessage";
 import { StreamWarningMessage } from "./StreamWarningMessage";

 export const SubscribeEvent: core.serialization.Schema<
-    serializers.expressionMeasurement.SubscribeEvent.Raw,
-    Hume.expressionMeasurement.SubscribeEvent
+    serializers.expressionMeasurement.stream.SubscribeEvent.Raw,
+    Hume.expressionMeasurement.stream.SubscribeEvent
 > = core.serialization.undiscriminatedUnion([Config, StreamErrorMessage, StreamWarningMessage]);

 export declare namespace SubscribeEvent {
diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/types/TextPosition.ts b/src/serialization/resources/expressionMeasurement/resources/stream/types/TextPosition.ts
index e6ed020d..2cf28e6a 100644
--- a/src/serialization/resources/expressionMeasurement/resources/stream/types/TextPosition.ts
+++ b/src/serialization/resources/expressionMeasurement/resources/stream/types/TextPosition.ts
@@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index";
 import * as core from "../../../../../../core";

 export const TextPosition: core.serialization.ObjectSchema<
-    serializers.expressionMeasurement.TextPosition.Raw,
-    Hume.expressionMeasurement.TextPosition
+    serializers.expressionMeasurement.stream.TextPosition.Raw,
+    Hume.expressionMeasurement.stream.TextPosition
 > = core.serialization.object({
     begin: core.serialization.number().optional(),
     end: core.serialization.number().optional(),
diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/types/TimeRange.ts b/src/serialization/resources/expressionMeasurement/resources/stream/types/TimeRange.ts
index 37d6f65e..29ade483 100644
--- a/src/serialization/resources/expressionMeasurement/resources/stream/types/TimeRange.ts
+++ b/src/serialization/resources/expressionMeasurement/resources/stream/types/TimeRange.ts
@@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index";
 import * as core from "../../../../../../core";

 export const TimeRange: core.serialization.ObjectSchema<
-    serializers.expressionMeasurement.TimeRange.Raw,
-    Hume.expressionMeasurement.TimeRange
+    serializers.expressionMeasurement.stream.TimeRange.Raw,
+    Hume.expressionMeasurement.stream.TimeRange
 > = core.serialization.object({
     begin: core.serialization.number().optional(),
     end: core.serialization.number().optional(),
diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/types/Toxicity.ts b/src/serialization/resources/expressionMeasurement/resources/stream/types/Toxicity.ts
index f6cce707..76e50fec 100644
--- a/src/serialization/resources/expressionMeasurement/resources/stream/types/Toxicity.ts
+++ b/src/serialization/resources/expressionMeasurement/resources/stream/types/Toxicity.ts
@@ -8,8 +8,8 @@ import * as core from "../../../../../../core";
 import { ToxicityItem } from "./ToxicityItem";

 export const Toxicity: core.serialization.Schema<
-    serializers.expressionMeasurement.Toxicity.Raw,
-    Hume.expressionMeasurement.Toxicity
+    serializers.expressionMeasurement.stream.Toxicity.Raw,
+    Hume.expressionMeasurement.stream.Toxicity
 > = core.serialization.list(ToxicityItem);

 export declare namespace Toxicity {
diff --git a/src/serialization/resources/expressionMeasurement/resources/stream/types/ToxicityItem.ts b/src/serialization/resources/expressionMeasurement/resources/stream/types/ToxicityItem.ts
index c7ae93f7..8788d756 100644
--- a/src/serialization/resources/expressionMeasurement/resources/stream/types/ToxicityItem.ts
+++ b/src/serialization/resources/expressionMeasurement/resources/stream/types/ToxicityItem.ts
@@ -7,8 +7,8 @@ import * as Hume from "../../../../../../api/index";
 import * as core from "../../../../../../core";

 export const ToxicityItem: core.serialization.ObjectSchema<
-    serializers.expressionMeasurement.ToxicityItem.Raw,
-    Hume.expressionMeasurement.ToxicityItem
+    serializers.expressionMeasurement.stream.ToxicityItem.Raw,
+    Hume.expressionMeasurement.stream.ToxicityItem
 > = core.serialization.object({
     name: core.serialization.string().optional(),
     score: core.serialization.number().optional(),
diff --git a/src/version.ts b/src/version.ts
index 13c970cd..2b00a52a 100644
--- a/src/version.ts
+++ b/src/version.ts
@@ -1 +1 @@
-export const SDK_VERSION = "0.8.9";
+export const SDK_VERSION = "0.8.11";
diff --git a/yarn.lock b/yarn.lock
index c90bcc49..4088ab97 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -1121,9 +1121,9 @@ camelcase@^6.2.0:
   integrity sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==

 caniuse-lite@^1.0.30001646:
-  version "1.0.30001659"
-  resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001659.tgz#f370c311ffbc19c4965d8ec0064a3625c8aaa7af"
-  integrity sha512-Qxxyfv3RdHAfJcXelgf0hU4DFUVXBGTjqrBUZLUh8AtlGnsDo+CnncYtTd95+ZKfnANUOzxyIQCuU/UeBZBYoA==
+  version "1.0.30001660"
+  resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001660.tgz#31218de3463fabb44d0b7607b652e56edf2e2355"
+  integrity sha512-GacvNTTuATm26qC74pt+ad1fW15mlQ/zuTzzY1ZoIzECTP8HURDfF43kNxPgf7H1jmelCBQTTbBNxdSXOA7Bqg==

 chalk@^2.4.2:
   version "2.4.2"
@@ -1334,9 +1334,9 @@ domexception@^4.0.0:
   webidl-conversions "^7.0.0"

 electron-to-chromium@^1.5.4:
-  version "1.5.18"
-  resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.5.18.tgz#5fe62b9d21efbcfa26571066502d94f3ed97e495"
-  integrity sha512-1OfuVACu+zKlmjsNdcJuVQuVE61sZOLbNM4JAQ1Rvh6EOj0/EUKhMJjRH73InPlXSh8HIJk1cVZ8pyOV/FMdUQ==
+  version "1.5.19"
+  resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.5.19.tgz#aeaa0a076f3f0f0e8db2c57fd10158508f00725a"
+  integrity sha512-kpLJJi3zxTR1U828P+LIUDZ5ohixyo68/IcYOHLqnbTPr/wdgn4i1ECvmALN9E16JPA6cvCG5UG79gVwVdEK5w==

 emittery@^0.13.1:
   version "0.13.1"