diff --git a/abacusai/__init__.py b/abacusai/__init__.py
index 0d1f2f562..e73cdecc5 100644
--- a/abacusai/__init__.py
+++ b/abacusai/__init__.py
@@ -20,6 +20,8 @@
from .app_user_group_sign_in_token import AppUserGroupSignInToken
from .application_connector import ApplicationConnector
from .audio_gen_settings import AudioGenSettings
+from .audio_url_result import AudioUrlResult
+from .audit_log import AuditLog
from .batch_prediction import BatchPrediction
from .batch_prediction_version import BatchPredictionVersion
from .batch_prediction_version_logs import BatchPredictionVersionLogs
@@ -28,24 +30,37 @@
from .chat_message import ChatMessage
from .chat_session import ChatSession
from .chatllm_computer import ChatllmComputer
+from .chatllm_computer_status import ChatllmComputerStatus
+from .chatllm_memory import ChatllmMemory
+from .chatllm_project import ChatllmProject
+from .chatllm_project_permissions import ChatllmProjectPermissions
from .chatllm_referral_invite import ChatllmReferralInvite
from .chatllm_task import ChatllmTask
-from .client import AgentResponse, ApiClient, ApiException, ClientOptions, ReadOnlyClient, _request_context
+from .client import AgentResponse, ApiClient, ApiException, ClientOptions, ReadOnlyClient, ToolResponse, _request_context
from .code_agent_response import CodeAgentResponse
+from .code_autocomplete_edit_prediction_response import CodeAutocompleteEditPredictionResponse
from .code_autocomplete_response import CodeAutocompleteResponse
from .code_bot import CodeBot
from .code_edit import CodeEdit
from .code_edit_response import CodeEditResponse
from .code_edits import CodeEdits
+from .code_embeddings import CodeEmbeddings
+from .code_llm_changed_files import CodeLlmChangedFiles
from .code_source import CodeSource
+from .code_suggestion_validation_response import CodeSuggestionValidationResponse
+from .code_summary_response import CodeSummaryResponse
+from .codellm_embedding_constants import CodellmEmbeddingConstants
from .compute_point_info import ComputePointInfo
from .concatenation_config import ConcatenationConfig
+from .constants_autocomplete_response import ConstantsAutocompleteResponse
from .cpu_gpu_memory_specs import CpuGpuMemorySpecs
from .custom_chat_instructions import CustomChatInstructions
+from .custom_domain import CustomDomain
from .custom_loss_function import CustomLossFunction
from .custom_metric import CustomMetric
from .custom_metric_version import CustomMetricVersion
from .custom_train_function_info import CustomTrainFunctionInfo
+from .daemon_task_conversation import DaemonTaskConversation
from .data_consistency_duplication import DataConsistencyDuplication
from .data_metrics import DataMetrics
from .data_prep_logs import DataPrepLogs
@@ -59,6 +74,7 @@
from .dataset_column import DatasetColumn
from .dataset_version import DatasetVersion
from .dataset_version_logs import DatasetVersionLogs
+from .default_llm import DefaultLlm
from .deployment import Deployment
from .deployment_auth_token import DeploymentAuthToken
from .deployment_conversation import DeploymentConversation
@@ -69,6 +85,9 @@
from .document_retriever import DocumentRetriever
from .document_retriever_lookup_result import DocumentRetrieverLookupResult
from .document_retriever_version import DocumentRetrieverVersion
+from .document_retriever_version_logs import DocumentRetrieverVersionLogs
+from .domain_purchase_result import DomainPurchaseResult
+from .domain_search_result import DomainSearchResult
from .drift_distribution import DriftDistribution
from .drift_distributions import DriftDistributions
from .eda import Eda
@@ -79,7 +98,9 @@
from .eda_feature_collinearity import EdaFeatureCollinearity
from .eda_forecasting_analysis import EdaForecastingAnalysis
from .eda_version import EdaVersion
+from .edit_image_models import EditImageModels
from .embedding_feature_drift_distribution import EmbeddingFeatureDriftDistribution
+from .entri_auth_token import EntriAuthToken
from .execute_feature_group_operation import ExecuteFeatureGroupOperation
from .external_application import ExternalApplication
from .external_invite import ExternalInvite
@@ -106,7 +127,6 @@
from .feature_importance import FeatureImportance
from .feature_mapping import FeatureMapping
from .feature_performance_analysis import FeaturePerformanceAnalysis
-from .feature_record import FeatureRecord
from .file_connector import FileConnector
from .file_connector_instructions import FileConnectorInstructions
from .file_connector_verification import FileConnectorVerification
@@ -114,19 +134,31 @@
from .forecasting_analysis_graph_data import ForecastingAnalysisGraphData
from .forecasting_monitor_item_analysis import ForecastingMonitorItemAnalysis
from .forecasting_monitor_summary import ForecastingMonitorSummary
+from .fs_entry import FsEntry
from .function_logs import FunctionLogs
from .generated_pit_feature_config_option import GeneratedPitFeatureConfigOption
from .graph_dashboard import GraphDashboard
from .holdout_analysis import HoldoutAnalysis
from .holdout_analysis_version import HoldoutAnalysisVersion
+from .hosted_app import HostedApp
+from .hosted_app_container import HostedAppContainer
+from .hosted_app_file_read import HostedAppFileRead
+from .hosted_artifact import HostedArtifact
+from .hosted_database import HostedDatabase
+from .hosted_database_snapshot import HostedDatabaseSnapshot
from .hosted_model_token import HostedModelToken
+from .hostname_info import HostnameInfo
from .hume_voice import HumeVoice
+from .image_gen_model import ImageGenModel
+from .image_gen_model_options import ImageGenModelOptions
from .image_gen_settings import ImageGenSettings
from .indexing_config import IndexingConfig
from .inferred_database_column_to_feature_mappings import InferredDatabaseColumnToFeatureMappings
from .inferred_feature_mappings import InferredFeatureMappings
from .item_statistics import ItemStatistics
+from .lip_sync_gen_settings import LipSyncGenSettings
from .llm_app import LlmApp
+from .llm_artifact import LlmArtifact
from .llm_code_block import LlmCodeBlock
from .llm_execution_preview import LlmExecutionPreview
from .llm_execution_result import LlmExecutionResult
@@ -134,6 +166,10 @@
from .llm_input import LlmInput
from .llm_parameters import LlmParameters
from .llm_response import LlmResponse
+from .mcp_config import McpConfig
+from .mcp_server import McpServer
+from .mcp_server_connection import McpServerConnection
+from .mcp_server_query_result import McpServerQueryResult
from .memory_options import MemoryOptions
from .messaging_connector_response import MessagingConnectorResponse
from .model import Model
@@ -190,6 +226,8 @@
from .prediction_log_record import PredictionLogRecord
from .prediction_operator import PredictionOperator
from .prediction_operator_version import PredictionOperatorVersion
+from .presentation_export_result import PresentationExportResult
+from .private_web_app_deployment import PrivateWebAppDeployment
from .problem_type import ProblemType
from .project import Project
from .project_config import ProjectConfig
@@ -205,35 +243,53 @@
from .refresh_policy import RefreshPolicy
from .refresh_schedule import RefreshSchedule
from .regenerate_llm_external_application import RegenerateLlmExternalApplication
+from .regenerate_llm_option import RegenerateLlmOption
from .resolved_feature_group_template import ResolvedFeatureGroupTemplate
from .routing_action import RoutingAction
from .schema import Schema
+from .session_summary import SessionSummary
+from .session_transcript import SessionTranscript
+from .session_transcripts import SessionTranscripts
from .sftp_key import SftpKey
from .streaming_auth_token import StreamingAuthToken
from .streaming_client import StreamingClient
from .streaming_connector import StreamingConnector
from .streaming_row_count import StreamingRowCount
from .streaming_sample_code import StreamingSampleCode
+from .sts_gen_settings import StsGenSettings
+from .stt_gen_model import SttGenModel
+from .stt_gen_model_options import SttGenModelOptions
+from .stt_gen_settings import SttGenSettings
from .template_node_details import TemplateNodeDetails
from .test_point_predictions import TestPointPredictions
from .tone_details import ToneDetails
from .training_config_options import TrainingConfigOptions
+from .tts_gen_settings import TtsGenSettings
from .twitter_search_result import TwitterSearchResult
+from .unified_connector import UnifiedConnector
from .upload import Upload
from .upload_part import UploadPart
from .use_case import UseCase
from .use_case_requirements import UseCaseRequirements
from .user import User
from .user_exception import UserException
+from .user_group_object_permission import UserGroupObjectPermission
+from .video_gen_costs import VideoGenCosts
+from .video_gen_model import VideoGenModel
+from .video_gen_model_options import VideoGenModelOptions
from .video_gen_settings import VideoGenSettings
from .video_search_result import VideoSearchResult
from .voice_gen_details import VoiceGenDetails
+from .web_app_conversation import WebAppConversation
+from .web_app_deployment_permission_dict import WebAppDeploymentPermissionDict
+from .web_app_domain import WebAppDomain
from .web_page_response import WebPageResponse
from .web_search_response import WebSearchResponse
from .web_search_result import WebSearchResult
+from .web_service_trigger_run import WebServiceTriggerRun
from .webhook import Webhook
from .workflow_graph_node_details import WorkflowGraphNodeDetails
from .workflow_node_template import WorkflowNodeTemplate
-__version__ = "1.4.31"
+__version__ = "1.4.76"
diff --git a/abacusai/api_class/ai_agents.py b/abacusai/api_class/ai_agents.py
index 1f0da2ec4..e28a052b0 100644
--- a/abacusai/api_class/ai_agents.py
+++ b/abacusai/api_class/ai_agents.py
@@ -1,16 +1,24 @@
import ast
import dataclasses
+import uuid
from typing import Dict, List, Tuple, Union
from . import enums
from .abstract import ApiClass, get_clean_function_source_code_for_agent, validate_constructor_arg_types
-def validate_input_dict_param(dict_object, friendly_class_name, must_contain=[]):
+MIN_AGENT_SLEEP_TIME = 300
+
+
+def validate_input_dict_param(dict_object, friendly_class_name, must_contain=[], schema={}):
if not isinstance(dict_object, dict):
raise ValueError(friendly_class_name, 'Invalid argument. Provided argument should be a dictionary.')
if any(field not in dict_object for field in must_contain):
raise ValueError(friendly_class_name, f'One or more keys are missing in the argument provided. Must contain keys - {must_contain}.')
+ for key, value_type in schema.items():
+ value = dict_object.get(key)
+ if value and not isinstance(value, value_type):
+ raise ValueError(friendly_class_name, f'Invalid parameter "{key}". It should be of type {value_type.__name__}.')
@dataclasses.dataclass
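For reference, a hedged sketch of how the new `schema` argument behaves; the call below is illustrative and not taken from the SDK's tests:

    from abacusai.api_class.ai_agents import validate_input_dict_param

    # Each schema entry maps a key to the type it must have when present and truthy.
    validate_input_dict_param(
        {'nodes': [], 'specification_type': 'data_flow'},
        friendly_class_name='workflow_graph',
        schema={'nodes': list, 'specification_type': str},
    )
    # Passes: 'nodes' is falsy and therefore skipped; 'specification_type' matches str.
    # A non-empty value of the wrong type (e.g. nodes='n1,n2') raises ValueError.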
@@ -64,6 +72,7 @@ class WorkflowNodeInputMapping(ApiClass):
Set to `None` if the type is `USER_INPUT` and the variable doesn't need a pre-filled initial value.
is_required (bool): Indicates whether the input is required. Defaults to True.
description (str): The description of this input.
+ long_description (str): The detailed description of this input.
constant_value (str): The constant value of this input if variable type is CONSTANT. Only applicable for template nodes.
"""
name: str
@@ -72,6 +81,7 @@ class WorkflowNodeInputMapping(ApiClass):
source_prop: str = dataclasses.field(default=None)
is_required: bool = dataclasses.field(default=True)
description: str = dataclasses.field(default=None)
+ long_description: str = dataclasses.field(default=None)
constant_value: str = dataclasses.field(default=None)
def __post_init__(self):
@@ -98,6 +108,7 @@ def to_dict(self):
'source_prop': self.source_prop or self.name,
'is_required': self.is_required,
'description': self.description,
+ 'long_description': self.long_description,
'constant_value': self.constant_value
}
@@ -115,6 +126,7 @@ def from_dict(cls, mapping: dict):
source_prop=mapping.get('source_prop') or mapping['name'] if mapping.get('variable_source') else None,
is_required=mapping.get('is_required', True),
description=mapping.get('description'),
+ long_description=mapping.get('long_description'),
constant_value=mapping.get('constant_value')
)
@@ -197,12 +209,40 @@ def from_input_mappings(cls, input_mappings: List[WorkflowNodeInputMapping]):
json_schema = {
'type': 'object',
'required': [input_mapping.name for input_mapping in user_input_mappings if input_mapping.is_required],
- 'properties': {input_mapping.name: {'title': input_mapping.name, 'type': 'string'} for input_mapping in user_input_mappings}
+ 'properties': {input_mapping.name: {'title': input_mapping.name, 'type': 'string', **({'description': input_mapping.description} if input_mapping.description else {})} for input_mapping in user_input_mappings}
}
return cls(json_schema=json_schema)
else:
return cls(json_schema={})
+ @classmethod
+    def from_tool_variable_mappings(cls, tool_variable_mappings: List[dict]):
+ """
+ Creates a WorkflowNodeInputSchema for the given tool variable mappings.
+
+ Args:
+ tool_variable_mappings (List[dict]): The tool variable mappings for the node.
+ """
+ json_schema = {'type': 'object', 'properties': {}, 'required': []}
+ ui_schema = {}
+ for mapping in tool_variable_mappings:
+ json_schema['properties'][mapping['name']] = {'title': mapping['name'], 'type': enums.PythonFunctionArgumentType.to_json_type(mapping['variable_type'])}
+ if mapping.get('description'):
+ json_schema['properties'][mapping['name']]['description'] = mapping['description']
+ if mapping['variable_type'] == enums.PythonFunctionArgumentType.ATTACHMENT:
+ json_schema['properties'][mapping['name']]['format'] = 'data-url'
+ ui_schema[mapping['name']] = {'ui:widget': 'file'}
+ if mapping['variable_type'] == enums.PythonFunctionArgumentType.LIST:
+ if mapping['item_type'] == enums.PythonFunctionArgumentType.ATTACHMENT:
+ json_schema['properties'][mapping['name']]['type'] = 'string'
+ json_schema['properties'][mapping['name']]['format'] = 'data-url'
+ ui_schema[mapping['name']] = {'ui:widget': 'file', 'ui:options': {'multiple': True}}
+ else:
+ json_schema['properties'][mapping['name']]['items'] = {'type': enums.PythonFunctionArgumentType.to_json_type(mapping['item_type'])}
+ if mapping['is_required']:
+ json_schema['required'].append(mapping['name'])
+ return cls(json_schema=json_schema, ui_schema=ui_schema)
+
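A hedged sketch of the input this helper expects; the mapping dicts are hypothetical but use exactly the keys read in the loop above:

    from abacusai.api_class.ai_agents import WorkflowNodeInputSchema
    from abacusai.api_class import enums

    schema = WorkflowNodeInputSchema.from_tool_variable_mappings([
        {'name': 'query', 'variable_type': enums.PythonFunctionArgumentType.STRING,
         'description': 'Search query', 'is_required': True},
        {'name': 'attachments', 'variable_type': enums.PythonFunctionArgumentType.LIST,
         'item_type': enums.PythonFunctionArgumentType.ATTACHMENT, 'is_required': False},
    ])
    # 'query' lands in json_schema['required']; the attachment list is forced to a
    # 'data-url' string and rendered as a multi-file upload widget via ui_schema.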
@validate_constructor_arg_types('output_mapping')
@dataclasses.dataclass
@@ -214,10 +254,12 @@ class WorkflowNodeOutputMapping(ApiClass):
name (str): The name of the output.
variable_type (Union[WorkflowNodeOutputType, str]): The type of the output in the form of an enum or a string.
description (str): The description of this output.
+ long_description (str): The detailed description of this output.
"""
name: str
variable_type: Union[enums.WorkflowNodeOutputType, str] = dataclasses.field(default=enums.WorkflowNodeOutputType.ANY)
description: str = dataclasses.field(default=None)
+ long_description: str = dataclasses.field(default=None)
def __post_init__(self):
if isinstance(self.variable_type, str):
@@ -227,7 +269,8 @@ def to_dict(self):
return {
'name': self.name,
'variable_type': self.variable_type.value,
- 'description': self.description
+ 'description': self.description,
+ 'long_description': self.long_description
}
@classmethod
@@ -241,7 +284,8 @@ def from_dict(cls, mapping: dict):
return cls(
name=mapping['name'],
variable_type=enums.WorkflowNodeOutputType(variable_type),
- description=mapping.get('description')
+ description=mapping.get('description'),
+ long_description=mapping.get('long_description')
)
@@ -302,10 +346,11 @@ class WorkflowGraphNode(ApiClass):
Args:
name (str): A unique name for the workflow node.
input_mappings (List[WorkflowNodeInputMapping]): List of input mappings for the node. Each arg/kwarg of the node function should have a corresponding input mapping.
- output_mappings (List[WorkflowNodeOutputMapping]): List of output mappings for the node. Each field in the returned dict/AgentResponse must have a corresponding output mapping.
+ output_mappings (List[str]): List of outputs for the node. Each field in the returned dict/AgentResponse must have a corresponding output in the list.
function (callable): The callable node function reference.
- input_schema (WorkflowNodeInputSchema): The react json schema for the user input variables.
- output_schema (WorkflowNodeOutputSchema): The react json schema for the output to be shown on UI.
+        input_schema (WorkflowNodeInputSchema): The react json schema for the user input variables. This should be empty for the CHAT interface.
+        output_schema (List[str]): The list of outputs to be shown on the UI. Each output corresponds to a field in the output mappings of the node.
+ description (str): A description of the workflow node.
Additional Attributes:
function_name (str): The name of the function.
@@ -313,9 +358,20 @@ class WorkflowGraphNode(ApiClass):
trigger_config (TriggerConfig): The configuration for a trigger workflow node.
"""
- def __init__(self, name: str, function: callable = None, input_mappings: Union[Dict[str, WorkflowNodeInputMapping], List[WorkflowNodeInputMapping]] = None, output_mappings: Union[List[str], Dict[str, str], List[WorkflowNodeOutputMapping]] = None, function_name: str = None, source_code: str = None, input_schema: Union[List[str], WorkflowNodeInputSchema] = None, output_schema: Union[List[str], WorkflowNodeOutputSchema] = None, template_metadata: dict = None, trigger_config: TriggerConfig = None):
+ @classmethod
+ def is_special_node_type(cls) -> bool:
+ """Returns True if this node type skips source code validation.
+
+ Special node types (like InputNode and LLMAgentNode) are executed
+ directly without source code validation.
+ """
+ return False
+
+ def __init__(self, name: str, function: callable = None, input_mappings: Union[Dict[str, WorkflowNodeInputMapping], List[WorkflowNodeInputMapping]] = None, output_mappings: Union[List[str], Dict[str, str], List[WorkflowNodeOutputMapping]] = None, function_name: str = None, source_code: str = None, input_schema: Union[List[str], WorkflowNodeInputSchema] = None, output_schema: Union[List[str], WorkflowNodeOutputSchema] = None, template_metadata: dict = None, trigger_config: TriggerConfig = None, description: str = None):
self.template_metadata = template_metadata
self.trigger_config = trigger_config
+ self.node_type = 'workflow_node'
+ self.description = description
if self.template_metadata and not self.template_metadata.get('initialized'):
self.name = name
self.function_name = None
@@ -325,49 +381,53 @@ def __init__(self, name: str, function: callable = None, input_mappings: Union[D
self.input_schema = input_schema
self.output_schema = output_schema
else:
+ is_agentic_loop_tool = self.template_metadata and self.template_metadata.get('is_agentic_loop_tool', False)
+ is_special_node_type = self.is_special_node_type()
if function:
self.function = function
self.function_name = function.__name__
self.source_code = get_clean_function_source_code_for_agent(function)
- elif function_name and source_code:
+ elif function_name and (is_agentic_loop_tool or is_special_node_type or source_code):
self.function_name = function_name
- self.source_code = source_code
+ self.source_code = source_code if not (is_agentic_loop_tool or is_special_node_type) else None
else:
raise ValueError('workflow_graph_node', 'Either function or function_name and source_code must be provided.')
self.name = name
- try:
- tree = ast.parse(self.source_code)
- except SyntaxError as e:
- raise ValueError(f'"{name}" source code', f'SyntaxError: "{e}"')
- arg_defaults = {}
- function_found = False
- for node in ast.iter_child_nodes(tree):
- if isinstance(node, ast.FunctionDef) and node.name == self.function_name:
- function_found = True
- input_arguments = [arg.arg for arg in node.args.args]
- defaults = [None] * (len(input_arguments) - len(node.args.defaults)) + node.args.defaults
- arg_defaults = dict(zip(input_arguments, defaults))
- if not function_found:
- raise ValueError(f'"{name}" source code', f'Function "{self.function_name}" not found in the provided source code.')
+ if not (is_agentic_loop_tool or is_special_node_type):
+ try:
+ tree = ast.parse(self.source_code)
+ except SyntaxError as e:
+ raise ValueError(f'"{name}" source code', f'SyntaxError: "{e}"')
+ arg_defaults = {}
+ function_found = False
+ for node in ast.iter_child_nodes(tree):
+ if isinstance(node, ast.FunctionDef) and node.name == self.function_name:
+ function_found = True
+ input_arguments = [arg.arg for arg in node.args.args]
+ defaults = [None] * (len(input_arguments) - len(node.args.defaults)) + node.args.defaults
+ arg_defaults = dict(zip(input_arguments, defaults))
+ if not function_found:
+ raise ValueError(f'"{name}" source code', f'Function "{self.function_name}" not found in the provided source code.')
is_shortform_input_mappings = False
if input_mappings is None:
input_mappings = {}
if isinstance(input_mappings, List) and all(isinstance(input, WorkflowNodeInputMapping) for input in input_mappings):
self.input_mappings = input_mappings
- input_mapping_args = [input.name for input in input_mappings]
- for input_name in input_mapping_args:
- if input_name not in arg_defaults:
- raise ValueError('workflow_graph_node', f'Invalid input mapping. Argument "{input_name}" not found in function "{self.function_name}".')
- for arg, default in arg_defaults.items():
- if arg not in input_mapping_args:
- self.input_mappings.append(WorkflowNodeInputMapping(name=arg, variable_type=enums.WorkflowNodeInputType.USER_INPUT, is_required=default is None))
+ if not (is_agentic_loop_tool or is_special_node_type):
+ input_mapping_args = [input.name for input in input_mappings]
+ for input_name in input_mapping_args:
+ if input_name not in arg_defaults:
+ raise ValueError('workflow_graph_node', f'Invalid input mapping. Argument "{input_name}" not found in function "{self.function_name}".')
+ for arg, default in arg_defaults.items():
+ if arg not in input_mapping_args:
+ self.input_mappings.append(WorkflowNodeInputMapping(name=arg, variable_type=enums.WorkflowNodeInputType.USER_INPUT, is_required=default is None))
elif isinstance(input_mappings, Dict) and all(isinstance(key, str) and isinstance(value, (WorkflowNodeInputMapping, WorkflowGraphNode)) for key, value in input_mappings.items()):
is_shortform_input_mappings = True
self.input_mappings = [WorkflowNodeInputMapping(name=arg, variable_type=enums.WorkflowNodeInputType.USER_INPUT, is_required=default is None) for arg, default in arg_defaults.items() if arg not in input_mappings]
for key, value in input_mappings.items():
- if key not in arg_defaults:
+ if not (is_agentic_loop_tool or is_special_node_type) and key not in arg_defaults:
raise ValueError('workflow_graph_node', f'Invalid input mapping. Argument "{key}" not found in function "{self.function_name}".')
if isinstance(value, WorkflowGraphNode):
self.input_mappings.append(WorkflowNodeInputMapping(name=key, variable_type=enums.WorkflowNodeInputType.WORKFLOW_VARIABLE, variable_source=value.name, source_prop=key, is_required=arg_defaults.get(key) is None))
@@ -417,7 +477,7 @@ def __init__(self, name: str, function: callable = None, input_mappings: Union[D
raise ValueError('workflow_graph_node', 'Invalid output schema. Must be a WorkflowNodeOutputSchema or a list of output section names.')
@classmethod
- def _raw_init(cls, name: str, input_mappings: List[WorkflowNodeInputMapping] = None, output_mappings: List[WorkflowNodeOutputMapping] = None, function: callable = None, function_name: str = None, source_code: str = None, input_schema: WorkflowNodeInputSchema = None, output_schema: WorkflowNodeOutputSchema = None, template_metadata: dict = None, trigger_config: TriggerConfig = None):
+ def _raw_init(cls, name: str, input_mappings: List[WorkflowNodeInputMapping] = None, output_mappings: List[WorkflowNodeOutputMapping] = None, function: callable = None, function_name: str = None, source_code: str = None, input_schema: WorkflowNodeInputSchema = None, output_schema: WorkflowNodeOutputSchema = None, template_metadata: dict = None, trigger_config: TriggerConfig = None, description: str = None):
workflow_node = cls.__new__(cls, name, input_mappings, output_mappings, input_schema, output_schema, template_metadata, trigger_config)
workflow_node.name = name
if function:
@@ -438,6 +498,7 @@ def _raw_init(cls, name: str, input_mappings: List[WorkflowNodeInputMapping] = N
workflow_node.output_schema = output_schema
workflow_node.template_metadata = template_metadata
workflow_node.trigger_config = trigger_config
+ workflow_node.description = description
return workflow_node
@classmethod
@@ -447,7 +508,7 @@ def from_template(cls, template_name: str, name: str, configs: dict = None, inpu
if isinstance(input_mappings, List) and all(isinstance(input, WorkflowNodeInputMapping) for input in input_mappings):
instance_input_mappings = input_mappings
elif isinstance(input_mappings, Dict) and all(isinstance(key, str) and isinstance(value, WorkflowNodeInputMapping) for key, value in input_mappings.items()):
- instance_input_mappings = [WorkflowNodeInputMapping(name=arg, variable_type=mapping.variable_type, variable_source=mapping.variable_source, source_prop=mapping.source_prop, is_required=mapping.is_required, description=mapping.description) for arg, mapping in input_mappings]
+ instance_input_mappings = [WorkflowNodeInputMapping(name=arg, variable_type=mapping.variable_type, variable_source=mapping.variable_source, source_prop=mapping.source_prop, is_required=mapping.is_required, description=mapping.description, long_description=mapping.long_description) for arg, mapping in input_mappings]
elif input_mappings is None:
instance_input_mappings = []
else:
@@ -473,6 +534,14 @@ def from_template(cls, template_name: str, name: str, configs: dict = None, inpu
else:
raise ValueError('workflow_graph_node', 'Invalid output schema. Must be a WorkflowNodeOutputSchema or a list of output section names.')
+ if sleep_time is not None:
+ if isinstance(sleep_time, str) and sleep_time.isdigit():
+ sleep_time = int(sleep_time)
+ if not isinstance(sleep_time, int) or sleep_time < 0:
+ raise ValueError('workflow_graph_node', 'Invalid sleep time. Must be a non-negative integer.')
+ if sleep_time < MIN_AGENT_SLEEP_TIME:
+ raise ValueError('workflow_graph_node', f'Sleep time for an autonomous agent cannot be less than {MIN_AGENT_SLEEP_TIME} seconds.')
+
return cls(
name=name,
input_mappings=instance_input_mappings,
@@ -486,6 +555,49 @@ def from_template(cls, template_name: str, name: str, configs: dict = None, inpu
}
)
+ @classmethod
+ def from_tool(cls, tool_name: str, name: str, configs: dict = None, input_mappings: Union[Dict[str, WorkflowNodeInputMapping], List[WorkflowNodeInputMapping]] = None, input_schema: Union[List[str], WorkflowNodeInputSchema] = None, output_schema: Union[List[str], WorkflowNodeOutputSchema] = None):
+ """
+ Creates and returns a WorkflowGraphNode based on an available user created tool.
+ Note: DO NOT specify the output mapping for the tool; it will be inferred automatically. Doing so will raise an error.
+
+ Args:
+            tool_name: The name of the tool. A tool with this name must already exist in the platform.
+ name: The name to assign to the WorkflowGraphNode instance.
+ configs (optional): The configuration state of the tool to use (if necessary). If not specified, will use the tool's default configuration.
+ input_mappings (optional): The WorkflowNodeInputMappings for this node.
+ input_schema (optional): The WorkflowNodeInputSchema for this node.
+ output_schema (optional): The WorkflowNodeOutputSchema for this node.
+ """
+ node = cls.from_template(
+ template_name=tool_name,
+ name=name,
+ configs=configs,
+ input_mappings=input_mappings,
+ input_schema=input_schema,
+ output_schema=output_schema
+ )
+ node.template_metadata['template_type'] = 'tool'
+ return node
+
+ @classmethod
+ def from_system_tool(cls, tool_name: str, name: str, configs: dict = None, input_mappings: Union[Dict[str, WorkflowNodeInputMapping], List[WorkflowNodeInputMapping]] = None, input_schema: Union[List[str], WorkflowNodeInputSchema] = None, output_schema: Union[List[str], WorkflowNodeOutputSchema] = None):
+ """
+ Creates and returns a WorkflowGraphNode based on the name of an available system tool.
+ Note: DO NOT specify the output mapping for the tool; it will be inferred automatically. Doing so will raise an error.
+
+ Args:
+            tool_name: The name of the system tool. A system tool with this name must already be available in the platform.
+ name: The name to assign to the WorkflowGraphNode instance.
+ configs (optional): The configuration state of the tool to use (if necessary). If not specified, will use the tool's default configuration.
+ input_mappings (optional): The WorkflowNodeInputMappings for this node.
+ input_schema (optional): The WorkflowNodeInputSchema for this node.
+ output_schema (optional): The WorkflowNodeOutputSchema for this node.
+ """
+ node = cls.from_tool(tool_name, name, configs, input_mappings, input_schema, output_schema)
+ node.template_metadata['is_system_tool'] = True
+ return node
+
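A hedged usage sketch; the tool names are placeholders, and a tool with the given name must already exist (or, for the system variant, be provided by the platform):

    from abacusai.api_class.ai_agents import WorkflowGraphNode

    # Output mappings are intentionally omitted; they are inferred for tool nodes.
    scrape_step = WorkflowGraphNode.from_tool(tool_name='my_scraper_tool', name='scrape_step')
    search_step = WorkflowGraphNode.from_system_tool(tool_name='web_search', name='search_step')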
def to_dict(self):
return {
'name': self.name,
@@ -496,7 +608,9 @@ def to_dict(self):
'input_schema': self.input_schema.to_dict(),
'output_schema': self.output_schema.to_dict(),
'template_metadata': self.template_metadata,
- 'trigger_config': self.trigger_config.to_dict() if self.trigger_config else None
+ 'trigger_config': self.trigger_config.to_dict() if self.trigger_config else None,
+ 'node_type': self.node_type,
+ 'description': self.description,
}
def is_template_node(self):
@@ -525,7 +639,8 @@ def from_dict(cls, node: dict):
input_schema=WorkflowNodeInputSchema.from_dict(node.get('input_schema', {})),
output_schema=WorkflowNodeOutputSchema.from_dict(node.get('output_schema', {})),
template_metadata=node.get('template_metadata'),
- trigger_config=TriggerConfig.from_dict(node.get('trigger_config')) if node.get('trigger_config') else None
+ trigger_config=TriggerConfig.from_dict(node.get('trigger_config')) if node.get('trigger_config') else None,
+ description=node.get('description')
)
return instance
@@ -552,16 +667,335 @@ def __init__(self, node: 'WorkflowGraphNode'):
self.node = node
def __getattr__(self, name):
+ if self.node.is_template_node():
+ return WorkflowNodeInputMapping(name, enums.WorkflowNodeInputType.WORKFLOW_VARIABLE, variable_source=self.node.name, source_prop=name)
for mapping in self.node.output_mappings:
if mapping.name == name:
return WorkflowNodeInputMapping(name, enums.WorkflowNodeInputType.WORKFLOW_VARIABLE, variable_source=self.node.name, source_prop=name)
- raise AttributeError(f'Output mapping "{name}" not found in node "{self.name}".')
+ raise AttributeError(f'Output mapping "{name}" not found in node "{self.node.name}".')
@property
def outputs(self):
return self.Outputs(self)
+@validate_constructor_arg_types('decision_node')
+@dataclasses.dataclass
+class DecisionNode(WorkflowGraphNode):
+ """
+ Represents a decision node in an Agent workflow graph. It is connected between two workflow nodes and is used to determine if subsequent nodes should be executed.
+ """
+
+ def __init__(self, name: str, condition: str, input_mappings: Union[Dict[str, WorkflowNodeInputMapping], List[WorkflowNodeInputMapping]], description: str = None):
+ self.node_type = 'decision_node'
+ self.name = name
+ self.source_code = condition
+ self.description = description
+ if isinstance(input_mappings, List) and all(isinstance(input, WorkflowNodeInputMapping) for input in input_mappings):
+ self.input_mappings = input_mappings
+ elif isinstance(input_mappings, Dict) and all(isinstance(key, str) and isinstance(value, (WorkflowNodeInputMapping, WorkflowGraphNode)) for key, value in input_mappings.items()):
+ self.input_mappings = []
+ for key, value in input_mappings.items():
+                if isinstance(value, WorkflowGraphNode):
+                    self.input_mappings.append(WorkflowNodeInputMapping(name=key, variable_type=enums.WorkflowNodeInputType.WORKFLOW_VARIABLE, variable_source=value.name, source_prop=key, is_required=True))
+                else:
+                    self.input_mappings.append(WorkflowNodeInputMapping(name=key, variable_type=value.variable_type, variable_source=value.variable_source, source_prop=value.source_prop, is_required=value.is_required))
+ else:
+ raise ValueError('workflow_graph_decision_node', 'Invalid input mappings. Must be a list of WorkflowNodeInputMapping or a dictionary of input mappings in the form {arg_name: node_name.outputs.prop_name}.')
+ for mapping in self.input_mappings:
+ if mapping.variable_type != enums.WorkflowNodeInputType.WORKFLOW_VARIABLE:
+ raise ValueError('workflow_graph_decision_node', 'Invalid input mappings. Decision node input mappings must be of type WORKFLOW_VARIABLE.')
+ if not mapping.is_required:
+ raise ValueError('workflow_graph_decision_node', 'Invalid input mappings. Decision node input mappings must be required.')
+ self.output_mappings = [WorkflowNodeOutputMapping(name=input_mapping.name) for input_mapping in self.input_mappings]
+ self.template_metadata = None
+ self.trigger_config = None
+ self.input_schema = None
+ self.output_schema = None
+ self.function_name = None
+
+ def to_dict(self):
+ return {
+ 'name': self.name,
+ 'source_code': self.source_code,
+ 'input_mappings': [mapping.to_dict() for mapping in self.input_mappings],
+ 'output_mappings': [mapping.to_dict() for mapping in self.output_mappings],
+ 'node_type': self.node_type,
+ 'description': self.description,
+ }
+
+ @classmethod
+ def from_dict(cls, node: dict):
+ return cls(
+ name=node['name'],
+ condition=node['source_code'],
+ input_mappings=[WorkflowNodeInputMapping.from_dict(mapping) for mapping in node.get('input_mappings', [])],
+ description=node.get('description')
+ )
+
+
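A hedged wiring sketch; the upstream node and the condition expression are illustrative:

    from abacusai.api_class.ai_agents import DecisionNode, WorkflowGraphNode

    # A plain workflow node whose function returns a 'score' output.
    classify = WorkflowGraphNode(
        name='classify',
        function_name='classify',
        source_code='def classify(text):\n    return {"score": 0.9}',
        output_mappings=['score'],
    )
    # The decision node gates downstream execution on its inputs, which must be
    # required WORKFLOW_VARIABLE mappings.
    gate = DecisionNode(
        name='score_gate',
        condition='score > 0.5',
        input_mappings={'score': classify.outputs.score},
    )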
+@validate_constructor_arg_types('llm_agent_node')
+@dataclasses.dataclass
+class LLMAgentNode(WorkflowGraphNode):
+ """
+    Represents an LLM agent node in an Agent workflow graph. The LLM Agent Node can be initialized using either chatbot_deployment_id or chatbot_parameters.
+
+ Args:
+ name (str): A unique name for the LLM agent node.
+ chatbot_deployment_id (str): The deployment ID of the chatbot to use for this node. If not provided, a new chatbot will be created.
+ chatbot_parameters (dict): Parameters for configuring the chatbot, such as data_feature_group_ids and document_retrievers.
+        uid (str): A unique identifier for the LLM agent node. It is auto-generated at construction time and preserved when deserializing an existing workflow via from_dict.
+ """
+ name: str
+ chatbot_deployment_id: str = dataclasses.field(default=None)
+ chatbot_parameters: dict = dataclasses.field(default_factory=dict)
+ uid: str = dataclasses.field(default=None)
+
+ @classmethod
+ def is_special_node_type(cls) -> bool:
+ """LLM agent nodes skip source code validation."""
+ return True
+
+ def __init__(self, name: str, chatbot_deployment_id: str = None, chatbot_parameters: dict = None, input_schema: WorkflowNodeInputSchema = None, output_schema: WorkflowNodeOutputSchema = None):
+ # Set LLM-specific properties - uid is auto-generated internally
+ self.uid = str(uuid.uuid4())[:6]
+ self.chatbot_deployment_id = chatbot_deployment_id
+ self.chatbot_parameters = chatbot_parameters or {}
+
+ # Prepare input and output mappings
+ input_mappings = [
+ WorkflowNodeInputMapping(name='user_input', variable_type=enums.WorkflowNodeInputType.USER_INPUT, is_required=True)
+ ]
+ output_mappings = [WorkflowNodeOutputMapping(name='chat_output', variable_type=enums.WorkflowNodeOutputType.DICT, description='The output of the chatbot')]
+
+ # Determine function_name and create minimal source_code for parent validation
+ # Always use get_function_name() which includes uid to ensure uniqueness
+ function_name = self.get_function_name()
+ minimal_source_code = f'def {function_name}(user_input):\n pass'
+
+ # Generate input schema from input mappings if not provided
+ if input_schema is None:
+ input_schema = WorkflowNodeInputSchema.from_input_mappings(input_mappings)
+
+ # Initialize parent class with minimal source code to satisfy validation
+ super().__init__(name=name, function_name=function_name, source_code=minimal_source_code, input_mappings=input_mappings, output_mappings=output_mappings, input_schema=input_schema, output_schema=output_schema or WorkflowNodeOutputSchema.from_fields_list(['chat_output']))
+ self.node_type = 'llm_agent_node'
+ self.source_code = None
+
+ def get_source_code(self):
+ return None
+
+ def get_function_name(self):
+ return f'llm_agent_node_{self.uid}'
+
+ def to_dict(self):
+ # Reuse parent's to_dict and add LLM-specific fields
+ result = super().to_dict()
+ result.update({
+ 'chatbot_deployment_id': self.chatbot_deployment_id,
+ 'chatbot_parameters': self.chatbot_parameters,
+ 'uid': self.uid,
+ })
+ return result
+
+ @classmethod
+ def from_dict(cls, node: dict):
+ instance = cls(
+ name=node['name'],
+ chatbot_deployment_id=node['chatbot_deployment_id'],
+ chatbot_parameters=node['chatbot_parameters'],
+ input_schema=WorkflowNodeInputSchema.from_dict(node.get('input_schema', {})),
+ output_schema=WorkflowNodeOutputSchema.from_dict(node.get('output_schema', {})),
+ )
+ # Preserve uid from dict if it exists (for round-trip serialization of existing workflows)
+ if 'uid' in node and node.get('uid'):
+ instance.uid = node['uid']
+ # Preserve input_mappings and output_mappings from the dict if they exist
+ if 'input_mappings' in node:
+ instance.input_mappings = [WorkflowNodeInputMapping.from_dict(mapping) for mapping in node['input_mappings']]
+ if 'output_mappings' in node:
+ instance.output_mappings = [WorkflowNodeOutputMapping.from_dict(mapping) for mapping in node['output_mappings']]
+ # Preserve function_name and source_code from the dict if they exist
+ if 'function_name' in node:
+ instance.function_name = node['function_name']
+ if 'source_code' in node:
+ instance.source_code = node['source_code']
+ # Preserve template_metadata and trigger_config from the dict if they exist
+ if 'template_metadata' in node:
+ instance.template_metadata = node['template_metadata']
+ if 'trigger_config' in node:
+ instance.trigger_config = TriggerConfig.from_dict(node['trigger_config']) if node['trigger_config'] else None
+ # Note: execution_context is intentionally not preserved on SDK objects - it's an internal runtime detail
+ # that should only exist in dict representations
+ return instance
+
+
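A hedged construction sketch; the deployment id is a placeholder:

    from abacusai.api_class.ai_agents import LLMAgentNode

    # Point at an existing chatbot deployment, or pass chatbot_parameters instead to have one created.
    llm_node = LLMAgentNode(name='support_bot', chatbot_deployment_id='placeholder_deployment_id')
    # to_dict() carries chatbot_deployment_id, chatbot_parameters and the auto-generated uid,
    # alongside the fixed 'user_input' -> 'chat_output' mappings set up in __init__.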
+@validate_constructor_arg_types('input_node')
+@dataclasses.dataclass
+class InputNode(WorkflowGraphNode):
+ """Represents an input node in an agent workflow graph.
+
+ Accepts multiple user inputs (text and file) and processes them into a single combined text output.
+ File inputs are processed using OCR/document extraction when applicable.
+
+ Args:
+ name (str): The name of the input node.
+ input_mappings (Union[Dict[str, WorkflowNodeInputMapping], List[WorkflowNodeInputMapping]]): The input mappings for the input node. If not provided, defaults to a text_input and file_input.
+ output_mappings (Union[List[str], Dict[str, str], List[WorkflowNodeOutputMapping]]): The output mappings for the input node. If not provided, defaults to a single 'answer' output.
+ input_schema (WorkflowNodeInputSchema): The input schema for the input node. If not provided, will be generated from input_mappings.
+ output_schema (WorkflowNodeOutputSchema): The output schema for the input node. If not provided, will be generated from output_mappings.
+ description (str): The description of the input node.
+ uid (str): A unique identifier for the input node. Auto-generated if not provided.
+ """
+ name: str
+ input_mappings: Union[Dict[str, WorkflowNodeInputMapping], List[WorkflowNodeInputMapping]] = dataclasses.field(default=None)
+ output_mappings: Union[List[str], Dict[str, str], List[WorkflowNodeOutputMapping]] = dataclasses.field(default=None)
+ input_schema: WorkflowNodeInputSchema = dataclasses.field(default=None)
+ output_schema: WorkflowNodeOutputSchema = dataclasses.field(default=None)
+ description: str = dataclasses.field(default=None)
+ uid: str = dataclasses.field(default=None, init=False)
+
+ @classmethod
+ def is_special_node_type(cls) -> bool:
+ """Input nodes skip source code validation."""
+ return True
+
+ @staticmethod
+ def _mark_file_inputs_in_schema(input_schema: WorkflowNodeInputSchema) -> None:
+ """Mark file inputs in the schema based on format and ui:widget indicators."""
+ json_schema_props = input_schema.json_schema.get('properties', {})
+ if not input_schema.ui_schema:
+ input_schema.ui_schema = {}
+
+ for prop_name, prop_schema in json_schema_props.items():
+ ui_widget = input_schema.ui_schema.get(prop_name)
+ is_file_input = (
+ prop_schema.get('format') == 'data-url' or
+ (isinstance(ui_widget, dict) and ui_widget.get('ui:widget') == 'file') or
+ ui_widget == 'file'
+ )
+ if is_file_input:
+ prop_schema['type'] = 'string'
+ prop_schema['format'] = 'data-url'
+ input_schema.ui_schema[prop_name] = {'ui:widget': 'file'}
+
+ def __init__(self, name: str, input_mappings: Union[Dict[str, WorkflowNodeInputMapping], List[WorkflowNodeInputMapping]] = None, output_mappings: Union[List[str], Dict[str, str], List[WorkflowNodeOutputMapping]] = None, input_schema: WorkflowNodeInputSchema = None, output_schema: WorkflowNodeOutputSchema = None, description: str = None, function_name: str = None, source_code: str = None, uid: str = None):
+ self.uid = uid or str(uuid.uuid4())[:6]
+
+ if input_mappings is None:
+ input_mappings = [
+ WorkflowNodeInputMapping(
+ name='text_input',
+ variable_type=enums.WorkflowNodeInputType.USER_INPUT,
+ is_required=True
+ ),
+ WorkflowNodeInputMapping(
+ name='file_input',
+ variable_type=enums.WorkflowNodeInputType.USER_INPUT,
+ is_required=False,
+ description='Optional file attachment to process'
+ )
+ ]
+
+ if output_mappings is None:
+ output_mappings = [
+ WorkflowNodeOutputMapping(
+ name='answer',
+ variable_type=enums.WorkflowNodeOutputType.STRING,
+ description='The combined output text'
+ )
+ ]
+
+ function_name = function_name or self.get_function_name()
+
+ if input_schema is None:
+ input_schema = WorkflowNodeInputSchema.from_input_mappings(input_mappings)
+ # Mark file inputs based on input_mappings - check descriptions for file/attachment keywords
+ json_schema_props = input_schema.json_schema.get('properties', {})
+ if not input_schema.ui_schema:
+ input_schema.ui_schema = {}
+ for mapping in input_mappings:
+ mapping_name = None
+ if isinstance(mapping, WorkflowNodeInputMapping):
+ mapping_name, desc = mapping.name, (mapping.description or '').lower()
+ elif isinstance(mapping, dict):
+ mapping_name, desc = mapping.get('name'), (mapping.get('description') or '').lower()
+ else:
+ continue
+ if mapping_name and mapping_name in json_schema_props and ('file' in desc or 'attachment' in desc):
+ json_schema_props[mapping_name]['format'] = 'data-url'
+ input_schema.ui_schema[mapping_name] = {'ui:widget': 'file'}
+
+ self._mark_file_inputs_in_schema(input_schema)
+
+ if output_schema is None:
+ output_schema = WorkflowNodeOutputSchema.from_fields_list(['answer'])
+
+ super().__init__(
+ name=name,
+ function_name=function_name,
+ source_code=source_code,
+ input_mappings=input_mappings,
+ output_mappings=output_mappings,
+ input_schema=input_schema,
+ output_schema=output_schema,
+ description=description
+ )
+ self.node_type = 'input_node'
+ self.source_code = source_code
+ self.template_metadata = None
+ self.trigger_config = None
+
+ def to_dict(self):
+ """
+ Return snake_case keys to match return filter expectations.
+ (The base ApiClass.to_dict() camelCases keys, which causes the
+ WorkflowGraphNodeDetails return filter to miss fields.)
+ """
+ return {
+ 'name': self.name,
+ 'function_name': self.function_name,
+ 'source_code': self.source_code,
+ 'input_mappings': [mapping.to_dict() for mapping in self.input_mappings],
+ 'output_mappings': [mapping.to_dict() for mapping in self.output_mappings],
+ 'input_schema': self.input_schema.to_dict() if self.input_schema else None,
+ 'output_schema': self.output_schema.to_dict() if self.output_schema else None,
+ 'template_metadata': self.template_metadata,
+ 'trigger_config': self.trigger_config.to_dict() if self.trigger_config else None,
+ 'node_type': self.node_type,
+ 'description': self.description,
+ 'uid': self.uid,
+ }
+
+ @classmethod
+ def from_dict(cls, node: dict):
+ uid = node.get('uid')
+ function_name = node.get('function_name')
+ source_code = node.get('source_code')
+
+ instance = cls(
+ name=node['name'],
+ uid=uid,
+ function_name=function_name,
+ source_code=source_code,
+ input_mappings=[WorkflowNodeInputMapping.from_dict(mapping) for mapping in node.get('input_mappings', [])],
+ output_mappings=[WorkflowNodeOutputMapping.from_dict(mapping) for mapping in node.get('output_mappings', [])],
+ input_schema=WorkflowNodeInputSchema.from_dict(node.get('input_schema', {})),
+ output_schema=WorkflowNodeOutputSchema.from_dict(node.get('output_schema', {})),
+ description=node.get('description'),
+ )
+ return instance
+
+ def get_function_name(self):
+ return f'input_node_{self.uid}'
+
+ def get_source_code(self):
+ """Return the stored source code, if any."""
+ return self.source_code
+
+ def get_input_schema(self):
+ return self.input_schema
+
+
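A minimal construction sketch; everything shown is the default behaviour of the __init__ above:

    from abacusai.api_class.ai_agents import InputNode

    entry = InputNode(name='user_entry')
    # Defaults to a required 'text_input' and an optional 'file_input' (rendered as a file
    # upload widget because its description mentions a file attachment), plus a single
    # 'answer' STRING output.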
@validate_constructor_arg_types('workflow_graph_edge')
@dataclasses.dataclass
class WorkflowGraphEdge(ApiClass):
@@ -588,6 +1022,11 @@ def __init__(self, source: Union[str, WorkflowGraphNode], target: Union[str, Wor
def to_nx_edge(self):
return [self.source, self.target, self.details]
+ @classmethod
+ def from_dict(cls, input_dict: dict):
+ validate_input_dict_param(input_dict, friendly_class_name='workflow_graph_edge', must_contain=['source', 'target', 'details'])
+ return super().from_dict(input_dict)
+
@validate_constructor_arg_types('workflow_graph')
@dataclasses.dataclass
@@ -595,21 +1034,23 @@ class WorkflowGraph(ApiClass):
"""
Represents an Agent workflow graph.
- The edges define the node invocation order.
-
Args:
- nodes (List[WorkflowGraphNode]): A list of nodes in the workflow graph.
- edges (List[WorkflowGraphEdge]): A list of edges in the workflow graph, where each edge is a tuple of source, target, and details.
- primary_start_node (Union[str, WorkflowGraphNode]): The primary node to start the workflow from.
+ nodes (List[Union[WorkflowGraphNode, DecisionNode, LLMAgentNode]]): A list of nodes in the workflow graph.
+ primary_start_node (Union[str, WorkflowGraphNode, LLMAgentNode]): The primary node to start the workflow from.
common_source_code (str): Common source code that can be used across all nodes.
"""
- nodes: List[WorkflowGraphNode] = dataclasses.field(default_factory=list)
- edges: List[Union[WorkflowGraphEdge, Tuple[WorkflowGraphNode, WorkflowGraphNode, dict], Tuple[str, str, dict]]] = dataclasses.field(default_factory=list)
- primary_start_node: Union[str, WorkflowGraphNode] = dataclasses.field(default=None)
+ nodes: List[Union[WorkflowGraphNode, DecisionNode, LLMAgentNode]] = dataclasses.field(default_factory=list)
+ edges: List[Union[WorkflowGraphEdge, Tuple[WorkflowGraphNode, WorkflowGraphNode, dict], Tuple[str, str, dict]]] = dataclasses.field(default_factory=list, metadata={'deprecated': True})
+ primary_start_node: Union[str, WorkflowGraphNode, LLMAgentNode] = dataclasses.field(default=None)
common_source_code: str = dataclasses.field(default=None)
specification_type: str = dataclasses.field(default='data_flow')
def __post_init__(self):
+ if self.specification_type == 'execution_flow':
+ if any(isinstance(node, DecisionNode) for node in self.nodes):
+ raise ValueError('workflow_graph', 'Decision nodes are not supported in execution flow specification type.')
+ if any(isinstance(node, LLMAgentNode) for node in self.nodes):
+ raise ValueError('workflow_graph', 'LLM Agent nodes are not supported in execution flow specification type.')
if self.edges:
if self.specification_type == 'execution_flow':
for index, edge in enumerate(self.edges):
@@ -632,11 +1073,17 @@ def to_dict(self):
@classmethod
def from_dict(cls, graph: dict):
- validate_input_dict_param(graph, friendly_class_name='workflow_graph')
+ validate_input_dict_param(graph, friendly_class_name='workflow_graph', schema={'nodes': list, 'edges': list, 'primary_start_node': str, 'common_source_code': str, 'specification_type': str})
if graph.get('__return_filter'):
for node in graph.get('nodes', []):
node['__return_filter'] = True
- nodes = [WorkflowGraphNode.from_dict(node) for node in graph.get('nodes', [])]
+ node_generator = {
+ 'decision_node': DecisionNode.from_dict,
+ 'workflow_node': WorkflowGraphNode.from_dict,
+ 'llm_agent_node': LLMAgentNode.from_dict,
+ 'input_node': InputNode.from_dict,
+ }
+ nodes = [node_generator[node.get('node_type') or 'workflow_node'](node) for node in graph.get('nodes', [])]
edges = [WorkflowGraphEdge.from_dict(edge) for edge in graph.get('edges', [])]
primary_start_node = graph.get('primary_start_node')
non_primary_nodes = set()
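For reference, a hedged sketch of the node_type dispatch during deserialization; the dict is hand-written, not captured from a real workflow:

    from abacusai.api_class.ai_agents import WorkflowGraph

    graph = WorkflowGraph.from_dict({
        'nodes': [
            {'node_type': 'input_node', 'name': 'entry'},
            # Nodes without a node_type fall back to the plain 'workflow_node' parser.
            {'name': 'analyze', 'function_name': 'analyze',
             'source_code': 'def analyze(answer):\n    return {"result": answer}'},
        ],
        'edges': [],
        'primary_start_node': 'entry',
    })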
@@ -744,16 +1191,19 @@ class WorkflowNodeTemplateInput(ApiClass):
name (str): A unique name of the input.
is_required (bool): Indicates whether the input is required. Defaults to False.
description (str): The description of this input.
+ long_description (str): The detailed description of this input.
"""
name: str
is_required: bool = dataclasses.field(default=False)
description: str = dataclasses.field(default='')
+ long_description: str = dataclasses.field(default='')
def to_dict(self):
return {
'name': self.name,
'is_required': self.is_required,
- 'description': self.description
+ 'description': self.description,
+ 'long_description': self.long_description
}
@classmethod
@@ -761,7 +1211,8 @@ def from_dict(cls, mapping: dict):
return cls(
name=mapping['name'],
is_required=mapping.get('is_required', False),
- description=mapping.get('description', '')
+ description=mapping.get('description', ''),
+ long_description=mapping.get('long_description', '')
)
@@ -774,16 +1225,19 @@ class WorkflowNodeTemplateOutput(ApiClass):
name (str): The name of the output.
variable_type (WorkflowNodeOutputType): The type of the output.
description (str): The description of this output.
+ long_description (str): The detailed description of this output.
"""
name: str
variable_type: enums.WorkflowNodeOutputType = dataclasses.field(default=enums.WorkflowNodeOutputType.ANY)
description: str = dataclasses.field(default='')
+ long_description: str = dataclasses.field(default='')
def to_dict(self):
return {
'name': self.name,
'variable_type': self.variable_type.value,
- 'description': self.description
+ 'description': self.description,
+ 'long_description': self.long_description
}
@classmethod
@@ -791,5 +1245,6 @@ def from_dict(cls, mapping: dict):
return cls(
name=mapping['name'],
variable_type=enums.WorkflowNodeOutputType(mapping.get('variable_type', 'ANY')),
- description=mapping.get('description', '')
+ description=mapping.get('description', ''),
+ long_description=mapping.get('long_description', '')
)
diff --git a/abacusai/api_class/batch_prediction.py b/abacusai/api_class/batch_prediction.py
index 890f8d625..adc1fe749 100644
--- a/abacusai/api_class/batch_prediction.py
+++ b/abacusai/api_class/batch_prediction.py
@@ -124,10 +124,12 @@ class PretrainedModelsBatchPredictionArgs(BatchPredictionArgs):
Args:
for_eval (bool): If True, the test fold which was created during training and used for metrics calculation will be used as input data. These predictions are hence, used for model evaluation.
+ files_input_location (str): The input location for the files.
files_output_location_prefix (str): The output location prefix for the files.
channel_id_to_label_map (str): JSON string for the map from channel ids to their labels.
"""
for_eval: bool = dataclasses.field(default=None)
+ files_input_location: str = dataclasses.field(default=None)
files_output_location_prefix: str = dataclasses.field(default=None)
channel_id_to_label_map: str = dataclasses.field(default=None)
diff --git a/abacusai/api_class/blob_input.py b/abacusai/api_class/blob_input.py
index 711ebb116..3492ecdac 100644
--- a/abacusai/api_class/blob_input.py
+++ b/abacusai/api_class/blob_input.py
@@ -30,7 +30,7 @@ def __init__(self, contents: bytes, mime_type: str = None, filename: str = None,
mime_type = mimetypes.guess_type(filename)[0]
else:
import magic
- mime_type = magic.Magic(mime=True).from_buffer(contents)
+ mime_type = magic.from_buffer(contents, mime=True)
except Exception:
pass
else:
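For reference, a minimal sketch of the convenience call the patch switches to; it assumes the `python-magic` package and is equivalent to the previous `magic.Magic(mime=True).from_buffer(...)` form:

    import magic

    mime_type = magic.from_buffer(b'%PDF-1.4\n...', mime=True)  # e.g. 'application/pdf'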
diff --git a/abacusai/api_class/dataset.py b/abacusai/api_class/dataset.py
index 8914c1d90..df5935639 100644
--- a/abacusai/api_class/dataset.py
+++ b/abacusai/api_class/dataset.py
@@ -64,7 +64,7 @@ def __post_init__(self):
if self.document_type is not None:
if DocumentType.is_ocr_forced(self.document_type):
self.highlight_relevant_text = True
- else:
+ elif self.highlight_relevant_text is None:
self.highlight_relevant_text = False
if self.highlight_relevant_text is not None:
self.extract_bounding_boxes = self.highlight_relevant_text # Highlight_relevant text acts as a wrapper over extract_bounding_boxes
@@ -75,6 +75,8 @@ def _detect_ocr_mode(self):
return OcrMode.DEFAULT
elif self.document_type == DocumentType.TABLES_AND_FORMS:
return OcrMode.LAYOUT
+ elif self.document_type == DocumentType.COMPREHENSIVE_MARKDOWN:
+ return OcrMode.COMPREHENSIVE_TABLE_MD
elif self.document_type == DocumentType.EMBEDDED_IMAGES:
return OcrMode.SCANNED
elif self.document_type == DocumentType.SCANNED_TEXT:
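A hedged sketch of how the new mapping surfaces to callers; the DocumentProcessingConfig class name is assumed from the surrounding module and is not shown in this hunk:

    from abacusai.api_class.dataset import DocumentProcessingConfig
    from abacusai.api_class.enums import DocumentType, OcrMode

    config = DocumentProcessingConfig(document_type=DocumentType.COMPREHENSIVE_MARKDOWN)
    # _detect_ocr_mode() now maps COMPREHENSIVE_MARKDOWN to OcrMode.COMPREHENSIVE_TABLE_MD,
    # and highlight_relevant_text is only defaulted to False when the caller left it unset.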
diff --git a/abacusai/api_class/dataset_application_connector.py b/abacusai/api_class/dataset_application_connector.py
index b98287765..745728bdb 100644
--- a/abacusai/api_class/dataset_application_connector.py
+++ b/abacusai/api_class/dataset_application_connector.py
@@ -33,12 +33,13 @@ class ConfluenceDatasetConfig(ApplicationConnectorDatasetConfig):
space_key (str): The space key of the space from which we fetch pages
pull_attachments (bool): Whether to pull attachments for each page
extract_bounding_boxes (bool): Whether to extract bounding boxes from the documents
-
+        location_type (str): The type of location to be fetched. Maps the value in `location` to a content type, e.g. 'spaceKey/folderTitle/*' -> 'folder'
"""
location: str = dataclasses.field(default=None)
space_key: str = dataclasses.field(default=None)
pull_attachments: bool = dataclasses.field(default=False)
extract_bounding_boxes: bool = dataclasses.field(default=False) # TODO: Deprecate in favour of document_processing_config
+ location_type: str = dataclasses.field(default=None)
def __post_init__(self):
self.application_connector_type = enums.ApplicationConnectorType.CONFLUENCE
@@ -89,11 +90,13 @@ class GoogleDriveDatasetConfig(ApplicationConnectorDatasetConfig):
csv_delimiter (str): If the file format is CSV, use a specific csv delimiter
extract_bounding_boxes (bool): Signifies whether to extract bounding boxes out of the documents. Only valid if is_documentset if True
merge_file_schemas (bool): Signifies if the merge file schema policy is enabled. Not applicable if is_documentset is True
+        location_type (str): The type of path to fetch. Use 'shared' for shared files; if not provided, files are fetched from the root folder.
"""
location: str = dataclasses.field(default=None)
csv_delimiter: str = dataclasses.field(default=None)
extract_bounding_boxes: bool = dataclasses.field(default=False) # TODO: Deprecate in favour of document_processing_config
merge_file_schemas: bool = dataclasses.field(default=False)
+ location_type: str = dataclasses.field(default=None)
def __post_init__(self):
self.application_connector_type = enums.ApplicationConnectorType.GOOGLEDRIVE
@@ -109,6 +112,7 @@ class JiraDatasetConfig(ApplicationConnectorDatasetConfig):
custom_fields (list): A list of custom fields to include in the dataset
include_comments (bool): Fetch comments for each issue
include_watchers (bool): Fetch watchers for each issue
+
"""
jql: str = dataclasses.field(default=None)
custom_fields: list = dataclasses.field(default=None)
@@ -149,11 +153,13 @@ class SharepointDatasetConfig(ApplicationConnectorDatasetConfig):
csv_delimiter (str): If the file format is CSV, use a specific csv delimiter
extract_bounding_boxes (bool): Signifies whether to extract bounding boxes out of the documents. Only valid if is_documentset if True
merge_file_schemas (bool): Signifies if the merge file schema policy is enabled. Not applicable if is_documentset is True
+ add_file_metadata (bool): Signifies if the file metadata should be added to the dataset
"""
location: str = dataclasses.field(default=None)
csv_delimiter: str = dataclasses.field(default=None)
extract_bounding_boxes: bool = dataclasses.field(default=False) # TODO: Deprecate in favour of document_processing_config
merge_file_schemas: bool = dataclasses.field(default=False)
+ add_file_metadata: bool = dataclasses.field(default=False)
def __post_init__(self):
self.application_connector_type = enums.ApplicationConnectorType.SHAREPOINT
@@ -202,14 +208,42 @@ class TeamsScraperDatasetConfig(ApplicationConnectorDatasetConfig):
pull_chat_messages (bool): Whether to pull teams chat messages
pull_channel_posts (bool): Whether to pull posts for each channel
pull_transcripts (bool): Whether to pull transcripts for calendar meetings
+ max_days_to_lookback (int): The maximum number of days to look back for data
"""
pull_chat_messages: bool = dataclasses.field(default=False)
pull_channel_posts: bool = dataclasses.field(default=False)
pull_transcripts: bool = dataclasses.field(default=False)
+ max_days_to_lookback: int = dataclasses.field(default=365)
def __post_init__(self):
self.application_connector_type = enums.ApplicationConnectorType.TEAMSSCRAPER
- self.is_documentset = True
+
+
+@dataclasses.dataclass
+class OutlookDatasetConfig(ApplicationConnectorDatasetConfig):
+ """
+ Dataset config for Outlook Application Connector
+ """
+
+ def __post_init__(self):
+ self.application_connector_type = enums.ApplicationConnectorType.OUTLOOK
+
+
+@dataclasses.dataclass
+class AzureStorageDatasetConfig(ApplicationConnectorDatasetConfig):
+ """
+ Dataset config for Azure Storage Application Connector
+
+ Args:
+ location (str): The location of the files to fetch
+ merge_file_schemas (bool): Signifies if the merge file schema policy is enabled. Not applicable if is_documentset is True
+ """
+
+ location: str = dataclasses.field(default=None)
+ merge_file_schemas: bool = dataclasses.field(default=False)
+
+ def __post_init__(self):
+ self.application_connector_type = enums.ApplicationConnectorType.AZURESTORAGE
@dataclasses.dataclass
@@ -222,6 +256,26 @@ def __post_init__(self):
self.application_connector_type = enums.ApplicationConnectorType.FRESHSERVICE
+@dataclasses.dataclass
+class SftpDatasetConfig(ApplicationConnectorDatasetConfig):
+ """
+ Dataset config for SFTP Application Connector
+
+ Args:
+ location (str): The regex location of the files to fetch
+ csv_delimiter (str): If the file format is CSV, use a specific csv delimiter
+ extract_bounding_boxes (bool): Signifies whether to extract bounding boxes out of the documents. Only valid if is_documentset is True
+ merge_file_schemas (bool): Signifies if the merge file schema policy is enabled. Not applicable if is_documentset is True
+ """
+ location: str = dataclasses.field(default=None)
+ csv_delimiter: str = dataclasses.field(default=None)
+ extract_bounding_boxes: bool = dataclasses.field(default=False) # TODO: Deprecate in favour of document_processing_config
+ merge_file_schemas: bool = dataclasses.field(default=False)
+
+ def __post_init__(self):
+ self.application_connector_type = enums.ApplicationConnectorType.SFTPAPPLICATION
+
+
@dataclasses.dataclass
class _ApplicationConnectorDatasetConfigFactory(_ApiClassFactory):
config_abstract_class = ApplicationConnectorDatasetConfig
@@ -238,4 +292,7 @@ class _ApplicationConnectorDatasetConfigFactory(_ApiClassFactory):
enums.ApplicationConnectorType.FRESHSERVICE: FreshserviceDatasetConfig,
enums.ApplicationConnectorType.TEAMSSCRAPER: TeamsScraperDatasetConfig,
enums.ApplicationConnectorType.BOX: BoxDatasetConfig,
+ enums.ApplicationConnectorType.SFTPAPPLICATION: SftpDatasetConfig,
+ enums.ApplicationConnectorType.OUTLOOK: OutlookDatasetConfig,
+ enums.ApplicationConnectorType.AZURESTORAGE: AzureStorageDatasetConfig,
}
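
A minimal sketch of how the new dataset configs would be constructed directly as dataclasses; the field values are hypothetical, and the factory mapping above resolves each config from its connector type:

```python
from abacusai.api_class import enums
from abacusai.api_class.dataset_application_connector import (
    AzureStorageDatasetConfig, OutlookDatasetConfig, SftpDatasetConfig,
)

# Hypothetical locations; merge_file_schemas defaults to False on both configs.
sftp_config = SftpDatasetConfig(location=r'exports/.*\.csv', csv_delimiter=',')
azure_config = AzureStorageDatasetConfig(location='container/raw/*', merge_file_schemas=True)
outlook_config = OutlookDatasetConfig()

# __post_init__ pins the connector type, which the factory mapping above keys on.
assert sftp_config.application_connector_type == enums.ApplicationConnectorType.SFTPAPPLICATION
assert azure_config.application_connector_type == enums.ApplicationConnectorType.AZURESTORAGE
assert outlook_config.application_connector_type == enums.ApplicationConnectorType.OUTLOOK
```
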
diff --git a/abacusai/api_class/enums.py b/abacusai/api_class/enums.py
index 2be4251df..b94c26e65 100644
--- a/abacusai/api_class/enums.py
+++ b/abacusai/api_class/enums.py
@@ -16,7 +16,7 @@ def is_deprecated(self):
return self.value in self.__deprecated_values__
def __eq__(self, other):
- if isinstance(other, str):
+ if isinstance(other, str) and isinstance(self.value, str):
return self.value.upper() == other.upper()
elif isinstance(other, int):
return self.value == other
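
A short sketch of what the guarded `__eq__` still does for string-valued members; the change only matters for enums whose values are not strings:

```python
from abacusai.api_class import enums

# String comparisons remain case-insensitive for string-valued enums.
assert enums.ApplicationConnectorType.SHAREPOINT == 'sharepoint'

# With the added isinstance(self.value, str) guard, comparing a plain string
# against an enum whose value is not a string no longer calls .upper() on a
# non-string; the comparison simply falls through to the remaining branches.
```
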
@@ -224,6 +224,8 @@ class FileFormat(ApiEnum):
WEBM = 'webm'
WMV = 'wmv'
MSG = 'msg'
+ TS = 'TS'
+ TSX = 'TSX'
class ExperimentationMode(ApiEnum):
@@ -392,6 +394,7 @@ class ConnectorType(ApiEnum):
class ApplicationConnectorType(ApiEnum):
GOOGLEANALYTICS = 'GOOGLEANALYTICS'
GOOGLEDRIVE = 'GOOGLEDRIVE'
+ GOOGLECALENDAR = 'GOOGLECALENDAR'
GIT = 'GIT'
CONFLUENCE = 'CONFLUENCE'
JIRA = 'JIRA'
@@ -407,13 +410,50 @@ class ApplicationConnectorType(ApiEnum):
GOOGLEDRIVEUSER = 'GOOGLEDRIVEUSER'
GOOGLEWORKSPACEUSER = 'GOOGLEWORKSPACEUSER'
GMAILUSER = 'GMAILUSER'
- GOOGLECALENDAR = 'GOOGLECALENDAR'
GOOGLESHEETS = 'GOOGLESHEETS'
GOOGLEDOCS = 'GOOGLEDOCS'
TEAMSSCRAPER = 'TEAMSSCRAPER'
GITHUBUSER = 'GITHUBUSER'
OKTASAML = 'OKTASAML'
BOX = 'BOX'
+ SFTPAPPLICATION = 'SFTPAPPLICATION'
+ OAUTH = 'OAUTH'
+ SALESFORCE = 'SALESFORCE'
+ TWITTER = 'TWITTER'
+ MCP = 'MCP'
+ ODBC = 'ODBC'
+ DBC = 'DBC'
+ GENERIC_OAUTH = 'GENERIC_OAUTH'
+ OUTLOOK = 'OUTLOOK'
+ BIGQUERY = 'BIGQUERY'
+ AZURESTORAGE = 'AZURESTORAGE'
+
+ @classmethod
+ def user_connectors(cls):
+ return [
+ cls.GOOGLEDRIVEUSER,
+ cls.GOOGLECALENDAR,
+ cls.GMAILUSER,
+ cls.SLACK,
+ cls.JIRA,
+ cls.ONEDRIVE,
+ cls.SALESFORCE,
+ cls.TWITTER,
+ cls.SHAREPOINT,
+ cls.TEAMS,
+ cls.CONFLUENCE,
+ cls.BOX,
+ cls.GITHUBUSER,
+ cls.ODBC,
+ cls.DBC,
+ cls.GENERIC_OAUTH,
+ cls.OUTLOOK,
+ cls.BIGQUERY
+ ]
+
+ @classmethod
+ def database_connectors(cls):
+ return [cls.SALESFORCE, cls.ODBC, cls.DBC, cls.BIGQUERY]
class StreamingConnectorType(ApiEnum):
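
The new classmethods centralise membership checks that were previously hard-coded lists; all three assertions below follow directly from the lists added above:

```python
from abacusai.api_class.enums import ApplicationConnectorType

assert ApplicationConnectorType.OUTLOOK in ApplicationConnectorType.user_connectors()
assert ApplicationConnectorType.BIGQUERY in ApplicationConnectorType.database_connectors()
assert ApplicationConnectorType.AZURESTORAGE not in ApplicationConnectorType.database_connectors()
```
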
@@ -434,6 +474,22 @@ class PythonFunctionArgumentType(ApiEnum):
MONITOR_ID = 'MONITOR_ID'
BATCH_PREDICTION_ID = 'BATCH_PREDICTION_ID'
DEPLOYMENT_ID = 'DEPLOYMENT_ID'
+ ATTACHMENT = 'ATTACHMENT'
+
+ @staticmethod
+ def to_json_type(type):
+ if type == PythonFunctionArgumentType.INTEGER or type == PythonFunctionArgumentType.FLOAT:
+ return 'number'
+ elif type == PythonFunctionArgumentType.STRING or type == PythonFunctionArgumentType.ATTACHMENT:
+ return 'string'
+ elif type == PythonFunctionArgumentType.BOOLEAN:
+ return 'boolean'
+ elif type == PythonFunctionArgumentType.LIST:
+ return 'array'
+ elif type == PythonFunctionArgumentType.JSON:
+ return 'object'
+ else:
+ raise ValueError(f'Invalid type: {type}. type not JSON compatible')
class PythonFunctionOutputArgumentType(ApiEnum):
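
The new `to_json_type` helper maps argument types onto JSON-schema type names; a small sketch of the mapping as written above:

```python
from abacusai.api_class.enums import PythonFunctionArgumentType

assert PythonFunctionArgumentType.to_json_type(PythonFunctionArgumentType.FLOAT) == 'number'
assert PythonFunctionArgumentType.to_json_type(PythonFunctionArgumentType.ATTACHMENT) == 'string'
assert PythonFunctionArgumentType.to_json_type(PythonFunctionArgumentType.LIST) == 'array'

# Members with no JSON equivalent (DEPLOYMENT_ID, for instance) raise ValueError.
```
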
@@ -450,6 +506,7 @@ class PythonFunctionOutputArgumentType(ApiEnum):
BATCH_PREDICTION_ID = 'BATCH_PREDICTION_ID'
DEPLOYMENT_ID = 'DEPLOYMENT_ID'
ANY = 'ANY'
+ ATTACHMENT = 'ATTACHMENT'
class VectorStoreTextEncoder(ApiEnum):
@@ -462,8 +519,16 @@ class VectorStoreTextEncoder(ApiEnum):
CODE_BERT = 'CODE_BERT'
-@deprecated_enums('OPENAI_GPT4_32K', 'OPENAI_GPT3_5', 'OPENAI_GPT3_5_TEXT', 'LLAMA3_LARGE_CHAT', 'CLAUDE_V3_OPUS', 'CLAUDE_V3_SONNET', 'OPENAI_GPT4', 'OPENAI_GPT4_128K', 'QWEN_2_5_32B_BASE')
+@deprecated_enums('OPENAI_GPT4_32K', 'OPENAI_GPT3_5', 'OPENAI_GPT3_5_TEXT',
+ 'OPENAI_GPT4', 'OPENAI_GPT4_128K', 'OPENAI_GPT4_128K_LATEST',
+ 'OPENAI_O1_MINI',
+ 'GEMINI_1_5_PRO', 'GEMINI_1_5_FLASH', 'GEMINI_2_PRO', 'GEMINI_2_FLASH_THINKING',
+ 'XAI_GROK',
+ 'CLAUDE_V3_OPUS', 'CLAUDE_V3_HAIKU',
+ 'LLAMA3_LARGE_CHAT', 'QWEN_2_5_32B_BASE')
class LLMName(ApiEnum):
+ OPENAI_GPT3_5 = 'OPENAI_GPT3_5'
+ OPENAI_GPT3_5_TEXT = 'OPENAI_GPT3_5_TEXT'
OPENAI_GPT4 = 'OPENAI_GPT4'
OPENAI_GPT4_32K = 'OPENAI_GPT4_32K'
OPENAI_GPT4_128K = 'OPENAI_GPT4_128K'
@@ -471,31 +536,57 @@ class LLMName(ApiEnum):
OPENAI_GPT4O = 'OPENAI_GPT4O'
OPENAI_GPT4O_MINI = 'OPENAI_GPT4O_MINI'
OPENAI_O1_MINI = 'OPENAI_O1_MINI'
- OPENAI_GPT3_5 = 'OPENAI_GPT3_5'
- OPENAI_GPT3_5_TEXT = 'OPENAI_GPT3_5_TEXT'
+ OPENAI_O3 = 'OPENAI_O3'
+ OPENAI_O3_HIGH = 'OPENAI_O3_HIGH'
+ OPENAI_O4_MINI = 'OPENAI_O4_MINI'
+ OPENAI_O4_MINI_HIGH = 'OPENAI_O4_MINI_HIGH'
+ OPENAI_GPT4_1 = 'OPENAI_GPT4_1'
+ OPENAI_GPT4_1_MINI = 'OPENAI_GPT4_1_MINI'
+ OPENAI_GPT4_1_NANO = 'OPENAI_GPT4_1_NANO'
+ OPENAI_GPT5 = 'OPENAI_GPT5'
+ OPENAI_GPT5_MINI = 'OPENAI_GPT5_MINI'
+ OPENAI_GPT5_MINI_HIGH = 'OPENAI_GPT5_MINI_HIGH'
+ OPENAI_GPT5_MINI_LOW = 'OPENAI_GPT5_MINI_LOW'
+ OPENAI_GPT5_NANO = 'OPENAI_GPT5_NANO'
+ OPENAI_GPT5_NANO_HIGH = 'OPENAI_GPT5_NANO_HIGH'
+ OPENAI_GPT5_1 = 'OPENAI_GPT5_1'
+ OPENAI_GPT5_2 = 'OPENAI_GPT5_2'
+ CLAUDE_V3_HAIKU = 'CLAUDE_V3_HAIKU'
+ CLAUDE_V3_OPUS = 'CLAUDE_V3_OPUS'
+ CLAUDE_V3_5_HAIKU = 'CLAUDE_V3_5_HAIKU'
+ CLAUDE_V3_5_SONNET = 'CLAUDE_V3_5_SONNET'
+ CLAUDE_V3_7_SONNET = 'CLAUDE_V3_7_SONNET'
+ CLAUDE_V4_SONNET = 'CLAUDE_V4_SONNET'
+ CLAUDE_V4_OPUS = 'CLAUDE_V4_OPUS'
+ CLAUDE_V4_5_SONNET = 'CLAUDE_V4_5_SONNET'
+ GEMINI_1_5_PRO = 'GEMINI_1_5_PRO'
+ GEMINI_1_5_FLASH = 'GEMINI_1_5_FLASH'
+ GEMINI_2_PRO = 'GEMINI_2_PRO'
+ GEMINI_2_FLASH = 'GEMINI_2_FLASH'
+ GEMINI_2_5_PRO = 'GEMINI_2_5_PRO'
+ GEMINI_3_PRO = 'GEMINI_3_PRO'
+ GEMINI_2_5_FLASH = 'GEMINI_2_5_FLASH'
+ GEMINI_3_FLASH = 'GEMINI_3_FLASH'
+ XAI_GROK = 'XAI_GROK'
+ XAI_GROK_3 = 'XAI_GROK_3'
+ XAI_GROK_3_MINI = 'XAI_GROK_3_MINI'
+ XAI_GROK_4 = 'XAI_GROK_4'
+ LLAMA4_MAVERICK = 'LLAMA4_MAVERICK'
LLAMA3_1_405B = 'LLAMA3_1_405B'
LLAMA3_1_70B = 'LLAMA3_1_70B'
LLAMA3_1_8B = 'LLAMA3_1_8B'
LLAMA3_3_70B = 'LLAMA3_3_70B'
LLAMA3_LARGE_CHAT = 'LLAMA3_LARGE_CHAT'
- CLAUDE_V3_OPUS = 'CLAUDE_V3_OPUS'
- CLAUDE_V3_SONNET = 'CLAUDE_V3_SONNET'
- CLAUDE_V3_HAIKU = 'CLAUDE_V3_HAIKU'
- CLAUDE_V3_5_SONNET = 'CLAUDE_V3_5_SONNET'
- CLAUDE_V3_5_HAIKU = 'CLAUDE_V3_5_HAIKU'
- GEMINI_1_5_PRO = 'GEMINI_1_5_PRO'
- GEMINI_2_FLASH = 'GEMINI_2_FLASH'
- GEMINI_2_FLASH_THINKING = 'GEMINI_2_FLASH_THINKING'
- GEMINI_2_PRO = 'GEMINI_2_PRO'
ABACUS_SMAUG3 = 'ABACUS_SMAUG3'
ABACUS_DRACARYS = 'ABACUS_DRACARYS'
QWEN_2_5_32B = 'QWEN_2_5_32B'
QWEN_2_5_32B_BASE = 'QWEN_2_5_32B_BASE'
QWEN_2_5_72B = 'QWEN_2_5_72B'
QWQ_32B = 'QWQ_32B'
- GEMINI_1_5_FLASH = 'GEMINI_1_5_FLASH'
- XAI_GROK = 'XAI_GROK'
- DEEPSEEK_V3 = 'DEEPSEEK_V3'
+ QWEN3_32B = 'QWEN3_32B'
+ QWEN3_235B_A22B = 'QWEN3_235B_A22B'
+ QWEN3_CODER = 'QWEN3_CODER'
+ DEEPSEEK_V3_1 = 'DEEPSEEK_V3_1'
DEEPSEEK_R1 = 'DEEPSEEK_R1'
@@ -542,6 +633,7 @@ class PythonFunctionType(ApiEnum):
PLOTLY_FIG = 'PLOTLY_FIG'
STEP_FUNCTION = 'STEP_FUNCTION'
USERCODE_TOOL = 'USERCODE_TOOL'
+ CONNECTOR_TOOL = 'CONNECTOR_TOOL'
class EvalArtifactType(ApiEnum):
@@ -579,10 +671,16 @@ class WorkflowNodeOutputType(ApiEnum):
ANY = 'ANY'
@classmethod
- def normalize_type(cls, python_type: Union[str, type, None, 'WorkflowNodeOutputType']) -> 'WorkflowNodeOutputType':
+ def normalize_type(cls, python_type: Union[str, type, None, 'WorkflowNodeOutputType', 'PythonFunctionOutputArgumentType']) -> 'WorkflowNodeOutputType':
if isinstance(python_type, WorkflowNodeOutputType):
return python_type
+ if isinstance(python_type, PythonFunctionOutputArgumentType):
+ if python_type.value in cls.__members__:
+ return cls(python_type.value)
+ else:
+ return cls.ANY
+
if isinstance(python_type, type) or isinstance(python_type, str):
python_type = python_type.__name__ if isinstance(python_type, type) else python_type
else:
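
A minimal sketch of the widened `normalize_type`: output-argument enums are now accepted directly, with names shared by both enums mapping one-to-one and everything else falling back to `ANY`:

```python
from abacusai.api_class.enums import (
    PythonFunctionOutputArgumentType, WorkflowNodeOutputType,
)

# ANY exists in both enums, so it maps one-to-one; members that only exist in
# PythonFunctionOutputArgumentType would fall back to WorkflowNodeOutputType.ANY.
assert WorkflowNodeOutputType.normalize_type(
    PythonFunctionOutputArgumentType.ANY) == WorkflowNodeOutputType.ANY
```
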
@@ -625,11 +723,12 @@ def aws_ocr_modes(cls):
class DocumentType(ApiEnum):
- SIMPLE_TEXT = 'SIMPLE_TEXT' # digital text
- TEXT = 'TEXT' # general text with OCR
- TABLES_AND_FORMS = 'TABLES_AND_FORMS' # tables and forms with OCR
- EMBEDDED_IMAGES = 'EMBEDDED_IMAGES' # embedded images with OCR TODO: remove?
- SCANNED_TEXT = 'SCANNED_TEXT' # scanned text with OCR
+ SIMPLE_TEXT = 'SIMPLE_TEXT' # digital text
+ TEXT = 'TEXT' # general text with OCR
+ TABLES_AND_FORMS = 'TABLES_AND_FORMS' # tables and forms with OCR
+ EMBEDDED_IMAGES = 'EMBEDDED_IMAGES' # embedded images with OCR TODO: remove?
+ SCANNED_TEXT = 'SCANNED_TEXT' # scanned text with OCR
+ COMPREHENSIVE_MARKDOWN = 'COMPREHENSIVE_MARKDOWN' # comprehensive text with Gemini OCR
@classmethod
def is_ocr_forced(cls, document_type: 'DocumentType'):
@@ -722,3 +821,25 @@ class DeploymentConversationType(ApiEnum):
COPILOT = 'COPILOT'
AGENT_CONTROLLER = 'AGENT_CONTROLLER'
CODE_LLM = 'CODE_LLM'
+ CODE_LLM_AGENT = 'CODE_LLM_AGENT'
+ CHAT_LLM_TASK = 'CHAT_LLM_TASK'
+ COMPUTER_AGENT = 'COMPUTER_AGENT'
+ SEARCH_LLM = 'SEARCH_LLM'
+ APP_LLM = 'APP_LLM'
+ TEST_AGENT = 'TEST_AGENT'
+ SUPER_AGENT = 'SUPER_AGENT'
+ CODE_LLM_NON_INTERACTIVE = 'CODE_LLM_NON_INTERACTIVE'
+ BROWSER_EXTENSION = 'BROWSER_EXTENSION'
+
+
+class AgentClientType(ApiEnum):
+ CHAT_UI = 'CHAT_UI'
+ MESSAGING_APP = 'MESSAGING_APP'
+ API = 'API'
+
+
+class OrganizationSecretType(ApiEnum):
+ """Enum for organization secret types"""
+ ORG_SECRET = 'ORG_SECRET'
+ ORG_API_CREDENTIALS = 'ORG_API_CREDENTIALS'
+ USER_API_CREDENTIALS = 'USER_API_CREDENTIALS'
diff --git a/abacusai/api_class/model.py b/abacusai/api_class/model.py
index 41d7b9edb..bc8fdcf55 100644
--- a/abacusai/api_class/model.py
+++ b/abacusai/api_class/model.py
@@ -447,6 +447,19 @@ def __post_init__(self):
self.problem_type = enums.ProblemType.NATURAL_LANGUAGE_SEARCH
+@dataclasses.dataclass
+class SystemConnectorTool(ApiClass):
+ """
+ System connector tool used to integrate chatbots with external services.
+
+ Args:
+ value (str): The name of the tool.
+ configs (dict): Optional. A dictionary of key-value pairs that are used to configure the tool.
+ """
+ value: str = dataclasses.field()
+ configs: dict = dataclasses.field(default=None)
+
+
@dataclasses.dataclass
class ChatLLMTrainingConfig(TrainingConfig):
"""
@@ -473,19 +486,26 @@ class ChatLLMTrainingConfig(TrainingConfig):
data_prompt_column_context (Dict[str, str]): Dict of 'table_name.column_name' and 'column_context' pairs to provide column context for some selected columns in the selected structured data table. This replaces the default auto-generated information about the column data.
hide_sql_and_code (bool): When running data queries, this will hide the generated SQL and Code in the response.
disable_data_summarization (bool): After executing a query, summarize the response and reply back with only the table and the query that was run.
+ disable_data_fetch_for_training (bool): Train using only table and column schema metadata without fetching sample data. This speeds up training but may result in less context for the model.
data_columns_to_ignore (List[str]): Columns to ignore while encoding information about structured data tables in context for the LLM. A list of strings of format "."
search_score_cutoff (float): Minimum search score to consider a document as a valid search result.
include_bm25_retrieval (bool): Combine BM25 search score with vector search using reciprocal rank fusion.
database_connector_id (str): Database connector ID to use for connecting external database that gives access to structured data to the LLM.
+ database_connector_ids (List[str]): List of database connector IDs to use for connecting external databases that give access to structured data to the LLM.
database_connector_tables (List[str]): List of tables to use from the database connector for the ChatLLM.
enable_code_execution (bool): Enable python code execution in the ChatLLM. This equips the LLM with a python kernel in which all its code is executed.
enable_response_caching (bool): Enable caching of LLM responses to speed up response times and improve reproducibility.
unknown_answer_phrase (str): Fallback response when the LLM can't find an answer.
- enable_tool_bar (bool): Enable the tool bar in Enterprise ChatLLM to provide additional functionalities like tool_use, web_search, image_gen, etc.
+ enable_tool_bar (bool): Enable the tool bar in Enterprise ChatLLM to provide additional functionalities like tool_use, web_search, image_gen, etc. Enabling this requires enable_web_search to be enabled.
enable_inline_source_citations (bool): Enable inline citations of the sources in the response.
response_format: (str): When set to 'JSON', the LLM will generate a JSON formatted string.
json_response_instructions (str): Instructions to be followed while generating the json_response if `response_format` is set to "JSON". This can include the schema information if the schema is dynamic and its keys cannot be pre-determined.
json_response_schema (str): Specifies the JSON schema that the model should adhere to if `response_format` is set to "JSON". This should be a json-formatted string where each field of the expected schema is mapped to a dictionary containing the fields 'type', 'required' and 'description'. For example - '{"sample_field": {"type": "integer", "required": true, "description": "Sample Field"}}'
+ mask_pii (bool): Mask PII in the prompts and uploaded documents before sending them to the LLM. Only available for Enterprise users and will cause validation errors if set to True for ChatLLM Teams users.
+ builtin_tools (List[SystemConnectorTool]): List of builtin system connector tools to use in the ChatLLM. Using builtin tools does not require enabling the tool bar (enable_tool_bar flag).
+ config_connectors (List[str]): List of names of config connectors to use in the ChatLLM. This should not be used with document_retrievers.
+ mcp_servers (List[str]): List of names of MCP servers to use in the ChatLLM. This should not be used with document_retrievers.
+ agentic_loop_mode (bool): Enables use of an agentic loop that makes a series of tool calls when needed to respond. If set to False, the agentic loop will not be used. If not set or set to Auto, the agentic loop will be used automatically based on certain conditions, such as the presence of tools in the model.
"""
document_retrievers: List[str] = dataclasses.field(default=None)
num_completion_tokens: int = dataclasses.field(default=None)
@@ -507,10 +527,12 @@ class ChatLLMTrainingConfig(TrainingConfig):
data_prompt_column_context: Dict[str, str] = dataclasses.field(default=None)
hide_sql_and_code: bool = dataclasses.field(default=None)
disable_data_summarization: bool = dataclasses.field(default=None)
+ disable_data_fetch_for_training: bool = dataclasses.field(default=None)
data_columns_to_ignore: List[str] = dataclasses.field(default=None)
search_score_cutoff: float = dataclasses.field(default=None)
include_bm25_retrieval: bool = dataclasses.field(default=None)
- database_connector_id: str = dataclasses.field(default=None)
+ database_connector_id: str = dataclasses.field(default=None) # deprecated
+ database_connector_ids: List[str] = dataclasses.field(default=None)
database_connector_tables: List[str] = dataclasses.field(default=None)
enable_code_execution: bool = dataclasses.field(default=None)
metadata_columns: list = dataclasses.field(default=None, metadata={'deprecated': True})
@@ -522,6 +544,11 @@ class ChatLLMTrainingConfig(TrainingConfig):
response_format: str = dataclasses.field(default=None)
json_response_instructions: str = dataclasses.field(default=None)
json_response_schema: str = dataclasses.field(default=None)
+ mask_pii: bool = dataclasses.field(default=None)
+ builtin_tools: List[SystemConnectorTool] = dataclasses.field(default=None)
+ config_connectors: List[str] = dataclasses.field(default=None)
+ mcp_servers: List[str] = dataclasses.field(default=None)
+ agentic_loop_mode: bool = dataclasses.field(default=None)
def __post_init__(self):
self.problem_type = enums.ProblemType.CHAT_LLM
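
A hypothetical training config exercising the new ChatLLM fields; the connector IDs and tool name below are placeholders, not real resources, and the sketch assumes direct dataclass construction:

```python
from abacusai.api_class.model import ChatLLMTrainingConfig, SystemConnectorTool

config = ChatLLMTrainingConfig(
    database_connector_ids=['conn_sales', 'conn_support'],  # supersedes the deprecated database_connector_id
    disable_data_fetch_for_training=True,
    mask_pii=True,
    builtin_tools=[SystemConnectorTool(value='web_search', configs={'max_results': 5})],
    agentic_loop_mode=True,
)
# __post_init__ sets config.problem_type to ProblemType.CHAT_LLM.
```
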
diff --git a/abacusai/api_class/python_functions.py b/abacusai/api_class/python_functions.py
index 02c81feca..c774be2d7 100644
--- a/abacusai/api_class/python_functions.py
+++ b/abacusai/api_class/python_functions.py
@@ -17,6 +17,7 @@ class PythonFunctionArgument(ApiClass):
value (Any): The value of the argument
pipeline_variable (str): The name of the pipeline variable to use as the value
description (str): The description of the argument
+ long_description (str): The detailed description of the argument
item_type (str): Type of items when variable_type is LIST
"""
variable_type: enums.PythonFunctionArgumentType = dataclasses.field(default=None)
@@ -25,6 +26,7 @@ class PythonFunctionArgument(ApiClass):
value: Any = dataclasses.field(default=None)
pipeline_variable: str = dataclasses.field(default=None)
description: str = dataclasses.field(default=None)
+ long_description: str = dataclasses.field(default=None)
item_type: str = dataclasses.field(default=None)
@@ -36,6 +38,10 @@ class OutputVariableMapping(ApiClass):
Args:
variable_type (PythonFunctionOutputArgumentType): The type of the python function output argument
name (str): The name of the python function variable
+ description (str): The description of the output
+ long_description (str): The detailed description of the output
"""
variable_type: enums.PythonFunctionOutputArgumentType = dataclasses.field(default=None)
name: str = dataclasses.field(default=None)
+ description: str = dataclasses.field(default=None)
+ long_description: str = dataclasses.field(default=None)
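
A small sketch of the new description fields in use; the argument and output names below are hypothetical:

```python
from abacusai.api_class import enums
from abacusai.api_class.python_functions import (
    OutputVariableMapping, PythonFunctionArgument,
)

invoice_arg = PythonFunctionArgument(
    variable_type=enums.PythonFunctionArgumentType.ATTACHMENT,
    name='invoice_pdf',
    description='Invoice to parse',
    long_description='A PDF attachment with the invoice to be split into line items.',
)
line_items_out = OutputVariableMapping(
    variable_type=enums.PythonFunctionOutputArgumentType.JSON,
    name='line_items',
    description='Parsed line items',
)
```
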
diff --git a/abacusai/api_class/segments.py b/abacusai/api_class/segments.py
index 85c227a25..04c589ee4 100644
--- a/abacusai/api_class/segments.py
+++ b/abacusai/api_class/segments.py
@@ -1,5 +1,6 @@
import dataclasses
import uuid
+import warnings
from typing import Any, List
from . import enums
@@ -81,9 +82,21 @@ class TextResponseSection(ResponseSection):
segment: str
- def __init__(self, text: str, section_key: str = None):
+ def __init__(self, segment: str = None, section_key: str = None, text: str = None): # text is added for backward compatibility
+ if segment is not None and text is not None:
+ raise ValueError("Cannot specify both 'segment' and 'text' parameters. The 'text' parameter is deprecated, please use 'segment' only.")
+ if segment is None and text is None:
+ raise TypeError("The 'segment' parameter is required.")
+
+ if text is not None:
+ warnings.warn(
+ "The 'text' parameter is deprecated and will be removed in future. Please use 'segment' instead.",
+ DeprecationWarning,
+ stacklevel=2
+ )
+
super().__init__(type=enums.ResponseSectionType.TEXT, id=section_key)
- self.segment = text
+ self.segment = segment if segment is not None else text
@dataclasses.dataclass
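
The backward-compatibility path can be exercised directly; new-style construction uses `segment`, the old `text` keyword still works but now emits a DeprecationWarning, and passing both raises ValueError:

```python
import warnings

from abacusai.api_class.segments import TextResponseSection

section = TextResponseSection(segment='All checks passed.')

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    legacy = TextResponseSection(text='All checks passed.')

assert legacy.segment == section.segment
assert any(issubclass(w.category, DeprecationWarning) for w in caught)
```
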
diff --git a/abacusai/api_client_utils.py b/abacusai/api_client_utils.py
index 33933f23f..d218e4bd2 100644
--- a/abacusai/api_client_utils.py
+++ b/abacusai/api_client_utils.py
@@ -468,9 +468,22 @@ def get_pandas_pages_df(cls, df, feature_group_version: str, doc_id_column: str,
chunk_size = 10 * 1024 * 1024
+ def is_valid_config(x):
+ if not isinstance(x, dict):
+ return False
+ if cls.DOCUMENT_PROCESSING_CONFIG not in x:
+ return False
+ if x[cls.DOCUMENT_PROCESSING_CONFIG] is None:
+ return False
+ if x[cls.DOCUMENT_PROCESSING_CONFIG] == {}:
+ return False
+ # if all keys are None, return False
+ if all(v is None for v in x[cls.DOCUMENT_PROCESSING_CONFIG].values()):
+ return False
+ return True
+
pages_df_with_config = None
- df_with_config = df[df[document_column].apply(lambda x: isinstance(
- x, dict) and x.get(cls.DOCUMENT_PROCESSING_CONFIG) is not None)]
+ df_with_config = df[df[document_column].apply(is_valid_config)]
df = df[~df[doc_id_column].isin(df_with_config[doc_id_column])]
if len(df_with_config) > 0:
@@ -613,7 +626,7 @@ def tokens_to_text(tokens: List[dict]) -> str:
def combine_doc_info(group):
page_infos = group.to_dict(orient='records')
document_data = {
- cls.METADATA: [{cls.HEIGHT: page.get(cls.HEIGHT, None), cls.WIDTH: page.get(cls.WIDTH, None), cls.PAGE: page_no + 1, cls.MARKDOWN_FEATURES: page.get(cls.MARKDOWN_FEATURES)}
+ cls.METADATA: [{cls.HEIGHT: page.get(cls.HEIGHT, None), cls.WIDTH: page.get(cls.WIDTH, None), cls.PAGE: page_no, cls.MARKDOWN_FEATURES: page.get(cls.MARKDOWN_FEATURES)}
for page_no, page in enumerate(page_infos)],
cls.TOKENS: [token for page in page_infos for token in page.get(cls.TOKENS) or []],
# default to embedded text
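
A standalone restatement of the predicate above (the real helper is nested inside `get_pandas_pages_df` and reads `cls.DOCUMENT_PROCESSING_CONFIG`; the key name below is assumed). A row only takes the per-document config path when its config dict carries at least one non-None value:

```python
DOCUMENT_PROCESSING_CONFIG = 'document_processing_config'  # assumed key name

def has_usable_processing_config(cell):
    config = cell.get(DOCUMENT_PROCESSING_CONFIG) if isinstance(cell, dict) else None
    return bool(config) and any(value is not None for value in config.values())

assert not has_usable_processing_config(None)
assert not has_usable_processing_config({DOCUMENT_PROCESSING_CONFIG: {}})
assert not has_usable_processing_config({DOCUMENT_PROCESSING_CONFIG: {'ocr_mode': None}})
assert has_usable_processing_config({DOCUMENT_PROCESSING_CONFIG: {'ocr_mode': 'DEFAULT'}})
```
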
diff --git a/abacusai/api_endpoint.py b/abacusai/api_endpoint.py
index 16662a4b2..f0d4b7bd6 100644
--- a/abacusai/api_endpoint.py
+++ b/abacusai/api_endpoint.py
@@ -13,9 +13,11 @@ class ApiEndpoint(AbstractApiClass):
llmEndpoint (str): The URI that can be used to make llm api calls
externalChatEndpoint (str): The URI that can be used to access the external chat
dashboardEndpoint (str): The URI that the external chat will use to go back to the dashboard
+ hostingDomain (str): The domain for hosted app deployments
+ publicHostingDomain (str): The domain for publicly hosted app deployments
"""
- def __init__(self, client, apiEndpoint=None, predictEndpoint=None, proxyEndpoint=None, llmEndpoint=None, externalChatEndpoint=None, dashboardEndpoint=None):
+ def __init__(self, client, apiEndpoint=None, predictEndpoint=None, proxyEndpoint=None, llmEndpoint=None, externalChatEndpoint=None, dashboardEndpoint=None, hostingDomain=None, publicHostingDomain=None):
super().__init__(client, None)
self.api_endpoint = apiEndpoint
self.predict_endpoint = predictEndpoint
@@ -23,11 +25,13 @@ def __init__(self, client, apiEndpoint=None, predictEndpoint=None, proxyEndpoint
self.llm_endpoint = llmEndpoint
self.external_chat_endpoint = externalChatEndpoint
self.dashboard_endpoint = dashboardEndpoint
+ self.hosting_domain = hostingDomain
+ self.public_hosting_domain = publicHostingDomain
self.deprecated_keys = {}
def __repr__(self):
- repr_dict = {f'api_endpoint': repr(self.api_endpoint), f'predict_endpoint': repr(self.predict_endpoint), f'proxy_endpoint': repr(self.proxy_endpoint), f'llm_endpoint': repr(
- self.llm_endpoint), f'external_chat_endpoint': repr(self.external_chat_endpoint), f'dashboard_endpoint': repr(self.dashboard_endpoint)}
+ repr_dict = {f'api_endpoint': repr(self.api_endpoint), f'predict_endpoint': repr(self.predict_endpoint), f'proxy_endpoint': repr(self.proxy_endpoint), f'llm_endpoint': repr(self.llm_endpoint), f'external_chat_endpoint': repr(
+ self.external_chat_endpoint), f'dashboard_endpoint': repr(self.dashboard_endpoint), f'hosting_domain': repr(self.hosting_domain), f'public_hosting_domain': repr(self.public_hosting_domain)}
class_name = "ApiEndpoint"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -40,6 +44,6 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'api_endpoint': self.api_endpoint, 'predict_endpoint': self.predict_endpoint, 'proxy_endpoint': self.proxy_endpoint,
- 'llm_endpoint': self.llm_endpoint, 'external_chat_endpoint': self.external_chat_endpoint, 'dashboard_endpoint': self.dashboard_endpoint}
+ resp = {'api_endpoint': self.api_endpoint, 'predict_endpoint': self.predict_endpoint, 'proxy_endpoint': self.proxy_endpoint, 'llm_endpoint': self.llm_endpoint,
+ 'external_chat_endpoint': self.external_chat_endpoint, 'dashboard_endpoint': self.dashboard_endpoint, 'hosting_domain': self.hosting_domain, 'public_hosting_domain': self.public_hosting_domain}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
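
A minimal sketch of the widened `ApiEndpoint`; the endpoint values are hypothetical and `client=None` is used only for illustration:

```python
from abacusai.api_endpoint import ApiEndpoint

endpoint = ApiEndpoint(
    client=None,
    apiEndpoint='https://api.abacus.ai',
    hostingDomain='apps.example-hosting.ai',
    publicHostingDomain='public.example-hosting.ai',
)

# to_dict drops None-valued fields, so only the populated keys come back.
assert endpoint.to_dict() == {
    'api_endpoint': 'https://api.abacus.ai',
    'hosting_domain': 'apps.example-hosting.ai',
    'public_hosting_domain': 'public.example-hosting.ai',
}
```
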
diff --git a/abacusai/api_key.py b/abacusai/api_key.py
index f8b9fc621..ee0e619e5 100644
--- a/abacusai/api_key.py
+++ b/abacusai/api_key.py
@@ -11,7 +11,7 @@ class ApiKey(AbstractApiClass):
apiKey (str): The unique API key scoped to a specific organization. Value will be partially obscured.
apiKeySuffix (str): The last 4 characters of the API key.
tag (str): A user-friendly tag for the API key.
- type (str): The type of the API key, either 'default', 'code-llm', or 'computer-use'.
+ type (str): The type of the API key, either 'default', 'code-llm', 'terminal', or 'computer-use'.
createdAt (str): The timestamp when the API key was created.
expiresAt (str): The timestamp when the API key will expire.
isExpired (bool): Whether the API key has expired.
diff --git a/abacusai/app_user_group.py b/abacusai/app_user_group.py
index e337bd7d1..09cfd8cc3 100644
--- a/abacusai/app_user_group.py
+++ b/abacusai/app_user_group.py
@@ -16,10 +16,11 @@ class AppUserGroup(AbstractApiClass):
hasExternalApplicationReporting (bool): Whether users in the App User Group have permission to view all reports in their organization.
isExternalServiceGroup (bool): Whether the App User Group corresponds to a user group that's defined in an external service (i.e. Microsoft Active Directory or Okta) or not
externalServiceGroupId (str): The identifier that corresponds to the app user group's external service group representation
+ creationSource (str): The source of the app user group
users (User): The users in the user group.
"""
- def __init__(self, client, name=None, userGroupId=None, externalApplicationIds=None, invitedUserEmails=None, publicUserGroup=None, hasExternalApplicationReporting=None, isExternalServiceGroup=None, externalServiceGroupId=None, users={}):
+ def __init__(self, client, name=None, userGroupId=None, externalApplicationIds=None, invitedUserEmails=None, publicUserGroup=None, hasExternalApplicationReporting=None, isExternalServiceGroup=None, externalServiceGroupId=None, creationSource=None, users={}):
super().__init__(client, None)
self.name = name
self.user_group_id = userGroupId
@@ -29,12 +30,13 @@ def __init__(self, client, name=None, userGroupId=None, externalApplicationIds=N
self.has_external_application_reporting = hasExternalApplicationReporting
self.is_external_service_group = isExternalServiceGroup
self.external_service_group_id = externalServiceGroupId
+ self.creation_source = creationSource
self.users = client._build_class(User, users)
self.deprecated_keys = {}
def __repr__(self):
- repr_dict = {f'name': repr(self.name), f'user_group_id': repr(self.user_group_id), f'external_application_ids': repr(self.external_application_ids), f'invited_user_emails': repr(self.invited_user_emails), f'public_user_group': repr(
- self.public_user_group), f'has_external_application_reporting': repr(self.has_external_application_reporting), f'is_external_service_group': repr(self.is_external_service_group), f'external_service_group_id': repr(self.external_service_group_id), f'users': repr(self.users)}
+ repr_dict = {f'name': repr(self.name), f'user_group_id': repr(self.user_group_id), f'external_application_ids': repr(self.external_application_ids), f'invited_user_emails': repr(self.invited_user_emails), f'public_user_group': repr(self.public_user_group), f'has_external_application_reporting': repr(
+ self.has_external_application_reporting), f'is_external_service_group': repr(self.is_external_service_group), f'external_service_group_id': repr(self.external_service_group_id), f'creation_source': repr(self.creation_source), f'users': repr(self.users)}
class_name = "AppUserGroup"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -47,6 +49,6 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'name': self.name, 'user_group_id': self.user_group_id, 'external_application_ids': self.external_application_ids, 'invited_user_emails': self.invited_user_emails, 'public_user_group': self.public_user_group,
- 'has_external_application_reporting': self.has_external_application_reporting, 'is_external_service_group': self.is_external_service_group, 'external_service_group_id': self.external_service_group_id, 'users': self._get_attribute_as_dict(self.users)}
+ resp = {'name': self.name, 'user_group_id': self.user_group_id, 'external_application_ids': self.external_application_ids, 'invited_user_emails': self.invited_user_emails, 'public_user_group': self.public_user_group, 'has_external_application_reporting':
+ self.has_external_application_reporting, 'is_external_service_group': self.is_external_service_group, 'external_service_group_id': self.external_service_group_id, 'creation_source': self.creation_source, 'users': self._get_attribute_as_dict(self.users)}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/application_connector.py b/abacusai/application_connector.py
index 3c0b95777..2b16e67bb 100644
--- a/abacusai/application_connector.py
+++ b/abacusai/application_connector.py
@@ -9,16 +9,18 @@ class ApplicationConnector(AbstractApiClass):
client (ApiClient): An authenticated API Client instance
applicationConnectorId (str): The unique ID for the connection.
service (str): The service this connection connects to
+ serviceName (str): For OAuth services, the specific provider name (e.g., 'spotify')
name (str): A user-friendly name for the service
createdAt (str): When the API key was created
status (str): The status of the Application Connector
auth (dict): Non-secret connection information for this connector
"""
- def __init__(self, client, applicationConnectorId=None, service=None, name=None, createdAt=None, status=None, auth=None):
+ def __init__(self, client, applicationConnectorId=None, service=None, serviceName=None, name=None, createdAt=None, status=None, auth=None):
super().__init__(client, applicationConnectorId)
self.application_connector_id = applicationConnectorId
self.service = service
+ self.service_name = serviceName
self.name = name
self.created_at = createdAt
self.status = status
@@ -26,8 +28,8 @@ def __init__(self, client, applicationConnectorId=None, service=None, name=None,
self.deprecated_keys = {}
def __repr__(self):
- repr_dict = {f'application_connector_id': repr(self.application_connector_id), f'service': repr(self.service), f'name': repr(
- self.name), f'created_at': repr(self.created_at), f'status': repr(self.status), f'auth': repr(self.auth)}
+ repr_dict = {f'application_connector_id': repr(self.application_connector_id), f'service': repr(self.service), f'service_name': repr(
+ self.service_name), f'name': repr(self.name), f'created_at': repr(self.created_at), f'status': repr(self.status), f'auth': repr(self.auth)}
class_name = "ApplicationConnector"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -40,7 +42,7 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'application_connector_id': self.application_connector_id, 'service': self.service,
+ resp = {'application_connector_id': self.application_connector_id, 'service': self.service, 'service_name': self.service_name,
'name': self.name, 'created_at': self.created_at, 'status': self.status, 'auth': self.auth}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/audio_gen_settings.py b/abacusai/audio_gen_settings.py
index 768c1f0f2..4b42deda8 100644
--- a/abacusai/audio_gen_settings.py
+++ b/abacusai/audio_gen_settings.py
@@ -7,16 +7,19 @@ class AudioGenSettings(AbstractApiClass):
Args:
client (ApiClient): An authenticated API Client instance
- model (dict): Dropdown for models available for audio generation.
+ model (dict): Names of the models available for audio generation.
+ settings (dict): Settings for each model.
"""
- def __init__(self, client, model=None):
+ def __init__(self, client, model=None, settings=None):
super().__init__(client, None)
self.model = model
+ self.settings = settings
self.deprecated_keys = {}
def __repr__(self):
- repr_dict = {f'model': repr(self.model)}
+ repr_dict = {f'model': repr(self.model),
+ f'settings': repr(self.settings)}
class_name = "AudioGenSettings"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -29,5 +32,5 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'model': self.model}
+ resp = {'model': self.model, 'settings': self.settings}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/audio_url_result.py b/abacusai/audio_url_result.py
new file mode 100644
index 000000000..89eaee698
--- /dev/null
+++ b/abacusai/audio_url_result.py
@@ -0,0 +1,36 @@
+from .return_class import AbstractApiClass
+
+
+class AudioUrlResult(AbstractApiClass):
+ """
+ TTS result
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ audioUrl (str): The audio url.
+ creditsUsed (float): The credits used.
+ """
+
+ def __init__(self, client, audioUrl=None, creditsUsed=None):
+ super().__init__(client, None)
+ self.audio_url = audioUrl
+ self.credits_used = creditsUsed
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'audio_url': repr(
+ self.audio_url), f'credits_used': repr(self.credits_used)}
+ class_name = "AudioUrlResult"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'audio_url': self.audio_url, 'credits_used': self.credits_used}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/audit_log.py b/abacusai/audit_log.py
new file mode 100644
index 000000000..3bd8ca60a
--- /dev/null
+++ b/abacusai/audit_log.py
@@ -0,0 +1,47 @@
+from .return_class import AbstractApiClass
+
+
+class AuditLog(AbstractApiClass):
+ """
+ An audit log entry
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ createdAt (str): The timestamp when the audit log entry was created
+ userId (str): The hashed ID of the user who performed the action
+ objectId (str): The hashed ID of the object that was affected by the action
+ action (str): The action performed (create, modify, start, stop, delete, share, hide, credential_change, login)
+ source (str): The source of the action (api, ui, pipeline, cli, system)
+ refreshPolicyId (str): The hashed ID of the refresh policy if applicable
+ pipelineId (str): The hashed ID of the pipeline if applicable
+ """
+
+ def __init__(self, client, createdAt=None, userId=None, objectId=None, action=None, source=None, refreshPolicyId=None, pipelineId=None):
+ super().__init__(client, None)
+ self.created_at = createdAt
+ self.user_id = userId
+ self.object_id = objectId
+ self.action = action
+ self.source = source
+ self.refresh_policy_id = refreshPolicyId
+ self.pipeline_id = pipelineId
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'created_at': repr(self.created_at), f'user_id': repr(self.user_id), f'object_id': repr(self.object_id), f'action': repr(
+ self.action), f'source': repr(self.source), f'refresh_policy_id': repr(self.refresh_policy_id), f'pipeline_id': repr(self.pipeline_id)}
+ class_name = "AuditLog"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'created_at': self.created_at, 'user_id': self.user_id, 'object_id': self.object_id, 'action': self.action,
+ 'source': self.source, 'refresh_policy_id': self.refresh_policy_id, 'pipeline_id': self.pipeline_id}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/batch_prediction.py b/abacusai/batch_prediction.py
index 34609d8d9..e5928ffb8 100644
--- a/abacusai/batch_prediction.py
+++ b/abacusai/batch_prediction.py
@@ -84,7 +84,7 @@ def __init__(self, client, batchPredictionId=None, createdAt=None, name=None, de
BatchPredictionArgs, globalPredictionArgs)
self.batch_prediction_args = client._build_class(getattr(
api_class, batchPredictionArgsType, BatchPredictionArgs) if batchPredictionArgsType else BatchPredictionArgs, batchPredictionArgs)
- self.deprecated_keys = {'global_prediction_args', 'explanations'}
+ self.deprecated_keys = {'explanations', 'global_prediction_args'}
def __repr__(self):
repr_dict = {f'batch_prediction_id': repr(self.batch_prediction_id), f'created_at': repr(self.created_at), f'name': repr(self.name), f'deployment_id': repr(self.deployment_id), f'file_connector_output_location': repr(self.file_connector_output_location), f'database_connector_id': repr(self.database_connector_id), f'database_output_configuration': repr(self.database_output_configuration), f'file_output_format': repr(self.file_output_format), f'connector_type': repr(self.connector_type), f'legacy_input_location': repr(self.legacy_input_location), f'output_feature_group_id': repr(self.output_feature_group_id), f'feature_group_table_name': repr(self.feature_group_table_name), f'output_feature_group_table_name': repr(self.output_feature_group_table_name), f'summary_feature_group_table_name': repr(self.summary_feature_group_table_name), f'csv_input_prefix': repr(
diff --git a/abacusai/batch_prediction_version.py b/abacusai/batch_prediction_version.py
index 5e53ea27b..f89c47a1f 100644
--- a/abacusai/batch_prediction_version.py
+++ b/abacusai/batch_prediction_version.py
@@ -100,7 +100,7 @@ def __init__(self, client, batchPredictionVersion=None, batchPredictionId=None,
BatchPredictionArgs, globalPredictionArgs)
self.batch_prediction_args = client._build_class(getattr(
api_class, batchPredictionArgsType, BatchPredictionArgs) if batchPredictionArgsType else BatchPredictionArgs, batchPredictionArgs)
- self.deprecated_keys = {'global_prediction_args', 'explanations'}
+ self.deprecated_keys = {'explanations', 'global_prediction_args'}
def __repr__(self):
repr_dict = {f'batch_prediction_version': repr(self.batch_prediction_version), f'batch_prediction_id': repr(self.batch_prediction_id), f'status': repr(self.status), f'drift_monitor_status': repr(self.drift_monitor_status), f'deployment_id': repr(self.deployment_id), f'model_id': repr(self.model_id), f'model_version': repr(self.model_version), f'predictions_started_at': repr(self.predictions_started_at), f'predictions_completed_at': repr(self.predictions_completed_at), f'database_output_error': repr(self.database_output_error), f'total_predictions': repr(self.total_predictions), f'failed_predictions': repr(self.failed_predictions), f'database_connector_id': repr(self.database_connector_id), f'database_output_configuration': repr(self.database_output_configuration), f'file_connector_output_location': repr(self.file_connector_output_location), f'file_output_format': repr(self.file_output_format), f'connector_type': repr(self.connector_type), f'legacy_input_location': repr(self.legacy_input_location), f'error': repr(self.error), f'drift_monitor_error': repr(self.drift_monitor_error), f'monitor_warnings': repr(self.monitor_warnings), f'csv_input_prefix': repr(
diff --git a/abacusai/chat_message.py b/abacusai/chat_message.py
index 01f61e3e0..b240518ae 100644
--- a/abacusai/chat_message.py
+++ b/abacusai/chat_message.py
@@ -16,9 +16,10 @@ class ChatMessage(AbstractApiClass):
hotkeyTitle (str): The title of the hotkey prompt if the message has one
tasks (list[str]): The list of spawned tasks, if the message was broken down into smaller sub-tasks.
keywordArguments (dict): A dict of kwargs used to generate the response.
+ computePointsUsed (int): The number of compute points used for the message.
"""
- def __init__(self, client, role=None, text=None, timestamp=None, isUseful=None, feedback=None, docIds=None, hotkeyTitle=None, tasks=None, keywordArguments=None):
+ def __init__(self, client, role=None, text=None, timestamp=None, isUseful=None, feedback=None, docIds=None, hotkeyTitle=None, tasks=None, keywordArguments=None, computePointsUsed=None):
super().__init__(client, None)
self.role = role
self.text = text
@@ -29,11 +30,12 @@ def __init__(self, client, role=None, text=None, timestamp=None, isUseful=None,
self.hotkey_title = hotkeyTitle
self.tasks = tasks
self.keyword_arguments = keywordArguments
+ self.compute_points_used = computePointsUsed
self.deprecated_keys = {}
def __repr__(self):
- repr_dict = {f'role': repr(self.role), f'text': repr(self.text), f'timestamp': repr(self.timestamp), f'is_useful': repr(self.is_useful), f'feedback': repr(
- self.feedback), f'doc_ids': repr(self.doc_ids), f'hotkey_title': repr(self.hotkey_title), f'tasks': repr(self.tasks), f'keyword_arguments': repr(self.keyword_arguments)}
+ repr_dict = {f'role': repr(self.role), f'text': repr(self.text), f'timestamp': repr(self.timestamp), f'is_useful': repr(self.is_useful), f'feedback': repr(self.feedback), f'doc_ids': repr(
+ self.doc_ids), f'hotkey_title': repr(self.hotkey_title), f'tasks': repr(self.tasks), f'keyword_arguments': repr(self.keyword_arguments), f'compute_points_used': repr(self.compute_points_used)}
class_name = "ChatMessage"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -46,6 +48,6 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'role': self.role, 'text': self.text, 'timestamp': self.timestamp, 'is_useful': self.is_useful, 'feedback': self.feedback,
- 'doc_ids': self.doc_ids, 'hotkey_title': self.hotkey_title, 'tasks': self.tasks, 'keyword_arguments': self.keyword_arguments}
+ resp = {'role': self.role, 'text': self.text, 'timestamp': self.timestamp, 'is_useful': self.is_useful, 'feedback': self.feedback, 'doc_ids': self.doc_ids,
+ 'hotkey_title': self.hotkey_title, 'tasks': self.tasks, 'keyword_arguments': self.keyword_arguments, 'compute_points_used': self.compute_points_used}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/chatllm_computer_status.py b/abacusai/chatllm_computer_status.py
new file mode 100644
index 000000000..48bfe1eb8
--- /dev/null
+++ b/abacusai/chatllm_computer_status.py
@@ -0,0 +1,41 @@
+from .return_class import AbstractApiClass
+
+
+class ChatllmComputerStatus(AbstractApiClass):
+ """
+ ChatLLM Computer Status
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ computerId (str): The ID of the computer; it can be a deployment_conversation_id or a computer_id (TODO: add separate field for deployment_conversation_id)
+ vncEndpoint (str): The VNC endpoint of the computer
+ computerStarted (bool): Whether the computer has started
+ restartRequired (bool): Whether the computer needs to be restarted
+ """
+
+ def __init__(self, client, computerId=None, vncEndpoint=None, computerStarted=None, restartRequired=None):
+ super().__init__(client, None)
+ self.computer_id = computerId
+ self.vnc_endpoint = vncEndpoint
+ self.computer_started = computerStarted
+ self.restart_required = restartRequired
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'computer_id': repr(self.computer_id), f'vnc_endpoint': repr(
+ self.vnc_endpoint), f'computer_started': repr(self.computer_started), f'restart_required': repr(self.restart_required)}
+ class_name = "ChatllmComputerStatus"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'computer_id': self.computer_id, 'vnc_endpoint': self.vnc_endpoint,
+ 'computer_started': self.computer_started, 'restart_required': self.restart_required}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/chatllm_memory.py b/abacusai/chatllm_memory.py
new file mode 100644
index 000000000..c3ed17f75
--- /dev/null
+++ b/abacusai/chatllm_memory.py
@@ -0,0 +1,39 @@
+from .return_class import AbstractApiClass
+
+
+class ChatllmMemory(AbstractApiClass):
+ """
+ An LLM created memory in ChatLLM
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ chatllmMemoryId (str): The ID of the chatllm memory.
+ memory (str): The text of the ChatLLM memory.
+ sourceDeploymentConversationId (str): The deployment conversation where this memory was created.
+ """
+
+ def __init__(self, client, chatllmMemoryId=None, memory=None, sourceDeploymentConversationId=None):
+ super().__init__(client, chatllmMemoryId)
+ self.chatllm_memory_id = chatllmMemoryId
+ self.memory = memory
+ self.source_deployment_conversation_id = sourceDeploymentConversationId
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'chatllm_memory_id': repr(self.chatllm_memory_id), f'memory': repr(
+ self.memory), f'source_deployment_conversation_id': repr(self.source_deployment_conversation_id)}
+ class_name = "ChatllmMemory"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'chatllm_memory_id': self.chatllm_memory_id, 'memory': self.memory,
+ 'source_deployment_conversation_id': self.source_deployment_conversation_id}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/chatllm_project.py b/abacusai/chatllm_project.py
new file mode 100644
index 000000000..065ceb380
--- /dev/null
+++ b/abacusai/chatllm_project.py
@@ -0,0 +1,45 @@
+from .return_class import AbstractApiClass
+
+
+class ChatllmProject(AbstractApiClass):
+ """
+ ChatLLM Project
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ chatllmProjectId (id): The ID of the chatllm project.
+ name (str): The name of the chatllm project.
+ description (str): The description of the chatllm project.
+ customInstructions (str): The custom instructions of the chatllm project.
+ createdAt (str): The creation time of the chatllm project.
+ updatedAt (str): The update time of the chatllm project.
+ """
+
+ def __init__(self, client, chatllmProjectId=None, name=None, description=None, customInstructions=None, createdAt=None, updatedAt=None):
+ super().__init__(client, chatllmProjectId)
+ self.chatllm_project_id = chatllmProjectId
+ self.name = name
+ self.description = description
+ self.custom_instructions = customInstructions
+ self.created_at = createdAt
+ self.updated_at = updatedAt
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'chatllm_project_id': repr(self.chatllm_project_id), f'name': repr(self.name), f'description': repr(
+ self.description), f'custom_instructions': repr(self.custom_instructions), f'created_at': repr(self.created_at), f'updated_at': repr(self.updated_at)}
+ class_name = "ChatllmProject"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'chatllm_project_id': self.chatllm_project_id, 'name': self.name, 'description': self.description,
+ 'custom_instructions': self.custom_instructions, 'created_at': self.created_at, 'updated_at': self.updated_at}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/chatllm_project_permissions.py b/abacusai/chatllm_project_permissions.py
new file mode 100644
index 000000000..b10f31da8
--- /dev/null
+++ b/abacusai/chatllm_project_permissions.py
@@ -0,0 +1,41 @@
+from .return_class import AbstractApiClass
+
+
+class ChatllmProjectPermissions(AbstractApiClass):
+ """
+ ChatLLM Project Permissions
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ chatllmProjectId (id): The ID of the chatllm project.
+ accessLevel (str): The access level of the chatllm project.
+ userPermissions (list): List of tuples containing (user_id, permission).
+ userGroupPermissions (list): List of tuples containing (user_group_id, permission).
+ """
+
+ def __init__(self, client, chatllmProjectId=None, accessLevel=None, userPermissions=None, userGroupPermissions=None):
+ super().__init__(client, None)
+ self.chatllm_project_id = chatllmProjectId
+ self.access_level = accessLevel
+ self.user_permissions = userPermissions
+ self.user_group_permissions = userGroupPermissions
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'chatllm_project_id': repr(self.chatllm_project_id), f'access_level': repr(
+ self.access_level), f'user_permissions': repr(self.user_permissions), f'user_group_permissions': repr(self.user_group_permissions)}
+ class_name = "ChatllmProjectPermissions"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'chatllm_project_id': self.chatllm_project_id, 'access_level': self.access_level,
+ 'user_permissions': self.user_permissions, 'user_group_permissions': self.user_group_permissions}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/chatllm_task.py b/abacusai/chatllm_task.py
index 82b44ded7..1bb705f99 100644
--- a/abacusai/chatllm_task.py
+++ b/abacusai/chatllm_task.py
@@ -8,32 +8,52 @@ class ChatllmTask(AbstractApiClass):
Args:
client (ApiClient): An authenticated API Client instance
chatllmTaskId (str): The id of the chatllm task.
+ daemonTaskId (str): The id of the daemon task.
+ taskType (str): The type of task ('chatllm' or 'daemon').
name (str): The name of the chatllm task.
instructions (str): The instructions of the chatllm task.
+ description (str): The description of the chatllm task.
lifecycle (str): The lifecycle of the chatllm task.
scheduleInfo (dict): The schedule info of the chatllm task.
externalApplicationId (str): The external application id associated with the chatllm task.
deploymentConversationId (str): The deployment conversation id associated with the chatllm task.
+ sourceDeploymentConversationId (str): The source deployment conversation id associated with the chatllm task.
enableEmailAlerts (bool): Whether email alerts are enabled for the chatllm task.
email (str): The email to send alerts to.
+ numUnreadTaskInstances (int): The number of unread task instances for the chatllm task.
+ computePointsUsed (int): The compute points used for the chatllm task.
+ displayMarkdown (str): The display markdown for the chatllm task.
+ requiresNewConversation (bool): Whether a new conversation is required for the chatllm task.
+ executionMode (str): The execution mode of the chatllm task.
+ taskDefinition (dict): The task definition (for web_service_trigger tasks).
"""
- def __init__(self, client, chatllmTaskId=None, name=None, instructions=None, lifecycle=None, scheduleInfo=None, externalApplicationId=None, deploymentConversationId=None, enableEmailAlerts=None, email=None):
+ def __init__(self, client, chatllmTaskId=None, daemonTaskId=None, taskType=None, name=None, instructions=None, description=None, lifecycle=None, scheduleInfo=None, externalApplicationId=None, deploymentConversationId=None, sourceDeploymentConversationId=None, enableEmailAlerts=None, email=None, numUnreadTaskInstances=None, computePointsUsed=None, displayMarkdown=None, requiresNewConversation=None, executionMode=None, taskDefinition=None):
super().__init__(client, chatllmTaskId)
self.chatllm_task_id = chatllmTaskId
+ self.daemon_task_id = daemonTaskId
+ self.task_type = taskType
self.name = name
self.instructions = instructions
+ self.description = description
self.lifecycle = lifecycle
self.schedule_info = scheduleInfo
self.external_application_id = externalApplicationId
self.deployment_conversation_id = deploymentConversationId
+ self.source_deployment_conversation_id = sourceDeploymentConversationId
self.enable_email_alerts = enableEmailAlerts
self.email = email
+ self.num_unread_task_instances = numUnreadTaskInstances
+ self.compute_points_used = computePointsUsed
+ self.display_markdown = displayMarkdown
+ self.requires_new_conversation = requiresNewConversation
+ self.execution_mode = executionMode
+ self.task_definition = taskDefinition
self.deprecated_keys = {}
def __repr__(self):
- repr_dict = {f'chatllm_task_id': repr(self.chatllm_task_id), f'name': repr(self.name), f'instructions': repr(self.instructions), f'lifecycle': repr(self.lifecycle), f'schedule_info': repr(
- self.schedule_info), f'external_application_id': repr(self.external_application_id), f'deployment_conversation_id': repr(self.deployment_conversation_id), f'enable_email_alerts': repr(self.enable_email_alerts), f'email': repr(self.email)}
+ repr_dict = {f'chatllm_task_id': repr(self.chatllm_task_id), f'daemon_task_id': repr(self.daemon_task_id), f'task_type': repr(self.task_type), f'name': repr(self.name), f'instructions': repr(self.instructions), f'description': repr(self.description), f'lifecycle': repr(self.lifecycle), f'schedule_info': repr(self.schedule_info), f'external_application_id': repr(self.external_application_id), f'deployment_conversation_id': repr(self.deployment_conversation_id), f'source_deployment_conversation_id': repr(
+ self.source_deployment_conversation_id), f'enable_email_alerts': repr(self.enable_email_alerts), f'email': repr(self.email), f'num_unread_task_instances': repr(self.num_unread_task_instances), f'compute_points_used': repr(self.compute_points_used), f'display_markdown': repr(self.display_markdown), f'requires_new_conversation': repr(self.requires_new_conversation), f'execution_mode': repr(self.execution_mode), f'task_definition': repr(self.task_definition)}
class_name = "ChatllmTask"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -46,6 +66,6 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'chatllm_task_id': self.chatllm_task_id, 'name': self.name, 'instructions': self.instructions, 'lifecycle': self.lifecycle, 'schedule_info': self.schedule_info,
- 'external_application_id': self.external_application_id, 'deployment_conversation_id': self.deployment_conversation_id, 'enable_email_alerts': self.enable_email_alerts, 'email': self.email}
+ resp = {'chatllm_task_id': self.chatllm_task_id, 'daemon_task_id': self.daemon_task_id, 'task_type': self.task_type, 'name': self.name, 'instructions': self.instructions, 'description': self.description, 'lifecycle': self.lifecycle, 'schedule_info': self.schedule_info, 'external_application_id': self.external_application_id, 'deployment_conversation_id': self.deployment_conversation_id,
+ 'source_deployment_conversation_id': self.source_deployment_conversation_id, 'enable_email_alerts': self.enable_email_alerts, 'email': self.email, 'num_unread_task_instances': self.num_unread_task_instances, 'compute_points_used': self.compute_points_used, 'display_markdown': self.display_markdown, 'requires_new_conversation': self.requires_new_conversation, 'execution_mode': self.execution_mode, 'task_definition': self.task_definition}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/client.py b/abacusai/client.py
index a8b6f91d5..631c2a4a7 100644
--- a/abacusai/client.py
+++ b/abacusai/client.py
@@ -33,18 +33,18 @@
from .annotation_entry import AnnotationEntry
from .annotations_status import AnnotationsStatus
from .api_class import (
- AgentInterface, AlertActionConfig, AlertConditionConfig, ApiClass, ApiEnum,
- ApplicationConnectorDatasetConfig, ApplicationConnectorType,
- AttachmentParsingConfig, BatchPredictionArgs, Blob, BlobInput, CPUSize,
- DatasetDocumentProcessingConfig, DataType, DeploymentConversationType,
- DocumentProcessingConfig, EvalArtifactType, FeatureGroupExportConfig,
- ForecastingMonitorConfig, IncrementalDatabaseConnectorConfig, LLMName,
- MemorySize, MergeConfig, OperatorConfig, ParsingConfig,
- PredictionArguments, ProblemType, ProjectFeatureGroupConfig,
- PythonFunctionType, ResponseSection, SamplingConfig, Segment,
- StreamingConnectorDatasetConfig, TrainingConfig, VectorStoreConfig,
- WorkflowGraph, WorkflowGraphNode, WorkflowNodeTemplateConfig,
- get_clean_function_source_code_for_agent
+ AgentClientType, AgentInterface, AlertActionConfig, AlertConditionConfig,
+ ApiClass, ApiEnum, ApplicationConnectorDatasetConfig,
+ ApplicationConnectorType, AttachmentParsingConfig, BatchPredictionArgs,
+ Blob, BlobInput, CPUSize, DatasetDocumentProcessingConfig, DataType,
+ DeploymentConversationType, DocumentProcessingConfig, EvalArtifactType,
+ FeatureGroupExportConfig, ForecastingMonitorConfig,
+ IncrementalDatabaseConnectorConfig, LLMName, MemorySize, MergeConfig,
+ OperatorConfig, OrganizationSecretType, ParsingConfig, PredictionArguments,
+ ProblemType, ProjectFeatureGroupConfig, PythonFunctionType,
+ ResponseSection, SamplingConfig, Segment, StreamingConnectorDatasetConfig,
+ TrainingConfig, VectorStoreConfig, WorkflowGraph, WorkflowGraphNode,
+ WorkflowNodeTemplateConfig, get_clean_function_source_code_for_agent
)
from .api_class.abstract import get_clean_function_source_code, get_clean_function_source_code_for_agent, snake_case
from .api_class.ai_agents import WorkflowGraph, WorkflowNodeTemplateConfig
@@ -126,6 +126,7 @@
from .llm_generated_code import LlmGeneratedCode
from .llm_input import LlmInput
from .llm_response import LlmResponse
+from .mcp_server_query_result import McpServerQueryResult
from .model import Model
from .model_artifacts_export import ModelArtifactsExport
from .model_metrics import ModelMetrics
@@ -173,11 +174,13 @@
from .streaming_auth_token import StreamingAuthToken
from .streaming_connector import StreamingConnector
from .training_config_options import TrainingConfigOptions
+from .unified_connector import UnifiedConnector
from .upload import Upload
from .upload_part import UploadPart
from .use_case import UseCase
from .use_case_requirements import UseCaseRequirements
from .user import User
+from .user_group_object_permission import UserGroupObjectPermission
from .web_page_response import WebPageResponse
from .web_search_response import WebSearchResponse
from .webhook import Webhook
@@ -213,20 +216,23 @@ async def sse_asynchronous_generator(endpoint: str, headers: dict, body: dict):
except Exception:
raise Exception('Please install aiohttp to use this functionality')
- async with aiohttp.request('POST', endpoint, json=body, headers=headers) as response:
+ async with aiohttp.request('POST', endpoint, json=body, headers=headers, timeout=aiohttp.ClientTimeout(total=0)) as response:
async for line in response.content:
if line:
- streamed_responses = line.decode('utf-8').split('\n\n')
+ streamed_responses = line.decode('utf-8').split('\n')
for resp in streamed_responses:
if resp:
resp = resp.strip()
if resp:
- resp = json.loads(resp)
- resp = {snake_case(
- key): value for key, value in resp.items()}
- if 'ping' in resp:
- continue
- yield resp
+ try:
+ resp = json.loads(resp)
+ resp = {snake_case(
+ key): value for key, value in resp.items()}
+ if 'ping' in resp:
+ continue
+ yield resp
+ except Exception:
+ pass
def _requests_retry_session(retries=5, backoff_factor=0.1, status_forcelist=(502, 503, 504), session=None, retry_500: bool = False):
@@ -320,6 +326,15 @@ def __getattr__(self, item):
f"'{self.__class__.__name__}' object has no attribute '{item}'")
+class ToolResponse(AgentResponse):
+ """
+ Response object for tool to support non-text response sections
+ """
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+
class ClientOptions:
"""
Options for configuring the ApiClient
@@ -652,7 +667,7 @@ class BaseApiClient:
client_options (ClientOptions): Optional API client configurations
skip_version_check (bool): If true, will skip checking the server's current API version on initializing the client
"""
- client_version = '1.4.31'
+ client_version = '1.4.76'
def __init__(self, api_key: str = None, server: str = None, client_options: ClientOptions = None, skip_version_check: bool = False, include_tb: bool = False):
self.api_key = api_key
@@ -676,14 +691,23 @@ def __init__(self, api_key: str = None, server: str = None, client_options: Clie
self.service_discovery_url = None
# Connection and version check
if self.api_key is not None:
- try:
- endpoint_info = self._call_api('getApiEndpoint', 'GET')
- self.prediction_endpoint = endpoint_info['predictEndpoint']
- self.proxy_endpoint = endpoint_info.get('proxyEndpoint')
- if not self.server:
- self.server = endpoint_info['apiEndpoint']
- except Exception:
- logging.error('Invalid API Key')
+ max_retries = 5
+ retry_delay = 1
+ for attempt in range(max_retries):
+ try:
+ endpoint_info = self._call_api('getApiEndpoint', 'GET')
+ self.prediction_endpoint = endpoint_info['predictEndpoint']
+ self.proxy_endpoint = endpoint_info.get('proxyEndpoint')
+ if not self.server:
+ self.server = endpoint_info['apiEndpoint']
+ break
+ except Exception as e:
+ logging.warning(f'Error while calling API: {e}')
+ if attempt < max_retries - 1:
+ time.sleep(retry_delay)
+ retry_delay *= 2
+ else:
+ logging.error('Failed to get endpoints')
skip_version_check = skip_version_check or self.client_version.startswith(
'g')
if not skip_version_check:
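# Usage sketch (not part of the patch): constructing a client now benefits from the
# retry with exponential backoff added above when fetching endpoints. The API key is a placeholder.
from abacusai import ApiClient

client = ApiClient(api_key='YOUR_API_KEY')  # endpoint lookup retries up to 5 times before logging an error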
@@ -842,12 +866,35 @@ def _call_api(
error_message, response.status_code, error_type, request_id)
return result
- def _proxy_request(self, name: str, method: str = 'POST', query_params: dict = None, body: dict = None, data: dict = None, files=None, parse_type=None, is_sync: bool = False, streamable_response: bool = False):
+ def _proxy_request(self, name: str, method: str = 'POST', query_params: dict = None, body: dict = None, data: dict = None, files=None, parse_type=None, is_sync: bool = False, streamable_response: bool = False, timeout=None):
headers = {'APIKEY': self.api_key}
deployment_id = os.getenv('ABACUS_EXEC_SERVICE_DEPLOYMENT_ID')
if deployment_id:
- query_params = {**(query_params or {}),
- 'environmentDeploymentId': deployment_id}
+ if not (query_params or {}).get('deploymentId') or deployment_id == (query_params or {}).get('deploymentId'):
+ query_params = {**(query_params or {}),
+ 'environmentDeploymentId': deployment_id}
+ caller = self._get_agent_caller()
+ request_id = self._get_agent_app_request_id()
+ if caller and request_id:
+ if get_object_from_context(self, _request_context, 'is_agent', bool):
+ query_params = {**(query_params or {}), 'isAgent': True}
+ if get_object_from_context(self, _request_context, 'is_agent_api', bool):
+ query_params = {**(query_params or {}), 'isAgentApi': True}
+ hashed_deployment_id = get_object_from_context(
+ self, _request_context, 'deployment_id', str)
+ if hashed_deployment_id:
+ query_params = {**(query_params or {}),
+ 'billingDeploymentId': hashed_deployment_id}
+ user_info = get_object_from_context(
+ self, _request_context, 'user_info', dict)
+ if user_info:
+ user_id = user_info.get('user_id')
+ if user_id:
+ query_params = {**(query_params or {}),
+ 'userId': user_info.get('user_id')}
+
endpoint = self.proxy_endpoint
if endpoint is None:
raise Exception(
@@ -870,7 +917,7 @@ def _proxy_request(self, name: str, method: str = 'POST', query_params: dict = N
if is_sync:
sync_api_endpoint = f'{endpoint}/api/{name}'
response = self._request(url=sync_api_endpoint, method=method, query_params=query_params,
- headers=headers, body=body, data=data, files=files, stream=streamable_response)
+ headers=headers, body=body, data=data, files=files, stream=streamable_response, timeout=timeout)
status_code = response.status_code
if streamable_response and status_code == 200:
return response.raw
@@ -1004,12 +1051,15 @@ class ReadOnlyClient(BaseApiClient):
skip_version_check (bool): If true, will skip checking the server's current API version on initializing the client
"""
- def list_api_keys(self) -> List[ApiKey]:
+ def list_api_keys(self, type: str = 'default') -> List[ApiKey]:
"""Lists all of the user's API keys
+ Args:
+ type (str): The type of API keys to list.
+
Returns:
list[ApiKey]: List of API Keys for the current user's organization."""
- return self._call_api('listApiKeys', 'GET', query_params={}, parse_type=ApiKey)
+ return self._call_api('listApiKeys', 'GET', query_params={'type': type}, parse_type=ApiKey)
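# Usage sketch (not part of the patch): listing keys with the new `type` filter.
# Assumes `client` is an authenticated ApiClient; 'default' mirrors the parameter's default value.
api_keys = client.list_api_keys(type='default')
for api_key in api_keys:
    print(api_key)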
def list_organization_users(self) -> List[User]:
"""Retrieves a list of all platform users in the organization, including pending users who have been invited.
@@ -1529,7 +1579,28 @@ def query_database_connector(self, database_connector_id: str, query: str) -> li
Args:
database_connector_id (str): A unique string identifier for the database connector.
query (str): The query to be run in the database connector."""
- return self._call_api('queryDatabaseConnector', 'GET', query_params={'databaseConnectorId': database_connector_id, 'query': query})
+ return self._proxy_request('queryDatabaseConnector', 'GET', query_params={'databaseConnectorId': database_connector_id, 'query': query}, is_sync=True)
+
+ def query_database_connector_datallm(self, database_connector_id: str, query: str) -> list:
+ """Runs a read-only query in the specified database connector. This API is specifically designed for DataLLM
+
+ and enforces read-only access with user-specific access control.
+
+
+ Args:
+ database_connector_id (str): A unique string identifier for the database connector.
+ query (str): The query to be run in the database connector (must be read-only)."""
+ return self._proxy_request('queryDatabaseConnectorDatallm', 'GET', query_params={'databaseConnectorId': database_connector_id, 'query': query}, is_sync=True)
+
+ def get_database_connector_auth(self, database_connector_id: str) -> DatabaseConnector:
+ """Get the authentication details for a given database connector.
+
+ Args:
+ database_connector_id (str): The unique ID associated with the database connector.
+
+ Returns:
+ DatabaseConnector: The database connector with the authentication details."""
+ return self._call_api('getDatabaseConnectorAuth', 'GET', query_params={'databaseConnectorId': database_connector_id}, parse_type=DatabaseConnector)
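# Usage sketch (not part of the patch): the connector id and SQL below are placeholders.
rows = client.query_database_connector_datallm(
    database_connector_id='DATABASE_CONNECTOR_ID',
    query='SELECT * FROM sales LIMIT 10')            # must be a read-only query
connector = client.get_database_connector_auth(
    database_connector_id='DATABASE_CONNECTOR_ID')   # DatabaseConnector with auth details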
def list_application_connectors(self) -> List[ApplicationConnector]:
"""Retrieves a list of all application connectors along with their associated attributes.
@@ -1545,17 +1616,41 @@ def list_application_connector_objects(self, application_connector_id: str) -> l
application_connector_id (str): Unique string identifier for the application connector."""
return self._call_api('listApplicationConnectorObjects', 'GET', query_params={'applicationConnectorId': application_connector_id})
- def get_connector_auth(self, service: Union[ApplicationConnectorType, str] = None, application_connector_id: str = None, scopes: List = None) -> ApplicationConnector:
+ def get_connector_auth(self, service: Union[ApplicationConnectorType, str] = None, application_connector_id: str = None, scopes: List = None, generic_oauth_service: str = None) -> UnifiedConnector:
"""Get the authentication details for a given connector. For user level connectors, the service is required. For org level connectors, the application_connector_id is required.
Args:
service (ApplicationConnectorType): The service name.
application_connector_id (str): The unique ID associated with the connector.
scopes (List): The scopes to request for the connector.
+ generic_oauth_service (str): For GENERIC_OAUTH service, specify the OAuth provider (e.g., 'spotify').
+
+ Returns:
+ UnifiedConnector: The application connector with the authentication details."""
+ return self._call_api('getConnectorAuth', 'GET', query_params={'service': service, 'applicationConnectorId': application_connector_id, 'scopes': scopes, 'genericOauthService': generic_oauth_service}, parse_type=UnifiedConnector)
+
+ def get_user_connector_auth(self, service: Union[ApplicationConnectorType, str], scopes: List = None, generic_oauth_service: str = None, config_connector: str = None) -> UnifiedConnector:
+ """Get the authentication details for a given user level connector.
+
+ Args:
+ service (ApplicationConnectorType): The service name.
+ scopes (List): The scopes to request for the connector.
+ generic_oauth_service (str): For GENERIC_OAUTH service, specify the OAuth provider (e.g., 'spotify').
+ config_connector (str): The config connector id.
+
+ Returns:
+ UnifiedConnector: The application connector with the authentication details."""
+ return self._call_api('getUserConnectorAuth', 'GET', query_params={'service': service, 'scopes': scopes, 'genericOauthService': generic_oauth_service, 'configConnector': config_connector}, parse_type=UnifiedConnector)
+
+ def get_user_mcp_connector_auth(self, mcp_server_name: str = None) -> ApplicationConnector:
+ """Get the auth for a MCP connector
+
+ Args:
+ mcp_server_name (str): The name of the MCP server
Returns:
- ApplicationConnector: The application connector with the authentication details."""
- return self._call_api('getConnectorAuth', 'GET', query_params={'service': service, 'applicationConnectorId': application_connector_id, 'scopes': scopes}, parse_type=ApplicationConnector)
+ ApplicationConnector: The MCP connector"""
+ return self._call_api('getUserMcpConnectorAuth', 'GET', query_params={'mcpServerName': mcp_server_name}, parse_type=ApplicationConnector)
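# Usage sketch (not part of the patch): 'GENERIC_OAUTH' and 'spotify' follow the docstring's own
# example; the MCP server name is a placeholder.
spotify_auth = client.get_user_connector_auth(
    service='GENERIC_OAUTH', generic_oauth_service='spotify')
mcp_auth = client.get_user_mcp_connector_auth(mcp_server_name='my_mcp_server')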
def list_streaming_connectors(self) -> List[StreamingConnector]:
"""Retrieves a list of all streaming connectors along with their corresponding attributes.
@@ -1806,15 +1901,16 @@ def list_model_artifacts_exports(self, model_id: str, limit: int = 25) -> List[M
list[ModelArtifactsExport]: List of model artifacts exports."""
return self._call_api('listModelArtifactsExports', 'GET', query_params={'modelId': model_id, 'limit': limit}, parse_type=ModelArtifactsExport)
- def list_model_monitors(self, project_id: str) -> List[ModelMonitor]:
+ def list_model_monitors(self, project_id: str, limit: int = None) -> List[ModelMonitor]:
"""Retrieves the list of model monitors in the specified project.
Args:
project_id (str): Unique string identifier associated with the project.
+            limit (int): Maximum number of model monitors to return. An internal default limit is applied if not set.
Returns:
list[ModelMonitor]: A list of model monitors."""
- return self._call_api('listModelMonitors', 'GET', query_params={'projectId': project_id}, parse_type=ModelMonitor)
+ return self._call_api('listModelMonitors', 'GET', query_params={'projectId': project_id, 'limit': limit}, parse_type=ModelMonitor)
def describe_model_monitor(self, model_monitor_id: str) -> ModelMonitor:
"""Retrieves a full description of the specified model monitor.
@@ -2300,15 +2396,16 @@ def get_batch_prediction_connector_errors(self, batch_prediction_version: str) -
batch_prediction_version (str): Unique string identifier of the batch prediction job to get the errors for."""
return self._call_api('getBatchPredictionConnectorErrors', 'GET', query_params={'batchPredictionVersion': batch_prediction_version}, streamable_response=True)
- def list_batch_predictions(self, project_id: str) -> List[BatchPrediction]:
+ def list_batch_predictions(self, project_id: str, limit: int = None) -> List[BatchPrediction]:
"""Retrieves a list of batch predictions in the project.
Args:
project_id (str): Unique string identifier of the project.
+            limit (int): Maximum number of batch predictions to return. An internal default limit is applied if not set.
Returns:
list[BatchPrediction]: List of batch prediction jobs."""
- return self._call_api('listBatchPredictions', 'GET', query_params={'projectId': project_id}, parse_type=BatchPrediction)
+ return self._call_api('listBatchPredictions', 'GET', query_params={'projectId': project_id, 'limit': limit}, parse_type=BatchPrediction)
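# Usage sketch (not part of the patch): the new `limit` argument caps the result size;
# the project id is a placeholder. list_model_monitors accepts the same argument.
batch_predictions = client.list_batch_predictions(project_id='PROJECT_ID', limit=25)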
def describe_batch_prediction(self, batch_prediction_id: str) -> BatchPrediction:
"""Describe the batch prediction.
@@ -2601,12 +2698,16 @@ def get_organization_secret(self, secret_key: str) -> OrganizationSecret:
OrganizationSecret: The secret."""
return self._call_api('getOrganizationSecret', 'GET', query_params={'secretKey': secret_key}, parse_type=OrganizationSecret)
- def list_organization_secrets(self) -> List[OrganizationSecret]:
+ def list_organization_secrets(self, decrypt_value: bool = False, secret_type: Union[OrganizationSecretType, str] = OrganizationSecretType.ORG_SECRET) -> List[OrganizationSecret]:
"""Lists all secrets for an organization.
+ Args:
+ decrypt_value (bool): Whether to decrypt the secret values.
+ secret_type (OrganizationSecretType): Filter secrets by type. Use OrganizationSecretType enum values.
+
Returns:
- list[OrganizationSecret]: list of secrets belonging to the organization."""
- return self._call_api('listOrganizationSecrets', 'GET', query_params={}, parse_type=OrganizationSecret)
+ list[OrganizationSecret]: List of secrets."""
+ return self._call_api('listOrganizationSecrets', 'GET', query_params={'decryptValue': decrypt_value, 'secretType': secret_type}, parse_type=OrganizationSecret)
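# Usage sketch (not part of the patch): filtering secrets by type with the enum the patch imports.
from abacusai.api_class import OrganizationSecretType

secrets = client.list_organization_secrets(
    decrypt_value=False, secret_type=OrganizationSecretType.ORG_SECRET)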
def get_app_user_group_sign_in_token(self, user_group_id: str, email: str, name: str) -> AppUserGroupSignInToken:
"""Get a token for a user group user to sign in.
@@ -2676,7 +2777,7 @@ def list_chat_sessions(self, most_recent_per_project: bool = False) -> ChatSessi
ChatSession: The chat sessions with Data Science Co-pilot"""
return self._call_api('listChatSessions', 'GET', query_params={'mostRecentPerProject': most_recent_per_project}, parse_type=ChatSession)
- def get_deployment_conversation(self, deployment_conversation_id: str = None, external_session_id: str = None, deployment_id: str = None, filter_intermediate_conversation_events: bool = True, get_unused_document_uploads: bool = False) -> DeploymentConversation:
+ def get_deployment_conversation(self, deployment_conversation_id: str = None, external_session_id: str = None, deployment_id: str = None, filter_intermediate_conversation_events: bool = True, get_unused_document_uploads: bool = False, start: int = None, limit: int = None, include_all_versions: bool = False) -> DeploymentConversation:
"""Gets a deployment conversation.
Args:
@@ -2685,12 +2786,23 @@ def get_deployment_conversation(self, deployment_conversation_id: str = None, ex
deployment_id (str): The deployment this conversation belongs to. This is required if not logged in.
filter_intermediate_conversation_events (bool): If true, intermediate conversation events will be filtered out. Default is true.
get_unused_document_uploads (bool): If true, unused document uploads will be returned. Default is false.
+            start (int): The start index of the conversation history to return.
+            limit (int): The maximum number of conversation entries to return.
+ include_all_versions (bool): If True, includes all versions of the last bot message for version switching functionality.
Returns:
DeploymentConversation: The deployment conversation."""
- return self._proxy_request('getDeploymentConversation', 'GET', query_params={'deploymentConversationId': deployment_conversation_id, 'externalSessionId': external_session_id, 'deploymentId': deployment_id, 'filterIntermediateConversationEvents': filter_intermediate_conversation_events, 'getUnusedDocumentUploads': get_unused_document_uploads}, parse_type=DeploymentConversation, is_sync=True)
+ return self._proxy_request('getDeploymentConversation', 'GET', query_params={'deploymentConversationId': deployment_conversation_id, 'externalSessionId': external_session_id, 'deploymentId': deployment_id, 'filterIntermediateConversationEvents': filter_intermediate_conversation_events, 'getUnusedDocumentUploads': get_unused_document_uploads, 'start': start, 'limit': limit, 'includeAllVersions': include_all_versions}, parse_type=DeploymentConversation, is_sync=True)
+
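# Usage sketch (not part of the patch): paging through a conversation with the new
# start/limit arguments; the conversation id is a placeholder.
conversation = client.get_deployment_conversation(
    deployment_conversation_id='DEPLOYMENT_CONVERSATION_ID',
    start=0, limit=20, include_all_versions=False)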
+ def get_deployment_conversation_file(self, deployment_conversation_id: str, file_path: str) -> io.BytesIO:
+ """Gets a deployment conversation file.
- def list_deployment_conversations(self, deployment_id: str = None, external_application_id: str = None, conversation_type: Union[DeploymentConversationType, str] = None, fetch_last_llm_info: bool = False) -> List[DeploymentConversation]:
+ Args:
+ deployment_conversation_id (str): Unique ID of the conversation.
+ file_path (str): The path of the file to get."""
+ return self._proxy_request('getDeploymentConversationFile', 'GET', query_params={'deploymentConversationId': deployment_conversation_id, 'filePath': file_path}, is_sync=True, streamable_response=True)
+
+ def list_deployment_conversations(self, deployment_id: str = None, external_application_id: str = None, conversation_type: Union[DeploymentConversationType, str] = None, fetch_last_llm_info: bool = False, limit: int = None, search: str = None) -> List[DeploymentConversation]:
"""Lists all conversations for the given deployment and current user.
Args:
@@ -2698,10 +2810,12 @@ def list_deployment_conversations(self, deployment_id: str = None, external_appl
external_application_id (str): The external application id associated with the deployment conversation. If specified, only conversations created on that application will be listed.
conversation_type (DeploymentConversationType): The type of the conversation indicating its origin.
fetch_last_llm_info (bool): If true, the LLM info for the most recent conversation will be fetched. Only applicable for system-created bots.
+            limit (int): The maximum number of conversations to return. Defaults to 600.
+ search (str): The search query to filter conversations by title.
Returns:
list[DeploymentConversation]: The deployment conversations."""
- return self._proxy_request('listDeploymentConversations', 'GET', query_params={'deploymentId': deployment_id, 'externalApplicationId': external_application_id, 'conversationType': conversation_type, 'fetchLastLlmInfo': fetch_last_llm_info}, parse_type=DeploymentConversation, is_sync=True)
+ return self._proxy_request('listDeploymentConversations', 'GET', query_params={'deploymentId': deployment_id, 'externalApplicationId': external_application_id, 'conversationType': conversation_type, 'fetchLastLlmInfo': fetch_last_llm_info, 'limit': limit, 'search': search}, parse_type=DeploymentConversation, is_sync=True)
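# Usage sketch (not part of the patch): the deployment id and search term are placeholders.
conversations = client.list_deployment_conversations(
    deployment_id='DEPLOYMENT_ID', limit=50, search='quarterly report')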
def export_deployment_conversation(self, deployment_conversation_id: str = None, external_session_id: str = None) -> DeploymentConversationExport:
"""Export a Deployment Conversation.
@@ -2714,6 +2828,16 @@ def export_deployment_conversation(self, deployment_conversation_id: str = None,
DeploymentConversationExport: The deployment conversation html export."""
return self._proxy_request('exportDeploymentConversation', 'GET', query_params={'deploymentConversationId': deployment_conversation_id, 'externalSessionId': external_session_id}, parse_type=DeploymentConversationExport, is_sync=True)
+ def list_user_group_object_permissions(self) -> List[UserGroupObjectPermission]:
+ """List all user groups permissions associated with the objects in the organization.
+
+ If no associated user groups, the object is public among developers and admins.
+
+
+ Returns:
+ list[UserGroupObjectPermission]: List of user group object permissions in the organization."""
+ return self._call_api('listUserGroupObjectPermissions', 'GET', query_params={}, parse_type=UserGroupObjectPermission)
+
def get_app_user_group(self, user_group_id: str) -> AppUserGroup:
"""Gets an App User Group.
@@ -2724,6 +2848,16 @@ def get_app_user_group(self, user_group_id: str) -> AppUserGroup:
AppUserGroup: The App User Group."""
return self._call_api('getAppUserGroup', 'GET', query_params={'userGroupId': user_group_id}, parse_type=AppUserGroup)
+ def get_app_user_group_from_name(self, name: str = None) -> AppUserGroup:
+ """Gets an App User Group by name.
+
+ Args:
+ name (str): The name of the app user group to retrieve.
+
+ Returns:
+ AppUserGroup: The app user group with the specified name."""
+ return self._call_api('getAppUserGroupFromName', 'GET', query_params={'name': name}, parse_type=AppUserGroup)
+
def describe_external_application(self, external_application_id: str) -> ExternalApplication:
"""Describes an External Application.
@@ -2876,6 +3010,14 @@ def describe_document_retriever_version(self, document_retriever_version: str) -
DocumentRetrieverVersion: The document retriever version object."""
return self._call_api('describeDocumentRetrieverVersion', 'GET', query_params={'documentRetrieverVersion': document_retriever_version}, parse_type=DocumentRetrieverVersion)
+ def list_route_llm_models(self):
+ """This function is used to list all the models and their rates, for routellm api.
+
+ Return:
+ list of dicts, each dict contains the model name, description, id, and api price info.
+ """
+ return self._call_api('listRouteLLMModels', 'GET', query_params={})
+
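# Usage sketch (not part of the patch): each entry is a dict with the model name,
# description, id, and API price info, per the docstring above.
for model in client.list_route_llm_models():
    print(model)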
def get_source_code_info(train_function: callable, predict_function: callable = None, predict_many_function: callable = None, initialize_function: callable = None, common_functions: list = None):
if not train_function:
@@ -2958,6 +3100,101 @@ def create_dataset_from_pandas(self, feature_group_table_name: str, df: pd.DataF
table_name=feature_group_table_name, file_format='PARQUET')
return self._upload_from_pandas(upload, df)
+ def get_assignments_online_with_new_inputs(self, deployment_token: str, deployment_id: str, assignments_df: pd.DataFrame = None, constraints_df: pd.DataFrame = None, constraint_equations_df: pd.DataFrame = None, feature_mapping_dict: dict = None, solve_time_limit_seconds: float = None, optimality_gap_limit: float = None):
+ """
+        Get alternative positive assignments for a given query. Optimal assignments are ignored and the alternative assignments are returned instead.
+
+ Args:
+ deployment_token (str): The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be safely embedded in an application or website.
+ deployment_id (ID): The unique identifier of a deployment created under the project.
+ assignments_df (pd.DataFrame): A dataframe with all the variables involved in the optimization problem
+ constraints_df (pd.DataFrame): A dataframe of individual constraints, and variables in them
+            constraint_equations_df (pd.DataFrame): A dataframe describing the operator / constant / penalty etc. of each constraint.
+                This provides the data needed to interpret the constraints_df.
+ solve_time_limit_seconds (float): Maximum time in seconds to spend solving the query.
+            optimality_gap_limit (float): The optimality gap to come within, after which the solution is accepted as valid (0 means only an optimal solution is accepted). It is defined as abs(best_solution_found - best_bound) / abs(best_solution_found)
+
+ Returns:
+ Dict: The assignments for a given query.
+ """
+
+ def _serialize_df_with_dtypes(df):
+ # Get dtypes dictionary
+ dtypes_dict = df.dtypes.apply(lambda x: str(x)).to_dict()
+
+ # Handle special dtypes
+ for col, dtype in dtypes_dict.items():
+ if 'datetime' in dtype.lower():
+ dtypes_dict[col] = 'datetime'
+ elif 'category' in dtype.lower():
+ dtypes_dict[col] = 'category'
+
+ # Convert DataFrame to JSON
+ json_data = df.to_json(date_format='iso')
+
+ # Create final dictionary with both data and dtypes
+ serialized = {
+ 'data': json_data,
+ 'dtypes': dtypes_dict
+ }
+
+ return json.dumps(serialized)
+
+ serialized_assignments_df = _serialize_df_with_dtypes(assignments_df)
+ serialized_constraints_df = _serialize_df_with_dtypes(constraints_df)
+ serialized_constraint_equations_df = _serialize_df_with_dtypes(
+ constraint_equations_df)
+
+ query_data = {'assignments_df': serialized_assignments_df,
+ 'constraints_df': serialized_constraints_df,
+ 'constraint_equations_df': serialized_constraint_equations_df,
+ 'feature_mapping_dict': feature_mapping_dict}
+
+ result = self.get_assignments_online_with_new_serialized_inputs(
+ deployment_token=deployment_token, deployment_id=deployment_id, query_data=query_data, solve_time_limit_seconds=solve_time_limit_seconds, optimality_gap_limit=optimality_gap_limit)
+ return result
+
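# Usage sketch (not part of the patch): the dataframe columns below are illustrative only; the real
# schemas must match the feature groups used during training. Token and deployment id are placeholders.
import pandas as pd

assignments_df = pd.DataFrame({'variable': ['x1', 'x2'], 'cost': [1.0, 2.0]})
constraints_df = pd.DataFrame({'constraint': ['c1', 'c1'], 'variable': ['x1', 'x2']})
constraint_equations_df = pd.DataFrame({'constraint': ['c1'], 'operator': ['<='], 'constant': [10.0]})

result = client.get_assignments_online_with_new_inputs(
    deployment_token='DEPLOYMENT_TOKEN', deployment_id='DEPLOYMENT_ID',
    assignments_df=assignments_df, constraints_df=constraints_df,
    constraint_equations_df=constraint_equations_df,
    solve_time_limit_seconds=30.0, optimality_gap_limit=0.01)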
+ def get_optimization_input_dataframes_with_new_inputs(self, deployment_token: str, deployment_id: str, query_data: dict):
+ """
+        Get assignments for a given query, with new inputs.
+
+ Args:
+ deployment_token (str): The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be safely embedded in an application or website.
+ deployment_id (str): The unique identifier of a deployment created under the project.
+            query_data (dict): A dictionary mapping names to dataframes for the updated FGs in the FG tree, used to compute new top-level FGs for the online solve. Keys are the FG names (the same as those used during training) and values are the corresponding dataframes.
+
+ Returns:
+            Dict: The output dataframes for a given query, returned in serialized form, i.e. a dict mapping names to serialized dataframes.
+ """
+ def _serialize_df_with_dtypes(df):
+ # Get dtypes dictionary
+ dtypes_dict = df.dtypes.apply(lambda x: str(x)).to_dict()
+
+ # Handle special dtypes
+ for col, dtype in dtypes_dict.items():
+ if 'datetime' in dtype.lower():
+ dtypes_dict[col] = 'datetime'
+ elif 'category' in dtype.lower():
+ dtypes_dict[col] = 'category'
+
+ # Convert DataFrame to JSON
+ json_data = df.to_json(date_format='iso')
+
+ # Create final dictionary with both data and dtypes
+ serialized = {
+ 'data': json_data,
+ 'dtypes': dtypes_dict
+ }
+
+ return json.dumps(serialized)
+
+ query_data = {name: _serialize_df_with_dtypes(
+ df) for name, df in query_data.items()}
+
+ result = self.get_optimization_inputs_from_serialized(
+ deployment_token=deployment_token, deployment_id=deployment_id, query_data=query_data)
+ return result
+
def create_dataset_version_from_pandas(self, table_name_or_id: str, df: pd.DataFrame, clean_column_names: bool = False) -> Dataset:
"""
[Deprecated]
@@ -3277,7 +3514,8 @@ def create_python_function_from_function(self,
function: callable,
function_variable_mappings: list = None,
package_requirements: list = None,
- function_type: str = PythonFunctionType.FEATURE_GROUP.value):
+ function_type: str = PythonFunctionType.FEATURE_GROUP.value,
+ description: str = None):
"""
Creates a custom Python function
@@ -3287,6 +3525,7 @@ def create_python_function_from_function(self,
function_variable_mappings (List): List of Python function arguments.
package_requirements (List): List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
function_type (PythonFunctionType): Type of Python function to create. Default is FEATURE_GROUP, but can also be PLOTLY_FIG.
+ description (str): Description of the Python function.
"""
function_source = None
python_function_name = None
@@ -3301,7 +3540,8 @@ def create_python_function_from_function(self,
function_name=python_function_name,
function_variable_mappings=function_variable_mappings,
package_requirements=package_requirements,
- function_type=function_type)
+ function_type=function_type,
+ description=description)
def create_feature_group_from_python_function(self, function: callable, table_name: str, input_tables: list = None, python_function_name: str = None, python_function_bindings: list = None, cpu_size: str = None, memory: int = None, package_requirements: list = None, included_modules: list = None):
"""
@@ -3680,7 +3920,7 @@ def run_workflow_graph(self, workflow_graph: WorkflowGraph, sample_user_inputs:
def execute_workflow_node(self, node: WorkflowGraphNode, inputs: dict):
"""
- Execute the workflow node given input arguments.
+ Execute the workflow node given input arguments. This is to be used for testing purposes only.
Args:
node (WorkflowGraphNode): The workflow node to be executed.
@@ -3802,6 +4042,20 @@ def execute_feature_group_sql(self, sql, fix_query_on_error: bool = False, timeo
execute_query.wait_for_execution(timeout=timeout, delay=delay)
return execute_query.load_as_pandas()
+ def _get_agent_client_type(self):
+ """
+ Returns the client type for the current request context.
+
+ Returns:
+ AgentClientType: The client type for the current request context.
+ """
+ if self._is_async_app_caller():
+ return AgentClientType.MESSAGING_APP
+ elif self._is_proxy_app_caller():
+ return AgentClientType.CHAT_UI
+ else:
+ return AgentClientType.API
+
def get_agent_context_chat_history(self):
"""
Gets a history of chat messages from the current request context. Applicable within a AIAgent
@@ -3921,7 +4175,7 @@ def get_agent_context_blob_inputs(self):
def get_agent_context_user_info(self):
"""
- Gets information about the user interacting with the agent.
+        Gets information about the user interacting with the agent, and the user action if applicable.
Applicable within a AIAgent execute function.
Returns:
@@ -3930,20 +4184,28 @@ def get_agent_context_user_info(self):
user_info = get_object_from_context(
self, _request_context, 'user_info', dict)
if user_info:
+ user_info['client_type'] = self._get_agent_client_type()
+ user_action_label = get_object_from_context(
+ self, _request_context, 'action_label', str)
+ if user_action_label:
+ user_info['user_action_info'] = {
+ 'action_label': user_action_label}
return user_info
else:
raise ValueError(
'User information not available. Please use UI interface for this agent to work.')
- def get_agent_runtime_config(self, key: str):
+ def get_runtime_config(self, key: str):
"""
- Gets the deployment level runtime config for the agent
+ Retrieve the value of a specified configuration key from the deployment's runtime settings.
+        These settings can be configured on the deployment details page in the UI.
+        Currently supported for AI Agents, Custom Python Models and Prediction Operators.
Args:
- key(str): Key for which the config value is to be fetched
+ key (str): The configuration key whose value is to be fetched.
Returns:
- str: Config value for the input key
+ str: The value associated with the specified configuration key, or None if the key does not exist.
"""
runtime_config = get_object_from_context(
self, _request_context, 'deployment_runtime_config', dict) or {}
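# Usage sketch (not part of the patch): inside an AI Agent execute function. The config key
# 'response_language' is a made-up example; any key set on the deployment's runtime settings works.
language = client.get_runtime_config('response_language') or 'en'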
@@ -3983,7 +4245,7 @@ def execute_chatllm_computer_streaming(self, computer_id: str, prompt: str, is_t
text (str): The text responses from the computer.
"""
request_id = self._get_agent_app_request_id()
- caller = self._get_agent_async_app_caller()
+ caller = self._get_agent_caller()
proxy_caller = self._is_proxy_app_caller()
if not request_id or not caller:
@@ -4049,7 +4311,7 @@ def streaming_evaluate_prompt(self, prompt: str = None, system_message: str = No
Returns:
text (str): The response from the model.
"""
- caller = self._get_agent_async_app_caller()
+ caller = self._get_agent_caller()
request_id = self._get_agent_app_request_id()
if prompt and not isinstance(prompt, str):
raise ValueError('prompt must be a string')
@@ -4059,11 +4321,17 @@ def streaming_evaluate_prompt(self, prompt: str = None, system_message: str = No
if caller and request_id:
is_agent = get_object_from_context(
self, _request_context, 'is_agent', bool)
- is_api = get_object_from_context(
+ is_agent_api = get_object_from_context(
self, _request_context, 'is_agent_api', bool)
-
- result = self._stream_llm_call(prompt=prompt, system_message=system_message, llm_name=llm_name, max_tokens=max_tokens, temperature=temperature,
- messages=messages, response_type=response_type, json_response_schema=json_response_schema, section_key=section_key, is_agent=is_agent, is_api=is_api)
+ deployment_id = get_object_from_context(
+ self, _request_context, 'deployment_id', str)
+ user_id = None
+ user_info = get_object_from_context(
+ self, _request_context, 'user_info', dict)
+ if user_info:
+ user_id = user_info.get('user_id')
+ result = self._stream_llm_call(prompt=prompt, system_message=system_message, llm_name=llm_name, max_tokens=max_tokens, temperature=temperature, messages=messages, response_type=response_type,
+ json_response_schema=json_response_schema, section_key=section_key, is_agent=is_agent, is_agent_api=is_agent_api, deployment_id=deployment_id, user_id=user_id)
else:
result = self.evaluate_prompt(prompt, system_message=system_message, llm_name=llm_name, max_tokens=max_tokens,
temperature=temperature, messages=messages, response_type=response_type, json_response_schema=json_response_schema).content
@@ -4113,9 +4381,9 @@ def _get_agent_app_request_id(self):
"""
return get_object_from_context(self, _request_context, 'request_id', str)
- def _get_agent_async_app_caller(self):
+ def _get_agent_caller(self):
"""
- Gets the caller for the current request context of async app. Applicable within a AIAgent execute function.
+        Gets the caller for the current request context. Applicable within an AIAgent execute function.
Returns:
str: The caller for the current request being processed by the Agent.
@@ -4124,13 +4392,22 @@ def _get_agent_async_app_caller(self):
def _is_proxy_app_caller(self):
"""
- Gets the caller for the current request context of async app. Applicable within a AIAgent execute function.
+        Checks if the caller is the cluster-proxy app.
Returns:
- bool: True if the caller is proxy app.
+            bool: True if the caller is the cluster-proxy app.
"""
return get_object_from_context(self, _request_context, 'proxy_app_caller', str) is not None
+ def _is_async_app_caller(self):
+ """
+        Checks if the caller is the async app.
+
+        Returns:
+            bool: True if the caller is the async app.
+ """
+ return get_object_from_context(self, _request_context, 'async_app_caller', str) is not None
+
def stream_message(self, message: str, is_transient: bool = False) -> None:
"""
Streams a message to the current request context. Applicable within a AIAgent execute function.
@@ -4141,7 +4418,7 @@ def stream_message(self, message: str, is_transient: bool = False) -> None:
is_transient (bool): If True, the message will be marked as transient and will not be persisted on reload in external chatllm UI. Transient messages are useful for streaming interim updates or results.
"""
request_id = self._get_agent_app_request_id()
- caller = self._get_agent_async_app_caller()
+ caller = self._get_agent_caller()
proxy_caller = self._is_proxy_app_caller()
if request_id and caller:
extra_args = {'stream_type': StreamType.MESSAGE.value,
@@ -4163,7 +4440,7 @@ def stream_section_output(self, section_key: str, value: str) -> None:
value (Any): The output contents.
"""
request_id = self._get_agent_app_request_id()
- caller = self._get_agent_async_app_caller()
+ caller = self._get_agent_caller()
proxy_caller = self._is_proxy_app_caller()
if _is_json_serializable(value):
message_args = {'id': section_key,
@@ -4189,7 +4466,7 @@ def stream_response_section(self, response_section: ResponseSection):
response_section (ResponseSection): The response section to be streamed.
"""
request_id = self._get_agent_app_request_id()
- caller = self._get_agent_async_app_caller()
+ caller = self._get_agent_caller()
proxy_caller = self._is_proxy_app_caller()
if request_id and caller:
segment = response_section.to_dict()
@@ -4203,7 +4480,7 @@ def stream_response_section(self, response_section: ResponseSection):
def _stream_llm_call(self, section_key=None, **kwargs):
request_id = self._get_agent_app_request_id()
- caller = self._get_agent_async_app_caller()
+ caller = self._get_agent_caller()
proxy_caller = self._is_proxy_app_caller()
if not request_id or not caller:
logging.info('Please use evaluate_prompt for local testing.')
@@ -4420,7 +4697,7 @@ def create_model_version_from_local_files(self, model_id: str, optional_artifact
ModelUpload: Collection of upload IDs to upload the model artifacts."""
return self._call_api('createModelVersionFromLocalFiles', 'POST', query_params={}, body={'modelId': model_id, 'optionalArtifacts': optional_artifacts}, parse_type=ModelUpload)
- def get_streaming_chat_response(self, deployment_token: str, deployment_id: str, messages: list, llm_name: str = None, num_completion_tokens: int = None, system_message: str = None, temperature: float = 0.0, filter_key_values: dict = None, search_score_cutoff: float = None, chat_config: dict = None, ignore_documents: bool = False, include_search_results: bool = False):
+ def get_streaming_chat_response(self, deployment_token: str, deployment_id: str, messages: list, llm_name: str = None, num_completion_tokens: int = None, system_message: str = None, temperature: float = 0.0, filter_key_values: dict = None, search_score_cutoff: float = None, chat_config: dict = None, ignore_documents: bool = False, include_search_results: bool = False, user_info: dict = None):
"""Return an asynchronous generator which continues the conversation based on the input messages and search results.
Args:
@@ -4435,7 +4712,8 @@ def get_streaming_chat_response(self, deployment_token: str, deployment_id: str,
search_score_cutoff (float): Cutoff for the document retriever score. Matching search results below this score will be ignored.
chat_config (dict): A dictionary specifying the query chat config override.
ignore_documents (bool): If True, will ignore any documents and search results, and only use the messages to generate a response.
- include_search_results (bool): If True, will also return search results, if relevant. """
+ include_search_results (bool): If True, will also return search results, if relevant.
+            user_info (dict): Information about the user to act on behalf of, for user-restricted data sources. """
headers = {'APIKEY': self.api_key}
body = {
'deploymentToken': deployment_token,
@@ -4449,7 +4727,8 @@ def get_streaming_chat_response(self, deployment_token: str, deployment_id: str,
'searchScoreCutoff': search_score_cutoff,
'chatConfig': chat_config,
'ignoreDocuments': ignore_documents,
- 'includeSearchResults': include_search_results
+ 'includeSearchResults': include_search_results,
+ 'userInfo': user_info
}
endpoint = self._get_proxy_endpoint(deployment_id, deployment_token)
if endpoint is None:
@@ -4457,7 +4736,7 @@ def get_streaming_chat_response(self, deployment_token: str, deployment_id: str,
'API not supported, Please contact Abacus.ai support')
return sse_asynchronous_generator(f'{endpoint}/api/getStreamingChatResponse', headers, body)
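# Usage sketch (not part of the patch): consuming the async generator. The message format and all
# identifiers below are illustrative assumptions, not confirmed by the patch.
import asyncio

async def stream_chat():
    generator = client.get_streaming_chat_response(
        deployment_token='DEPLOYMENT_TOKEN', deployment_id='DEPLOYMENT_ID',
        messages=[{'is_user': True, 'text': 'Summarize last quarter.'}],
        user_info={'user_id': 'USER_ID'})
    async for event in generator:
        print(event)

asyncio.run(stream_chat())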
- def get_streaming_conversation_response(self, deployment_token: str, deployment_id: str, message: str, deployment_conversation_id: str = None, external_session_id: str = None, llm_name: str = None, num_completion_tokens: int = None, system_message: str = None, temperature: float = 0.0, filter_key_values: dict = None, search_score_cutoff: float = None, chat_config: dict = None, ignore_documents: bool = False, include_search_results: bool = False):
+ def get_streaming_conversation_response(self, deployment_token: str, deployment_id: str, message: str, deployment_conversation_id: str = None, external_session_id: str = None, llm_name: str = None, num_completion_tokens: int = None, system_message: str = None, temperature: float = 0.0, filter_key_values: dict = None, search_score_cutoff: float = None, chat_config: dict = None, ignore_documents: bool = False, include_search_results: bool = False, user_info: dict = None):
"""Return an asynchronous generator which continues the conversation based on the input messages and search results.
Args:
@@ -4474,7 +4753,8 @@ def get_streaming_conversation_response(self, deployment_token: str, deployment_
search_score_cutoff (float): Cutoff for the document retriever score. Matching search results below this score will be ignored.
chat_config (dict): A dictionary specifying the query chat config override.
ignore_documents (bool): If True, will ignore any documents and search results, and only use the messages to generate a response.
- include_search_results (bool): If True, will also return search results, if relevant. """
+ include_search_results (bool): If True, will also return search results, if relevant.
+            user_info (dict): Information about the user to act on behalf of, for user-restricted data sources. """
headers = {'APIKEY': self.api_key}
body = {
'deploymentToken': deployment_token,
@@ -4490,7 +4770,8 @@ def get_streaming_conversation_response(self, deployment_token: str, deployment_
'searchScoreCutoff': search_score_cutoff,
'chatConfig': chat_config,
'ignoreDocuments': ignore_documents,
- 'includeSearchResults': include_search_results
+ 'includeSearchResults': include_search_results,
+ 'userInfo': user_info
}
endpoint = self._get_proxy_endpoint(deployment_id, deployment_token)
if endpoint is None:
@@ -4630,7 +4911,8 @@ def set_agent_response_document_sources(self, response_document_sources: List[Do
'source': response_document_source.document_source,
'image_ids': response_document_source.image_ids,
'pages': response_document_source.pages,
- 'bounding_boxes': response_document_source.bounding_boxes
+ 'bounding_boxes': response_document_source.bounding_boxes,
+ 'metadata': response_document_source.metadata
} for response_document_source in response_document_sources]
def get_initialized_data(self):
@@ -4644,6 +4926,23 @@ def get_initialized_data(self):
initialized_object = _request_context.model_object
return initialized_object
+ def execute_agent_with_binary_data(self, deployment_token: str, deployment_id: str, arguments: list = None, keyword_arguments: dict = None, deployment_conversation_id: str = None, external_session_id: str = None, blobs: None = None) -> AgentDataExecutionResult:
+ """Executes a deployed AI agent function with binary data as inputs.
+
+ Args:
+ deployment_token (str): The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
+ deployment_id (str): A unique string identifier for the deployment created under the project.
+ arguments (list): Positional arguments to the agent execute function.
+ keyword_arguments (dict): A dictionary where each 'key' represents the parameter name and its corresponding 'value' represents the value of that parameter for the agent execute function.
+ deployment_conversation_id (str): A unique string identifier for the deployment conversation used for the conversation.
+ external_session_id (str): A unique string identifier for the session used for the conversation. If both deployment_conversation_id and external_session_id are not provided, a new session will be created.
+ blobs (None): A dictionary of binary data to use as inputs to the agent execute function.
+
+ Returns:
+ AgentDataExecutionResult: The result of the agent execution"""
+
+ return self._proxy_request('executeAgentWithBinaryData', 'POST', query_params={}, data={'deploymentToken': deployment_token, 'deploymentId': deployment_id, 'arguments': json.dumps(arguments) if arguments else None, 'keywordArguments': json.dumps(keyword_arguments) if keyword_arguments else None, 'deploymentConversationId': deployment_conversation_id, 'externalSessionId': external_session_id}, files=blobs, parse_type=AgentDataExecutionResult, is_sync=True)
+
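# Usage sketch (not part of the patch): the keyword argument name and the file are illustrative;
# the token and deployment id are placeholders.
with open('report.pdf', 'rb') as f:
    result = client.execute_agent_with_binary_data(
        deployment_token='DEPLOYMENT_TOKEN', deployment_id='DEPLOYMENT_ID',
        keyword_arguments={'instruction': 'Summarize the attached file'},
        blobs={'report.pdf': f})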
def add_user_to_organization(self, email: str):
"""Invite a user to your organization. This method will send the specified email address an invitation link to join your organization.
@@ -4734,6 +5033,14 @@ def send_email(self, email: str, subject: str, body: str, is_html: bool = False,
attachments (None): A dictionary where the key is the filename (including the file extension), and the value is either a file-like object (e.g., an open file in binary mode) or raw file data (e.g., bytes)."""
return self._call_api('sendEmail', 'POST', query_params={}, data={'email': json.dumps(email) if (email is not None and not isinstance(email, str)) else email, 'subject': json.dumps(subject) if (subject is not None and not isinstance(subject, str)) else subject, 'body': json.dumps(body) if (body is not None and not isinstance(body, str)) else body, 'isHtml': json.dumps(is_html) if (is_html is not None and not isinstance(is_html, str)) else is_html}, files=attachments)
+ def set_user_role_to_platform(self, workspace: str, email: str):
+ """Set the user's role to have the Abacus.AI platform access.
+
+ Args:
+ workspace (str): The workspace for the user that's getting platform access.
+ email (str): The email for the user that's getting platform access."""
+ return self._call_api('setUserRoleToPlatform', 'PATCH', query_params={}, body={'workspace': workspace, 'email': email})
+
def create_deployment_webhook(self, deployment_id: str, endpoint: str, webhook_event_type: str, payload_template: dict = None) -> Webhook:
"""Create a webhook attached to a given deployment ID.
@@ -4769,12 +5076,25 @@ def create_project(self, name: str, use_case: str) -> Project:
Args:
name (str): The project's name.
- use_case (str): The use case that the project solves. Refer to our [guide on use cases](https://api.abacus.ai/app/help/useCases) for further details of each use case. The following enums are currently available for you to choose from: LANGUAGE_DETECTION, NLP_SENTIMENT, NLP_SEARCH, NLP_CHAT, CHAT_LLM, NLP_SENTENCE_BOUNDARY_DETECTION, NLP_CLASSIFICATION, NLP_SUMMARIZATION, NLP_DOCUMENT_VISUALIZATION, AI_AGENT, EMBEDDINGS_ONLY, MODEL_WITH_EMBEDDINGS, TORCH_MODEL, TORCH_MODEL_WITH_EMBEDDINGS, PYTHON_MODEL, NOTEBOOK_PYTHON_MODEL, DOCKER_MODEL, DOCKER_MODEL_WITH_EMBEDDINGS, CUSTOMER_CHURN, ENERGY, EVENT_ANOMALY_DETECTION, FINANCIAL_METRICS, CUMULATIVE_FORECASTING, FRAUD_ACCOUNT, FRAUD_TRANSACTIONS, CLOUD_SPEND, TIMESERIES_ANOMALY, OPERATIONS_MAINTENANCE, PERS_PROMOTIONS, PREDICTING, FEATURE_STORE, RETAIL, SALES_FORECASTING, SALES_SCORING, FEED_RECOMMEND, USER_RANKINGS, NAMED_ENTITY_RECOGNITION, USER_RECOMMENDATIONS, USER_RELATED, VISION, VISION_REGRESSION, VISION_OBJECT_DETECTION, FEATURE_DRIFT, SCHEDULING, GENERIC_FORECASTING, PRETRAINED_IMAGE_TEXT_DESCRIPTION, PRETRAINED_SPEECH_RECOGNITION, PRETRAINED_STYLE_TRANSFER, PRETRAINED_TEXT_TO_IMAGE_GENERATION, PRETRAINED_OCR_DOCUMENT_TO_TEXT, THEME_ANALYSIS, CLUSTERING, CLUSTERING_TIMESERIES, FINETUNED_LLM, PRETRAINED_INSTRUCT_PIX2PIX, PRETRAINED_TEXT_CLASSIFICATION.
+ use_case (str): The use case that the project solves. Refer to our [guide on use cases](https://api.abacus.ai/app/help/developer-platform/useCases) for further details of each use case. The following enums are currently available for you to choose from: LANGUAGE_DETECTION, NLP_SENTIMENT, NLP_SEARCH, NLP_CHAT, CHAT_LLM, NLP_SENTENCE_BOUNDARY_DETECTION, NLP_CLASSIFICATION, NLP_SUMMARIZATION, NLP_DOCUMENT_VISUALIZATION, AI_AGENT, EMBEDDINGS_ONLY, MODEL_WITH_EMBEDDINGS, TORCH_MODEL, TORCH_MODEL_WITH_EMBEDDINGS, PYTHON_MODEL, NOTEBOOK_PYTHON_MODEL, DOCKER_MODEL, DOCKER_MODEL_WITH_EMBEDDINGS, CUSTOMER_CHURN, ENERGY, EVENT_ANOMALY_DETECTION, FINANCIAL_METRICS, CUMULATIVE_FORECASTING, FRAUD_ACCOUNT, FRAUD_TRANSACTIONS, CLOUD_SPEND, TIMESERIES_ANOMALY, OPERATIONS_MAINTENANCE, PERS_PROMOTIONS, PREDICTING, FEATURE_STORE, RETAIL, SALES_FORECASTING, SALES_SCORING, FEED_RECOMMEND, USER_RANKINGS, NAMED_ENTITY_RECOGNITION, USER_RECOMMENDATIONS, USER_RELATED, VISION, VISION_REGRESSION, VISION_OBJECT_DETECTION, FEATURE_DRIFT, SCHEDULING, GENERIC_FORECASTING, PRETRAINED_IMAGE_TEXT_DESCRIPTION, PRETRAINED_SPEECH_RECOGNITION, PRETRAINED_STYLE_TRANSFER, PRETRAINED_TEXT_TO_IMAGE_GENERATION, PRETRAINED_OCR_DOCUMENT_TO_TEXT, THEME_ANALYSIS, CLUSTERING, CLUSTERING_TIMESERIES, FINETUNED_LLM, PRETRAINED_INSTRUCT_PIX2PIX, PRETRAINED_TEXT_CLASSIFICATION.
Returns:
Project: This object represents the newly created project."""
return self._call_api('createProject', 'POST', query_params={}, body={'name': name, 'useCase': use_case}, parse_type=Project)
+ def list_projects_dashboard(self, updated_filter: int = None, limit: int = 50, since_project_id: str = None, search: str = None, starred: bool = None, tag: str = None, tags: list = None) -> dict:
+ """Retrieves a list of projects for dashboard display with filtering and pagination support.
+
+ Args:
+ updated_filter (int): Unix timestamp to filter projects updated after this time.
+ limit (int): The maximum number of projects to return (default: 50).
+ since_project_id (str): The ID of the project after which the list starts for pagination.
+ search (str): Search term to filter projects by name, creator email, or use case.
+ starred (bool): Filter by starred status (True for starred, False for non-starred).
+ tag (str): Filter by a single tag (deprecated, use tags instead).
+ tags (list): List of tags to filter projects by."""
+ return self._call_api('listProjectsDashboard', 'POST', query_params={}, body={'updatedFilter': updated_filter, 'limit': limit, 'sinceProjectId': since_project_id, 'search': search, 'starred': starred, 'tag': tag, 'tags': tags})
+
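# Usage sketch (not part of the patch): all filter values below are illustrative.
dashboard = client.list_projects_dashboard(limit=20, search='forecast', starred=True, tags=['demo'])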
def rename_project(self, project_id: str, name: str):
"""This method renames a project after it is created.
@@ -4814,6 +5134,17 @@ def remove_project_tags(self, project_id: str, tags: list):
tags (list): The tags to remove from the project."""
return self._call_api('removeProjectTags', 'DELETE', query_params={'projectId': project_id, 'tags': tags})
+ def get_raw_data_from_realtime_dataset(self, dataset_id: str, check_permissions: bool = False, start_time: str = None, end_time: str = None, column_filter: dict = None) -> Dict:
+ """Returns raw data from a realtime dataset. Only Microsoft Teams datasets are supported currently due to data size constraints in realtime datasets.
+
+ Args:
+ dataset_id (str): The unique ID associated with the dataset.
+ check_permissions (bool): If True, checks user permissions using session email.
+ start_time (str): Start time filter (inclusive) for created_date_time_t in ISO 8601 format (e.g. 2025-05-13T08:25:11Z or 2025-05-13T08:25:11+00:00).
+ end_time (str): End time filter (inclusive) for created_date_time_t in ISO 8601 format (e.g. 2025-05-13T08:25:11Z or 2025-05-13T08:25:11+00:00).
+ column_filter (dict): Dictionary mapping column names to filter values. Only rows matching all column filters will be returned."""
+ return self._call_api('getRawDataFromRealtimeDataset', 'POST', query_params={'datasetId': dataset_id}, body={'checkPermissions': check_permissions, 'startTime': start_time, 'endTime': end_time, 'columnFilter': column_filter})
+
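A hedged sketch of pulling recent rows from a realtime Microsoft Teams dataset; the dataset ID and column filter below are hypothetical:

from abacusai import ApiClient

client = ApiClient('YOUR_API_KEY')  # placeholder API key
rows = client.get_raw_data_from_realtime_dataset(
    dataset_id='your_dataset_id',               # hypothetical dataset ID
    start_time='2025-05-13T00:00:00Z',          # inclusive ISO 8601 bounds
    end_time='2025-05-14T00:00:00Z',
    column_filter={'channel_name': 'general'},  # hypothetical column and value
)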
def add_feature_group_to_project(self, feature_group_id: str, project_id: str, feature_group_type: str = 'CUSTOM_TABLE'):
"""Adds a feature group to a project.
@@ -4863,6 +5194,15 @@ def set_feature_mapping(self, project_id: str, feature_group_id: str, feature_na
list[Feature]: A list of objects that describes the resulting feature group's schema after the feature's featureMapping is set."""
return self._call_api('setFeatureMapping', 'POST', query_params={}, body={'projectId': project_id, 'featureGroupId': feature_group_id, 'featureName': feature_name, 'featureMapping': feature_mapping, 'nestedColumnName': nested_column_name}, parse_type=Feature)
+ def add_project_scope_for_user(self, project_id: str, email: str, scope: list):
+        """Add a user to a project with the specified scopes.
+
+ Args:
+ project_id (str): The project's id.
+ email (str): The user's email.
+ scope (list): The list of project scopes."""
+ return self._call_api('addProjectScopeForUser', 'POST', query_params={}, body={'projectId': project_id, 'email': email, 'scope': scope})
+
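A brief sketch of granting project scopes to a user; the project ID, email, and scope value are placeholders, since valid scope names are not enumerated in this diff:

from abacusai import ApiClient

client = ApiClient('YOUR_API_KEY')  # placeholder API key
client.add_project_scope_for_user(
    project_id='your_project_id',   # hypothetical project ID
    email='teammate@example.com',
    scope=['SOME_PROJECT_SCOPE'],   # placeholder scope value
)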
def add_annotation(self, annotation: dict, feature_group_id: str, feature_name: str, doc_id: str = None, feature_group_row_identifier: str = None, annotation_source: str = 'ui', status: str = None, comments: dict = None, project_id: str = None, save_metadata: bool = False, pages: list = None) -> AnnotationEntry:
"""Add an annotation entry to the database.
@@ -5018,9 +5358,7 @@ def create_merge_feature_group(self, source_feature_group_id: str, table_name: s
description (str): Human-readable description of this feature group.
Returns:
- FeatureGroup: The created feature group.
-Description:
-Creates a new feature group defined as the union of other feature group versions."""
+ FeatureGroup: The created feature group."""
return self._call_api('createMergeFeatureGroup', 'POST', query_params={}, body={'sourceFeatureGroupId': source_feature_group_id, 'tableName': table_name, 'mergeConfig': merge_config, 'description': description}, parse_type=FeatureGroup)
def create_operator_feature_group(self, source_feature_group_id: str, table_name: str, operator_config: Union[dict, OperatorConfig], description: str = None) -> FeatureGroup:
@@ -5572,7 +5910,7 @@ def export_feature_group_version_to_database_connector(self, feature_group_versi
feature_group_version (str): Unique string identifier for the Feature Group instance to export.
database_connector_id (str): Unique string identifier for the Database Connector to export to.
object_name (str): Name of the database object to write to.
- write_mode (str): Enum string indicating whether to use INSERT or UPSERT.
+            write_mode (str): Enum string indicating whether to use INSERT, UPSERT, or REPLACE.
database_feature_mapping (dict): Key/value pair JSON object of "database connector column" -> "feature name" pairs.
id_column (str): Required if write_mode is UPSERT. Indicates which database column should be used as the lookup key.
additional_id_columns (list): For database connectors which support it, additional ID columns to use as a complex key for upserting.
@@ -5829,7 +6167,7 @@ def create_dataset_from_database_connector(self, table_name: str, database_conne
Dataset: The created dataset."""
return self._call_api('createDatasetFromDatabaseConnector', 'POST', query_params={}, body={'tableName': table_name, 'databaseConnectorId': database_connector_id, 'objectName': object_name, 'columns': columns, 'queryArguments': query_arguments, 'refreshSchedule': refresh_schedule, 'sqlQuery': sql_query, 'incremental': incremental, 'attachmentParsingConfig': attachment_parsing_config, 'incrementalDatabaseConnectorConfig': incremental_database_connector_config, 'documentProcessingConfig': document_processing_config, 'versionLimit': version_limit}, parse_type=Dataset)
- def create_dataset_from_application_connector(self, table_name: str, application_connector_id: str, dataset_config: Union[dict, ApplicationConnectorDatasetConfig] = None, refresh_schedule: str = None, version_limit: int = 30) -> Dataset:
+ def create_dataset_from_application_connector(self, table_name: str, application_connector_id: str, dataset_config: Union[dict, ApplicationConnectorDatasetConfig] = None, refresh_schedule: str = None, version_limit: int = 30, incremental: bool = False) -> Dataset:
"""Creates a dataset from an Application Connector.
Args:
@@ -5838,10 +6176,11 @@ def create_dataset_from_application_connector(self, table_name: str, application
dataset_config (ApplicationConnectorDatasetConfig): Dataset config for the application connector.
refresh_schedule (str): Cron time string format that describes a schedule to retrieve the latest version of the imported dataset. The time is specified in UTC.
version_limit (int): The number of recent versions to preserve for the dataset (minimum 30).
+ incremental (bool): Signifies if the dataset is an incremental dataset.
Returns:
Dataset: The created dataset."""
- return self._call_api('createDatasetFromApplicationConnector', 'POST', query_params={}, body={'tableName': table_name, 'applicationConnectorId': application_connector_id, 'datasetConfig': dataset_config, 'refreshSchedule': refresh_schedule, 'versionLimit': version_limit}, parse_type=Dataset)
+ return self._call_api('createDatasetFromApplicationConnector', 'POST', query_params={}, body={'tableName': table_name, 'applicationConnectorId': application_connector_id, 'datasetConfig': dataset_config, 'refreshSchedule': refresh_schedule, 'versionLimit': version_limit, 'incremental': incremental}, parse_type=Dataset)
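A sketch of the new incremental flag on application connector datasets; the table name, connector ID, and schedule are placeholders:

from abacusai import ApiClient

client = ApiClient('YOUR_API_KEY')  # placeholder API key
dataset = client.create_dataset_from_application_connector(
    table_name='support_tickets',                  # hypothetical table name
    application_connector_id='your_connector_id',  # hypothetical connector ID
    refresh_schedule='0 6 * * *',                  # daily at 06:00 UTC
    incremental=True,                              # mark the dataset as incremental
)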
def create_dataset_version_from_database_connector(self, dataset_id: str, object_name: str = None, columns: str = None, query_arguments: str = None, sql_query: str = None) -> DatasetVersion:
"""Creates a new version of the specified dataset.
@@ -7307,7 +7646,7 @@ def get_related_items(self, deployment_token: str, deployment_id: str, query_dat
deployment_id, deployment_token) if deployment_token else None
return self._call_api('getRelatedItems', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'queryData': query_data, 'numItems': num_items, 'page': page, 'scalingFactors': scaling_factors, 'restrictItems': restrict_items, 'excludeItems': exclude_items}, server_override=prediction_url)
- def get_chat_response(self, deployment_token: str, deployment_id: str, messages: list, llm_name: str = None, num_completion_tokens: int = None, system_message: str = None, temperature: float = 0.0, filter_key_values: dict = None, search_score_cutoff: float = None, chat_config: dict = None) -> Dict:
+ def get_chat_response(self, deployment_token: str, deployment_id: str, messages: list, llm_name: str = None, num_completion_tokens: int = None, system_message: str = None, temperature: float = 0.0, filter_key_values: dict = None, search_score_cutoff: float = None, chat_config: dict = None, user_info: dict = None) -> Dict:
"""Return a chat response which continues the conversation based on the input messages and search results.
Args:
@@ -7323,7 +7662,7 @@ def get_chat_response(self, deployment_token: str, deployment_id: str, messages:
chat_config (dict): A dictionary specifying the query chat config override."""
prediction_url = self._get_prediction_endpoint(
deployment_id, deployment_token) if deployment_token else None
- return self._call_api('getChatResponse', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'messages': messages, 'llmName': llm_name, 'numCompletionTokens': num_completion_tokens, 'systemMessage': system_message, 'temperature': temperature, 'filterKeyValues': filter_key_values, 'searchScoreCutoff': search_score_cutoff, 'chatConfig': chat_config}, server_override=prediction_url)
+ return self._call_api('getChatResponse', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'messages': messages, 'llmName': llm_name, 'numCompletionTokens': num_completion_tokens, 'systemMessage': system_message, 'temperature': temperature, 'filterKeyValues': filter_key_values, 'searchScoreCutoff': search_score_cutoff, 'chatConfig': chat_config, 'userInfo': user_info}, server_override=prediction_url)
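A sketch of get_chat_response with the new user_info override; the deployment identifiers are placeholders, and the user_info shape is an assumption since the diff does not document its expected keys:

from abacusai import ApiClient

client = ApiClient('YOUR_API_KEY')  # placeholder API key
response = client.get_chat_response(
    deployment_token='your_deployment_token',  # hypothetical token
    deployment_id='your_deployment_id',        # hypothetical deployment ID
    messages=[{'is_user': True, 'text': 'What were Q3 revenues?'}],
    temperature=0.0,
    user_info={'email': 'user@example.com'},   # assumed shape, not documented in this diff
)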
def get_chat_response_with_binary_data(self, deployment_token: str, deployment_id: str, messages: list, llm_name: str = None, num_completion_tokens: int = None, system_message: str = None, temperature: float = 0.0, filter_key_values: dict = None, search_score_cutoff: float = None, chat_config: dict = None, attachments: None = None) -> Dict:
"""Return a chat response which continues the conversation based on the input messages and search results.
@@ -7344,7 +7683,7 @@ def get_chat_response_with_binary_data(self, deployment_token: str, deployment_i
deployment_id, deployment_token) if deployment_token else None
return self._call_api('getChatResponseWithBinaryData', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, data={'messages': json.dumps(messages) if (messages is not None and not isinstance(messages, str)) else messages, 'llmName': json.dumps(llm_name) if (llm_name is not None and not isinstance(llm_name, str)) else llm_name, 'numCompletionTokens': json.dumps(num_completion_tokens) if (num_completion_tokens is not None and not isinstance(num_completion_tokens, str)) else num_completion_tokens, 'systemMessage': json.dumps(system_message) if (system_message is not None and not isinstance(system_message, str)) else system_message, 'temperature': json.dumps(temperature) if (temperature is not None and not isinstance(temperature, str)) else temperature, 'filterKeyValues': json.dumps(filter_key_values) if (filter_key_values is not None and not isinstance(filter_key_values, str)) else filter_key_values, 'searchScoreCutoff': json.dumps(search_score_cutoff) if (search_score_cutoff is not None and not isinstance(search_score_cutoff, str)) else search_score_cutoff, 'chatConfig': json.dumps(chat_config) if (chat_config is not None and not isinstance(chat_config, str)) else chat_config}, files=attachments, server_override=prediction_url)
- def get_conversation_response(self, deployment_id: str, message: str, deployment_token: str, deployment_conversation_id: str = None, external_session_id: str = None, llm_name: str = None, num_completion_tokens: int = None, system_message: str = None, temperature: float = 0.0, filter_key_values: dict = None, search_score_cutoff: float = None, chat_config: dict = None, doc_infos: list = None) -> Dict:
+ def get_conversation_response(self, deployment_id: str, message: str, deployment_token: str, deployment_conversation_id: str = None, external_session_id: str = None, llm_name: str = None, num_completion_tokens: int = None, system_message: str = None, temperature: float = 0.0, filter_key_values: dict = None, search_score_cutoff: float = None, chat_config: dict = None, doc_infos: list = None, user_info: dict = None, execute_usercode_tool: bool = False) -> Dict:
"""Return a conversation response which continues the conversation based on the input message and deployment conversation id (if exists).
Args:
@@ -7360,10 +7699,11 @@ def get_conversation_response(self, deployment_id: str, message: str, deployment
            filter_key_values (dict): A dictionary mapping column names to a list of values to restrict the retrieved search results.
search_score_cutoff (float): Cutoff for the document retriever score. Matching search results below this score will be ignored.
            chat_config (dict): A dictionary specifying the query chat config override.
- doc_infos (list): An optional list of documents use for the conversation. A keyword 'doc_id' is expected to be present in each document for retrieving contents from docstore."""
+            doc_infos (list): An optional list of documents used for the conversation. A keyword 'doc_id' is expected to be present in each document for retrieving contents from docstore.
+ execute_usercode_tool (bool): If True, will return the tool output in the response."""
prediction_url = self._get_prediction_endpoint(
deployment_id, deployment_token) if deployment_token else None
- return self._call_api('getConversationResponse', 'POST', query_params={'deploymentId': deployment_id, 'deploymentToken': deployment_token}, body={'message': message, 'deploymentConversationId': deployment_conversation_id, 'externalSessionId': external_session_id, 'llmName': llm_name, 'numCompletionTokens': num_completion_tokens, 'systemMessage': system_message, 'temperature': temperature, 'filterKeyValues': filter_key_values, 'searchScoreCutoff': search_score_cutoff, 'chatConfig': chat_config, 'docInfos': doc_infos}, server_override=prediction_url)
+ return self._call_api('getConversationResponse', 'POST', query_params={'deploymentId': deployment_id, 'deploymentToken': deployment_token}, body={'message': message, 'deploymentConversationId': deployment_conversation_id, 'externalSessionId': external_session_id, 'llmName': llm_name, 'numCompletionTokens': num_completion_tokens, 'systemMessage': system_message, 'temperature': temperature, 'filterKeyValues': filter_key_values, 'searchScoreCutoff': search_score_cutoff, 'chatConfig': chat_config, 'docInfos': doc_infos, 'userInfo': user_info, 'executeUsercodeTool': execute_usercode_tool}, server_override=prediction_url)
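A sketch of get_conversation_response with the new execute_usercode_tool flag; identifiers are placeholders:

from abacusai import ApiClient

client = ApiClient('YOUR_API_KEY')  # placeholder API key
result = client.get_conversation_response(
    deployment_id='your_deployment_id',        # hypothetical deployment ID
    deployment_token='your_deployment_token',  # hypothetical token
    message='Summarize the attached contract.',
    deployment_conversation_id=None,           # None starts a new conversation
    execute_usercode_tool=True,                # include tool output in the response
)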
def get_conversation_response_with_binary_data(self, deployment_id: str, deployment_token: str, message: str, deployment_conversation_id: str = None, external_session_id: str = None, llm_name: str = None, num_completion_tokens: int = None, system_message: str = None, temperature: float = 0.0, filter_key_values: dict = None, search_score_cutoff: float = None, chat_config: dict = None, attachments: None = None) -> Dict:
"""Return a conversation response which continues the conversation based on the input message and deployment conversation id (if exists).
@@ -7386,6 +7726,14 @@ def get_conversation_response_with_binary_data(self, deployment_id: str, deploym
deployment_id, deployment_token) if deployment_token else None
return self._call_api('getConversationResponseWithBinaryData', 'POST', query_params={'deploymentId': deployment_id, 'deploymentToken': deployment_token}, data={'message': json.dumps(message) if (message is not None and not isinstance(message, str)) else message, 'deploymentConversationId': json.dumps(deployment_conversation_id) if (deployment_conversation_id is not None and not isinstance(deployment_conversation_id, str)) else deployment_conversation_id, 'externalSessionId': json.dumps(external_session_id) if (external_session_id is not None and not isinstance(external_session_id, str)) else external_session_id, 'llmName': json.dumps(llm_name) if (llm_name is not None and not isinstance(llm_name, str)) else llm_name, 'numCompletionTokens': json.dumps(num_completion_tokens) if (num_completion_tokens is not None and not isinstance(num_completion_tokens, str)) else num_completion_tokens, 'systemMessage': json.dumps(system_message) if (system_message is not None and not isinstance(system_message, str)) else system_message, 'temperature': json.dumps(temperature) if (temperature is not None and not isinstance(temperature, str)) else temperature, 'filterKeyValues': json.dumps(filter_key_values) if (filter_key_values is not None and not isinstance(filter_key_values, str)) else filter_key_values, 'searchScoreCutoff': json.dumps(search_score_cutoff) if (search_score_cutoff is not None and not isinstance(search_score_cutoff, str)) else search_score_cutoff, 'chatConfig': json.dumps(chat_config) if (chat_config is not None and not isinstance(chat_config, str)) else chat_config}, files=attachments, server_override=prediction_url)
+ def get_deep_agent_response(self, message: str, deployment_conversation_id: str = None) -> Dict:
+ """Return a DeepAgent response with generated files if any, based on the input message.
+
+ Args:
+            message (str): The user's message/task for DeepAgent to complete.
+ deployment_conversation_id (str): The unique identifier of a deployment conversation to continue. If not specified, a new one will be created."""
+ return self._proxy_request('getDeepAgentResponse', 'POST', query_params={}, body={'message': message, 'deploymentConversationId': deployment_conversation_id}, is_sync=True, timeout=3600)
+
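A sketch of the new DeepAgent call; note that it proxies synchronously with a one hour timeout, so long tasks block the caller:

from abacusai import ApiClient

client = ApiClient('YOUR_API_KEY')  # placeholder API key
result = client.get_deep_agent_response(
    message='Build a spreadsheet comparing our last four quarterly reports.',
)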
def get_search_results(self, deployment_token: str, deployment_id: str, query_data: dict, num: int = 15) -> Dict:
"""Return the most relevant search results to the search query from the uploaded documents.
@@ -7481,6 +7829,30 @@ def get_alternative_assignments(self, deployment_token: str, deployment_id: str,
deployment_id, deployment_token) if deployment_token else None
return self._call_api('getAlternativeAssignments', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'queryData': query_data, 'addConstraints': add_constraints, 'solveTimeLimitSeconds': solve_time_limit_seconds, 'bestAlternateOnly': best_alternate_only}, server_override=prediction_url)
+ def get_optimization_inputs_from_serialized(self, deployment_token: str, deployment_id: str, query_data: dict = None) -> Dict:
+        """Get optimization inputs for the given query, computed from new serialized inputs.
+
+ Args:
+ deployment_token (str): The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be safely embedded in an application or website.
+ deployment_id (str): The unique identifier of a deployment created under the project.
+            query_data (dict): A dictionary mapping feature group names to serialized dataframes, representing the updated feature groups in the feature group tree that are used to compute new top-level feature groups for the online solve.
+ prediction_url = self._get_prediction_endpoint(
+ deployment_id, deployment_token) if deployment_token else None
+ return self._call_api('getOptimizationInputsFromSerialized', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'queryData': query_data}, server_override=prediction_url)
+
+ def get_assignments_online_with_new_serialized_inputs(self, deployment_token: str, deployment_id: str, query_data: dict = None, solve_time_limit_seconds: float = None, optimality_gap_limit: float = None) -> Dict:
+        """Get assignments for the given query, with new serialized inputs.
+
+ Args:
+ deployment_token (str): The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be safely embedded in an application or website.
+ deployment_id (str): The unique identifier of a deployment created under the project.
+            query_data (dict): A dictionary containing the assignment, constraint, and constraint_equations_df inputs (under these specific keys).
+ solve_time_limit_seconds (float): Maximum time in seconds to spend solving the query.
+            optimality_gap_limit (float): The optimality gap within which a solution is accepted as valid (0 means only an optimal solution is accepted). It is computed as abs(best_solution_found - best_bound) / abs(best_solution_found).
+ prediction_url = self._get_prediction_endpoint(
+ deployment_id, deployment_token) if deployment_token else None
+ return self._call_api('getAssignmentsOnlineWithNewSerializedInputs', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'queryData': query_data, 'solveTimeLimitSeconds': solve_time_limit_seconds, 'optimalityGapLimit': optimality_gap_limit}, server_override=prediction_url)
+
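A hedged sketch of the two serialized input optimization calls; the diff does not specify the dataframe serialization format, so the payload strings below are placeholders:

from abacusai import ApiClient

client = ApiClient('YOUR_API_KEY')  # placeholder API key
token, dep = 'your_deployment_token', 'your_deployment_id'  # hypothetical identifiers

# Recompute top-level feature groups from updated, serialized feature group inputs.
inputs = client.get_optimization_inputs_from_serialized(
    token, dep, query_data={'orders_fg': '<serialized dataframe>'})

# Solve online against new serialized inputs, bounded by time and optimality gap.
assignments = client.get_assignments_online_with_new_serialized_inputs(
    token, dep,
    query_data={
        'assignment': '<serialized dataframe>',
        'constraint': '<serialized dataframe>',
        'constraint_equations_df': '<serialized dataframe>',
    },
    solve_time_limit_seconds=30.0,
    optimality_gap_limit=0.01,
)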
def check_constraints(self, deployment_token: str, deployment_id: str, query_data: dict) -> Dict:
"""Check for any constraints violated by the overrides.
@@ -7609,17 +7981,6 @@ def transfer_style(self, deployment_token: str, deployment_id: str, source_image
deployment_id, deployment_token) if deployment_token else None
return self._call_api('transferStyle', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, data={}, files={'sourceImage': source_image, 'styleImage': style_image}, streamable_response=True, server_override=prediction_url)
- def generate_image(self, deployment_token: str, deployment_id: str, query_data: dict) -> io.BytesIO:
- """Generate an image from text prompt.
-
- Args:
- deployment_token (str): The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model within an application or website.
- deployment_id (str): A unique identifier to a deployment created under the project.
- query_data (dict): Specifies the text prompt. For example, {'prompt': 'a cat'}"""
- prediction_url = self._get_prediction_endpoint(
- deployment_id, deployment_token) if deployment_token else None
- return self._call_api('generateImage', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'queryData': query_data}, streamable_response=True, server_override=prediction_url)
-
def execute_agent(self, deployment_token: str, deployment_id: str, arguments: list = None, keyword_arguments: dict = None) -> Dict:
"""Executes a deployed AI agent function using the arguments as keyword arguments to the agent execute function.
@@ -7628,9 +7989,7 @@ def execute_agent(self, deployment_token: str, deployment_id: str, arguments: li
deployment_id (str): A unique string identifier for the deployment created under the project.
arguments (list): Positional arguments to the agent execute function.
            keyword_arguments (dict): A dictionary where each 'key' represents the parameter name and its corresponding 'value' represents the value of that parameter for the agent execute function."""
- prediction_url = self._get_prediction_endpoint(
- deployment_id, deployment_token) if deployment_token else None
- return self._call_api('executeAgent', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'arguments': arguments, 'keywordArguments': keyword_arguments}, server_override=prediction_url, timeout=1500)
+ return self._proxy_request('executeAgent', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'arguments': arguments, 'keywordArguments': keyword_arguments}, is_sync=True)
def get_matrix_agent_schema(self, deployment_token: str, deployment_id: str, query: str, doc_infos: list = None, deployment_conversation_id: str = None, external_session_id: str = None) -> Dict:
"""Executes a deployed AI agent function using the arguments as keyword arguments to the agent execute function.
@@ -7659,9 +8018,7 @@ def execute_conversation_agent(self, deployment_token: str, deployment_id: str,
regenerate (bool): If True, will regenerate the response from the last query.
            doc_infos (list): An optional list of documents used for the conversation. A keyword 'doc_id' is expected to be present in each document for retrieving contents from docstore.
agent_workflow_node_id (str): An optional agent workflow node id to trigger agent execution from an intermediate node."""
- prediction_url = self._get_prediction_endpoint(
- deployment_id, deployment_token) if deployment_token else None
- return self._call_api('executeConversationAgent', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'arguments': arguments, 'keywordArguments': keyword_arguments, 'deploymentConversationId': deployment_conversation_id, 'externalSessionId': external_session_id, 'regenerate': regenerate, 'docInfos': doc_infos, 'agentWorkflowNodeId': agent_workflow_node_id}, server_override=prediction_url)
+ return self._proxy_request('executeSyncConversationAgent', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'arguments': arguments, 'keywordArguments': keyword_arguments, 'deploymentConversationId': deployment_conversation_id, 'externalSessionId': external_session_id, 'regenerate': regenerate, 'docInfos': doc_infos, 'agentWorkflowNodeId': agent_workflow_node_id}, is_sync=True)
def lookup_matches(self, deployment_token: str, deployment_id: str, data: str = None, filters: dict = None, num: int = None, result_columns: list = None, max_words: int = None, num_retrieval_margin_words: int = None, max_words_per_chunk: int = None, score_multiplier_column: str = None, min_score: float = None, required_phrases: list = None, filter_clause: str = None, crowding_limits: dict = None, include_text_search: bool = False) -> List[DocumentRetrieverLookupResult]:
"""Lookup document retrievers and return the matching documents from the document retriever deployed with given query.
@@ -8010,7 +8367,7 @@ def get_feature_group_row_process_logs_by_key(self, deployment_id: str, primary_
FeatureGroupRowProcessLogs: An object representing the logs for the feature group row process"""
return self._call_api('getFeatureGroupRowProcessLogsByKey', 'POST', query_params={'deploymentId': deployment_id}, body={'primaryKeyValue': primary_key_value}, parse_type=FeatureGroupRowProcessLogs)
- def create_python_function(self, name: str, source_code: str = None, function_name: str = None, function_variable_mappings: List = None, package_requirements: list = None, function_type: str = 'FEATURE_GROUP', description: str = None, examples: dict = None) -> PythonFunction:
+ def create_python_function(self, name: str, source_code: str = None, function_name: str = None, function_variable_mappings: List = None, package_requirements: list = None, function_type: str = 'FEATURE_GROUP', description: str = None, examples: dict = None, user_level_connectors: Dict = None, org_level_connectors: List = None, output_variable_mappings: List = None) -> PythonFunction:
"""Creates a custom Python function that is reusable.
Args:
@@ -8021,13 +8378,16 @@ def create_python_function(self, name: str, source_code: str = None, function_na
package_requirements (list): List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
function_type (str): Type of Python function to create. Default is FEATURE_GROUP, but can also be PLOTLY_FIG.
description (str): Description of the Python function. This should include details about the function's purpose, expected inputs and outputs, and any important usage considerations or limitations.
- examples (dict): Dictionary containing example use cases and anti-patterns. Should include 'positive_examples' showing recommended usage and 'negative_examples' showing cases to avoid.
+            examples (dict): Dictionary containing example use cases and anti-patterns. Should include 'positive_examples' showing recommended usage and 'negative_examples' showing cases to avoid.
+ user_level_connectors (Dict): Dictionary containing user level connectors.
+ org_level_connectors (List): List containing organization level connectors.
+ output_variable_mappings (List): List of output variable mappings that defines the elements of the function's return value.
Returns:
PythonFunction: The Python function that can be used (e.g. for feature group transform)."""
- return self._call_api('createPythonFunction', 'POST', query_params={}, body={'name': name, 'sourceCode': source_code, 'functionName': function_name, 'functionVariableMappings': function_variable_mappings, 'packageRequirements': package_requirements, 'functionType': function_type, 'description': description, 'examples': examples}, parse_type=PythonFunction)
+ return self._call_api('createPythonFunction', 'POST', query_params={}, body={'name': name, 'sourceCode': source_code, 'functionName': function_name, 'functionVariableMappings': function_variable_mappings, 'packageRequirements': package_requirements, 'functionType': function_type, 'description': description, 'examples': examples, 'userLevelConnectors': user_level_connectors, 'orgLevelConnectors': org_level_connectors, 'outputVariableMappings': output_variable_mappings}, parse_type=PythonFunction)
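A sketch of create_python_function with the new connector and output mapping parameters; the connector identifiers and the mapping shapes are assumptions, as their schemas are not defined in this diff:

from abacusai import ApiClient

client = ApiClient('YOUR_API_KEY')  # placeholder API key
source = '''
def transform(df):
    df['total'] = df['price'] * df['quantity']
    return df
'''
fn = client.create_python_function(
    name='add_order_totals',
    source_code=source,
    function_name='transform',
    package_requirements=['pandas>=1.4.0'],
    description='Adds a total column computed from price and quantity.',
    user_level_connectors={'slack': 'user_connector_id'},                     # assumed shape
    org_level_connectors=['org_connector_id'],                                # assumed shape
    output_variable_mappings=[{'name': 'df', 'variable_type': 'DATAFRAME'}],  # assumed shape
)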
- def update_python_function(self, name: str, source_code: str = None, function_name: str = None, function_variable_mappings: List = None, package_requirements: list = None, description: str = None, examples: dict = None) -> PythonFunction:
+ def update_python_function(self, name: str, source_code: str = None, function_name: str = None, function_variable_mappings: List = None, package_requirements: list = None, description: str = None, examples: dict = None, user_level_connectors: Dict = None, org_level_connectors: List = None, output_variable_mappings: List = None) -> PythonFunction:
"""Update custom python function with user inputs for the given python function.
Args:
@@ -8038,10 +8398,13 @@ def update_python_function(self, name: str, source_code: str = None, function_na
package_requirements (list): List of package requirement strings. For example: ['numpy==1.2.3', 'pandas>=1.4.0'].
description (str): Description of the Python function. This should include details about the function's purpose, expected inputs and outputs, and any important usage considerations or limitations.
examples (dict): Dictionary containing example use cases and anti-patterns. Should include 'positive_examples' showing recommended usage and 'negative_examples' showing cases to avoid.
+ user_level_connectors (Dict): Dictionary containing user level connectors.
+ org_level_connectors (List): List of organization level connectors.
+ output_variable_mappings (List): List of output variable mappings that defines the elements of the function's return value.
Returns:
PythonFunction: The Python function object."""
- return self._call_api('updatePythonFunction', 'PATCH', query_params={}, body={'name': name, 'sourceCode': source_code, 'functionName': function_name, 'functionVariableMappings': function_variable_mappings, 'packageRequirements': package_requirements, 'description': description, 'examples': examples}, parse_type=PythonFunction)
+ return self._call_api('updatePythonFunction', 'PATCH', query_params={}, body={'name': name, 'sourceCode': source_code, 'functionName': function_name, 'functionVariableMappings': function_variable_mappings, 'packageRequirements': package_requirements, 'description': description, 'examples': examples, 'userLevelConnectors': user_level_connectors, 'orgLevelConnectors': org_level_connectors, 'outputVariableMappings': output_variable_mappings}, parse_type=PythonFunction)
def delete_python_function(self, name: str):
"""Removes an existing Python function.
@@ -8465,16 +8828,18 @@ def update_module(self, name: str, source_code: str = None) -> Module:
Module: The updated module."""
return self._call_api('updateModule', 'PATCH', query_params={}, body={'name': name, 'sourceCode': source_code}, parse_type=Module)
- def create_organization_secret(self, secret_key: str, value: str) -> OrganizationSecret:
+ def create_organization_secret(self, secret_key: str, value: str, secret_type: Union[OrganizationSecretType, str] = OrganizationSecretType.ORG_SECRET, metadata: dict = None) -> OrganizationSecret:
"""Creates a secret which can be accessed in functions and notebooks.
Args:
secret_key (str): The secret key.
value (str): The secret value.
+ secret_type (OrganizationSecretType): The type of secret. Use OrganizationSecretType enum values.
+ metadata (dict): Additional metadata for the secret.
Returns:
OrganizationSecret: The created secret."""
- return self._call_api('createOrganizationSecret', 'POST', query_params={}, body={'secretKey': secret_key, 'value': value}, parse_type=OrganizationSecret)
+ return self._call_api('createOrganizationSecret', 'POST', query_params={}, body={'secretKey': secret_key, 'value': value, 'secretType': secret_type, 'metadata': metadata}, parse_type=OrganizationSecret)
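A sketch of create_organization_secret with the new secret type and metadata; the signature accepts either the OrganizationSecretType enum or its string value, and the metadata keys here are hypothetical:

from abacusai import ApiClient

client = ApiClient('YOUR_API_KEY')  # placeholder API key
secret = client.create_organization_secret(
    secret_key='SLACK_BOT_TOKEN',
    value='xoxb-placeholder',
    secret_type='ORG_SECRET',             # string form of the enum default
    metadata={'owner': 'data-platform'},  # hypothetical metadata
)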
def delete_organization_secret(self, secret_key: str):
"""Deletes a secret.
@@ -8593,7 +8958,7 @@ def set_deployment_conversation_feedback(self, deployment_conversation_id: str,
feedback (str): Optional feedback on why the message is useful or not useful
feedback_type (str): Optional feedback type
deployment_id (str): The deployment this conversation belongs to. This is required if not logged in."""
- return self._call_api('setDeploymentConversationFeedback', 'POST', query_params={'deploymentId': deployment_id}, body={'deploymentConversationId': deployment_conversation_id, 'messageIndex': message_index, 'isUseful': is_useful, 'isNotUseful': is_not_useful, 'feedback': feedback, 'feedbackType': feedback_type})
+ return self._proxy_request('setDeploymentConversationFeedback', 'POST', query_params={'deploymentId': deployment_id}, body={'deploymentConversationId': deployment_conversation_id, 'messageIndex': message_index, 'isUseful': is_useful, 'isNotUseful': is_not_useful, 'feedback': feedback, 'feedbackType': feedback_type}, is_sync=True)
def rename_deployment_conversation(self, deployment_conversation_id: str, name: str, deployment_id: str = None):
"""Rename a Deployment Conversation.
@@ -8604,6 +8969,35 @@ def rename_deployment_conversation(self, deployment_conversation_id: str, name:
deployment_id (str): The deployment this conversation belongs to. This is required if not logged in."""
return self._proxy_request('renameDeploymentConversation', 'POST', query_params={'deploymentId': deployment_id}, body={'deploymentConversationId': deployment_conversation_id, 'name': name}, is_sync=True)
+ def add_user_group_object_permission(self, object_id: str, user_group_id: str, object_type: str, permission: str = 'ALL'):
+ """Add user group object permission for any object type.
+
+ Args:
+            object_id (str): The ID of the object (dataset_id, project_id, external_connection_id, feature_group_id, etc.).
+            user_group_id (str): The ID of the user group.
+            object_type (str): The type of object ('dataset', 'project', 'external_connection', 'feature_group').
+            permission (str): The permission to grant (default: 'ALL')."""
+ return self._call_api('addUserGroupObjectPermission', 'POST', query_params={}, body={'objectId': object_id, 'userGroupId': user_group_id, 'objectType': object_type, 'permission': permission})
+
+ def update_user_group_object_permission(self, object_id: str, user_group_id: str, object_type: str, permission: str = 'ALL'):
+ """Update user group object permission for any object type.
+
+ Args:
+            object_id (str): The ID of the object (dataset_id, project_id, external_connection_id, feature_group_id, etc.).
+            user_group_id (str): The ID of the user group.
+            object_type (str): The type of object ('dataset', 'project', 'external_connection', 'feature_group').
+            permission (str): The permission to grant (default: 'ALL')."""
+ return self._call_api('updateUserGroupObjectPermission', 'POST', query_params={}, body={'objectId': object_id, 'userGroupId': user_group_id, 'objectType': object_type, 'permission': permission})
+
+ def remove_user_group_object_permission(self, object_id: str, user_group_id: str, object_type: str):
+ """Remove user group object permission for any object type.
+
+ Args:
+            object_id (str): The ID of the object (dataset_id, project_id, external_connection_id, feature_group_id, etc.).
+            user_group_id (str): The ID of the user group.
+            object_type (str): The type of object ('dataset', 'project', 'external_connection', 'feature_group')."""
+ return self._call_api('removeUserGroupObjectPermission', 'POST', query_params={}, body={'objectId': object_id, 'userGroupId': user_group_id, 'objectType': object_type})
+
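A sketch of the user group permission lifecycle on an arbitrary object; the IDs are placeholders, and permission values other than the 'ALL' default are not enumerated in this diff:

from abacusai import ApiClient

client = ApiClient('YOUR_API_KEY')  # placeholder API key
group_id, dataset_id = 'your_user_group_id', 'your_dataset_id'  # hypothetical IDs

client.add_user_group_object_permission(dataset_id, group_id, object_type='dataset', permission='ALL')
client.update_user_group_object_permission(dataset_id, group_id, object_type='dataset', permission='ALL')
client.remove_user_group_object_permission(dataset_id, group_id, object_type='dataset')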
def create_app_user_group(self, name: str) -> AppUserGroup:
"""Creates a new App User Group. This User Group is used to have permissions to access the external chatbots.
@@ -8681,6 +9075,20 @@ def remove_app_user_group_from_external_application(self, user_group_id: str, ex
external_application_id (str): The ID of the External Application."""
return self._call_api('removeAppUserGroupFromExternalApplication', 'POST', query_params={}, body={'userGroupId': user_group_id, 'externalApplicationId': external_application_id})
+ def add_developers_to_external_application(self, external_application_id: str):
+ """Adds a permission for the platform App User Group to access an External Application.
+
+ Args:
+ external_application_id (str): The ID of the External Application."""
+ return self._call_api('addDevelopersToExternalApplication', 'POST', query_params={}, body={'externalApplicationId': external_application_id})
+
+ def remove_developers_from_external_application(self, external_application_id: str):
+ """Removes a permission for the platform App User Group to access an External Application.
+
+ Args:
+ external_application_id (str): The ID of the External Application."""
+ return self._call_api('removeDevelopersFromExternalApplication', 'POST', query_params={}, body={'externalApplicationId': external_application_id})
+
def create_external_application(self, deployment_id: str, name: str = None, description: str = None, logo: str = None, theme: dict = None) -> ExternalApplication:
"""Creates a new External Application from an existing ChatLLM Deployment.
@@ -8786,7 +9194,7 @@ def evaluate_prompt(self, prompt: str = None, system_message: str = None, llm_na
Returns:
LlmResponse: The response from the model, raw text and parsed components."""
- return self._proxy_request('EvaluatePrompt', 'POST', query_params={}, body={'prompt': prompt, 'systemMessage': system_message, 'llmName': llm_name, 'maxTokens': max_tokens, 'temperature': temperature, 'messages': messages, 'responseType': response_type, 'jsonResponseSchema': json_response_schema, 'stopSequences': stop_sequences, 'topP': top_p}, parse_type=LlmResponse)
+ return self._proxy_request('evaluatePrompt', 'POST', query_params={}, body={'prompt': prompt, 'systemMessage': system_message, 'llmName': llm_name, 'maxTokens': max_tokens, 'temperature': temperature, 'messages': messages, 'responseType': response_type, 'jsonResponseSchema': json_response_schema, 'stopSequences': stop_sequences, 'topP': top_p}, parse_type=LlmResponse, is_sync=True)
def render_feature_groups_for_llm(self, feature_group_ids: List, token_budget: int = None, include_definition: bool = True) -> List[LlmInput]:
"""Encode feature groups as language model inputs.
@@ -9003,3 +9411,18 @@ def get_relevant_snippets(self, doc_ids: List = None, blobs: io.TextIOBase = Non
Returns:
list[DocumentRetrieverLookupResult]: The snippets found from the documents."""
return self._proxy_request('GetRelevantSnippets', 'POST', query_params={}, data={'docIds': doc_ids, 'query': query, 'documentRetrieverConfig': json.dumps(document_retriever_config.to_dict()) if hasattr(document_retriever_config, 'to_dict') else json.dumps(document_retriever_config), 'honorSentenceBoundary': honor_sentence_boundary, 'numRetrievalMarginWords': num_retrieval_margin_words, 'maxWordsPerSnippet': max_words_per_snippet, 'maxSnippetsPerDocument': max_snippets_per_document, 'startWordIndex': start_word_index, 'endWordIndex': end_word_index, 'includingBoundingBoxes': including_bounding_boxes, 'text': text, 'documentProcessingConfig': json.dumps(document_processing_config.to_dict()) if hasattr(document_processing_config, 'to_dict') else json.dumps(document_processing_config)}, files=blobs, parse_type=DocumentRetrieverLookupResult)
+
+ def query_mcp_server(self, task: str, server_name: str = None, mcp_server_connection_id: str = None) -> McpServerQueryResult:
+        """Query a Remote MCP server. For a given task, it runs the required tools of the MCP server with appropriate arguments and returns the output.
+
+ Args:
+            task (str): A comprehensive description of the task to perform using the MCP server tools.
+            server_name (str): The name of the MCP server.
+            mcp_server_connection_id (str): The connection ID of the MCP server to query.
+
+ Returns:
+ McpServerQueryResult: The execution logs as well as the final output of the task execution."""
+ return self._proxy_request('QueryMcpServer', 'POST', query_params={}, body={'task': task, 'serverName': server_name, 'mcpServerConnectionId': mcp_server_connection_id}, parse_type=McpServerQueryResult)
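A sketch of querying a remote MCP server; the server name is a placeholder, and either server_name or mcp_server_connection_id would identify the server:

from abacusai import ApiClient

client = ApiClient('YOUR_API_KEY')  # placeholder API key
result = client.query_mcp_server(
    task='List the open pull requests in the analytics repo and summarize each one.',
    server_name='github',  # hypothetical MCP server name
)
print(result)  # McpServerQueryResult, with execution logs and the final output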
diff --git a/abacusai/code_autocomplete_edit_prediction_response.py b/abacusai/code_autocomplete_edit_prediction_response.py
new file mode 100644
index 000000000..c1c6ce7b2
--- /dev/null
+++ b/abacusai/code_autocomplete_edit_prediction_response.py
@@ -0,0 +1,37 @@
+from .return_class import AbstractApiClass
+
+
+class CodeAutocompleteEditPredictionResponse(AbstractApiClass):
+ """
+    An autocomplete response from an LLM
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+        autocompleteResponse (str): The autocomplete code.
+ showAutocomplete (bool): Whether to show autocomplete in the client
+ """
+
+ def __init__(self, client, autocompleteResponse=None, showAutocomplete=None):
+ super().__init__(client, None)
+ self.autocomplete_response = autocompleteResponse
+ self.show_autocomplete = showAutocomplete
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'autocomplete_response': repr(
+ self.autocomplete_response), f'show_autocomplete': repr(self.show_autocomplete)}
+ class_name = "CodeAutocompleteEditPredictionResponse"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'autocomplete_response': self.autocomplete_response,
+ 'show_autocomplete': self.show_autocomplete}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/code_bot.py b/abacusai/code_bot.py
index 27e9d6143..519b04f10 100644
--- a/abacusai/code_bot.py
+++ b/abacusai/code_bot.py
@@ -11,21 +11,33 @@ class CodeBot(AbstractApiClass):
name (str): The name of the bot.
imageUploadSupported (bool): Whether the LLM supports image upload.
codeAgentSupported (bool): Whether the LLM supports code agent.
+ codeEditSupported (bool): Whether the LLM supports code edit.
isPremium (bool): Whether the LLM is a premium LLM.
+ llmBotIcon (str): The icon of the LLM bot.
+ provider (str): The provider of the LLM.
+ isUserApiKeyAllowed (bool): Whether the LLM supports user API key.
+ isRateLimited (bool): Whether the LLM is rate limited.
+ apiKeyUrl (str): The URL to get the API key.
"""
- def __init__(self, client, llmName=None, name=None, imageUploadSupported=None, codeAgentSupported=None, isPremium=None):
+ def __init__(self, client, llmName=None, name=None, imageUploadSupported=None, codeAgentSupported=None, codeEditSupported=None, isPremium=None, llmBotIcon=None, provider=None, isUserApiKeyAllowed=None, isRateLimited=None, apiKeyUrl=None):
super().__init__(client, None)
self.llm_name = llmName
self.name = name
self.image_upload_supported = imageUploadSupported
self.code_agent_supported = codeAgentSupported
+ self.code_edit_supported = codeEditSupported
self.is_premium = isPremium
+ self.llm_bot_icon = llmBotIcon
+ self.provider = provider
+ self.is_user_api_key_allowed = isUserApiKeyAllowed
+ self.is_rate_limited = isRateLimited
+ self.api_key_url = apiKeyUrl
self.deprecated_keys = {}
def __repr__(self):
- repr_dict = {f'llm_name': repr(self.llm_name), f'name': repr(self.name), f'image_upload_supported': repr(
- self.image_upload_supported), f'code_agent_supported': repr(self.code_agent_supported), f'is_premium': repr(self.is_premium)}
+ repr_dict = {f'llm_name': repr(self.llm_name), f'name': repr(self.name), f'image_upload_supported': repr(self.image_upload_supported), f'code_agent_supported': repr(self.code_agent_supported), f'code_edit_supported': repr(self.code_edit_supported), f'is_premium': repr(
+ self.is_premium), f'llm_bot_icon': repr(self.llm_bot_icon), f'provider': repr(self.provider), f'is_user_api_key_allowed': repr(self.is_user_api_key_allowed), f'is_rate_limited': repr(self.is_rate_limited), f'api_key_url': repr(self.api_key_url)}
class_name = "CodeBot"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -38,6 +50,6 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'llm_name': self.llm_name, 'name': self.name, 'image_upload_supported': self.image_upload_supported,
- 'code_agent_supported': self.code_agent_supported, 'is_premium': self.is_premium}
+ resp = {'llm_name': self.llm_name, 'name': self.name, 'image_upload_supported': self.image_upload_supported, 'code_agent_supported': self.code_agent_supported, 'code_edit_supported': self.code_edit_supported,
+ 'is_premium': self.is_premium, 'llm_bot_icon': self.llm_bot_icon, 'provider': self.provider, 'is_user_api_key_allowed': self.is_user_api_key_allowed, 'is_rate_limited': self.is_rate_limited, 'api_key_url': self.api_key_url}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/code_embeddings.py b/abacusai/code_embeddings.py
new file mode 100644
index 000000000..a7cb4bc6b
--- /dev/null
+++ b/abacusai/code_embeddings.py
@@ -0,0 +1,37 @@
+from .return_class import AbstractApiClass
+
+
+class CodeEmbeddings(AbstractApiClass):
+ """
+ Code embeddings
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ embeddings (dict): A dictionary mapping the file name to its embeddings.
+ chunkingScheme (str): The scheme used for chunking the embeddings.
+ """
+
+ def __init__(self, client, embeddings=None, chunkingScheme=None):
+ super().__init__(client, None)
+ self.embeddings = embeddings
+ self.chunking_scheme = chunkingScheme
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'embeddings': repr(
+ self.embeddings), f'chunking_scheme': repr(self.chunking_scheme)}
+ class_name = "CodeEmbeddings"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'embeddings': self.embeddings,
+ 'chunking_scheme': self.chunking_scheme}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/code_llm_changed_files.py b/abacusai/code_llm_changed_files.py
new file mode 100644
index 000000000..d2b309150
--- /dev/null
+++ b/abacusai/code_llm_changed_files.py
@@ -0,0 +1,39 @@
+from .return_class import AbstractApiClass
+
+
+class CodeLlmChangedFiles(AbstractApiClass):
+ """
+ Code changed files
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ addedFiles (list): A list of added file paths.
+ updatedFiles (list): A list of updated file paths.
+ deletedFiles (list): A list of deleted file paths.
+ """
+
+ def __init__(self, client, addedFiles=None, updatedFiles=None, deletedFiles=None):
+ super().__init__(client, None)
+ self.added_files = addedFiles
+ self.updated_files = updatedFiles
+ self.deleted_files = deletedFiles
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'added_files': repr(self.added_files), f'updated_files': repr(
+ self.updated_files), f'deleted_files': repr(self.deleted_files)}
+ class_name = "CodeLlmChangedFiles"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'added_files': self.added_files,
+ 'updated_files': self.updated_files, 'deleted_files': self.deleted_files}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/code_suggestion_validation_response.py b/abacusai/code_suggestion_validation_response.py
new file mode 100644
index 000000000..fd1ef58d4
--- /dev/null
+++ b/abacusai/code_suggestion_validation_response.py
@@ -0,0 +1,33 @@
+from .return_class import AbstractApiClass
+
+
+class CodeSuggestionValidationResponse(AbstractApiClass):
+ """
+ A response from an LLM to validate a code suggestion.
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ isValid (bool): Whether the code suggestion is valid.
+ """
+
+ def __init__(self, client, isValid=None):
+ super().__init__(client, None)
+ self.is_valid = isValid
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'is_valid': repr(self.is_valid)}
+ class_name = "CodeSuggestionValidationResponse"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'is_valid': self.is_valid}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/code_summary_response.py b/abacusai/code_summary_response.py
new file mode 100644
index 000000000..e07415b9c
--- /dev/null
+++ b/abacusai/code_summary_response.py
@@ -0,0 +1,33 @@
+from .return_class import AbstractApiClass
+
+
+class CodeSummaryResponse(AbstractApiClass):
+ """
+ A summary response from an LLM
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ summary (str): The summary of the code.
+ """
+
+ def __init__(self, client, summary=None):
+ super().__init__(client, None)
+ self.summary = summary
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'summary': repr(self.summary)}
+ class_name = "CodeSummaryResponse"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'summary': self.summary}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/codellm_embedding_constants.py b/abacusai/codellm_embedding_constants.py
new file mode 100644
index 000000000..ddc28e881
--- /dev/null
+++ b/abacusai/codellm_embedding_constants.py
@@ -0,0 +1,43 @@
+from .return_class import AbstractApiClass
+
+
+class CodellmEmbeddingConstants(AbstractApiClass):
+ """
+ A dictionary of constants to be used in the autocomplete.
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ maxSupportedWorkspaceFiles (int): Max supported workspace files
+ maxSupportedWorkspaceChunks (int): Max supported workspace chunks
+ maxConcurrentRequests (int): Max concurrent requests
+ fileExtensionToChunkingScheme (dict): Map between the file extensions and their chunking schema
+ idleTimeoutSeconds (int): The idle timeout without any activity before the workspace is refreshed.
+ """
+
+ def __init__(self, client, maxSupportedWorkspaceFiles=None, maxSupportedWorkspaceChunks=None, maxConcurrentRequests=None, fileExtensionToChunkingScheme=None, idleTimeoutSeconds=None):
+ super().__init__(client, None)
+ self.max_supported_workspace_files = maxSupportedWorkspaceFiles
+ self.max_supported_workspace_chunks = maxSupportedWorkspaceChunks
+ self.max_concurrent_requests = maxConcurrentRequests
+ self.file_extension_to_chunking_scheme = fileExtensionToChunkingScheme
+ self.idle_timeout_seconds = idleTimeoutSeconds
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'max_supported_workspace_files': repr(self.max_supported_workspace_files), f'max_supported_workspace_chunks': repr(self.max_supported_workspace_chunks), f'max_concurrent_requests': repr(
+ self.max_concurrent_requests), f'file_extension_to_chunking_scheme': repr(self.file_extension_to_chunking_scheme), f'idle_timeout_seconds': repr(self.idle_timeout_seconds)}
+ class_name = "CodellmEmbeddingConstants"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'max_supported_workspace_files': self.max_supported_workspace_files, 'max_supported_workspace_chunks': self.max_supported_workspace_chunks,
+ 'max_concurrent_requests': self.max_concurrent_requests, 'file_extension_to_chunking_scheme': self.file_extension_to_chunking_scheme, 'idle_timeout_seconds': self.idle_timeout_seconds}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/compute_point_info.py b/abacusai/compute_point_info.py
index af84dbc02..bfebe6d4d 100644
--- a/abacusai/compute_point_info.py
+++ b/abacusai/compute_point_info.py
@@ -13,9 +13,10 @@ class ComputePointInfo(AbstractApiClass):
currMonthAvailPoints (int): The current month's available compute points
currMonthUsage (int): The current month's usage compute points
lastThrottlePopUp (str): The last time the organization was throttled
+ alwaysDisplay (bool): Whether to always display the compute point toggle
"""
- def __init__(self, client, updatedAt=None, last24HoursUsage=None, last7DaysUsage=None, currMonthAvailPoints=None, currMonthUsage=None, lastThrottlePopUp=None):
+ def __init__(self, client, updatedAt=None, last24HoursUsage=None, last7DaysUsage=None, currMonthAvailPoints=None, currMonthUsage=None, lastThrottlePopUp=None, alwaysDisplay=None):
super().__init__(client, None)
self.updated_at = updatedAt
self.last_24_hours_usage = last24HoursUsage
@@ -23,11 +24,12 @@ def __init__(self, client, updatedAt=None, last24HoursUsage=None, last7DaysUsage
self.curr_month_avail_points = currMonthAvailPoints
self.curr_month_usage = currMonthUsage
self.last_throttle_pop_up = lastThrottlePopUp
+ self.always_display = alwaysDisplay
self.deprecated_keys = {}
def __repr__(self):
repr_dict = {f'updated_at': repr(self.updated_at), f'last_24_hours_usage': repr(self.last_24_hours_usage), f'last_7_days_usage': repr(self.last_7_days_usage), f'curr_month_avail_points': repr(
- self.curr_month_avail_points), f'curr_month_usage': repr(self.curr_month_usage), f'last_throttle_pop_up': repr(self.last_throttle_pop_up)}
+ self.curr_month_avail_points), f'curr_month_usage': repr(self.curr_month_usage), f'last_throttle_pop_up': repr(self.last_throttle_pop_up), f'always_display': repr(self.always_display)}
class_name = "ComputePointInfo"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -40,6 +42,6 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'updated_at': self.updated_at, 'last_24_hours_usage': self.last_24_hours_usage, 'last_7_days_usage': self.last_7_days_usage,
- 'curr_month_avail_points': self.curr_month_avail_points, 'curr_month_usage': self.curr_month_usage, 'last_throttle_pop_up': self.last_throttle_pop_up}
+ resp = {'updated_at': self.updated_at, 'last_24_hours_usage': self.last_24_hours_usage, 'last_7_days_usage': self.last_7_days_usage, 'curr_month_avail_points':
+ self.curr_month_avail_points, 'curr_month_usage': self.curr_month_usage, 'last_throttle_pop_up': self.last_throttle_pop_up, 'always_display': self.always_display}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/constants_autocomplete_response.py b/abacusai/constants_autocomplete_response.py
new file mode 100644
index 000000000..8d1917a76
--- /dev/null
+++ b/abacusai/constants_autocomplete_response.py
@@ -0,0 +1,111 @@
+from .return_class import AbstractApiClass
+
+
+class ConstantsAutocompleteResponse(AbstractApiClass):
+ """
+ A dictionary of constants to be used in the autocomplete.
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ maxPendingRequests (int): The maximum number of pending requests.
+ acceptanceDelay (int): The acceptance delay.
+ debounceDelay (int): The debounce delay.
+ recordUserAction (bool): Whether to record user action.
+ validateSuggestion (bool): Whether to validate the suggestion.
+ validationLinesThreshold (int): The number of lines to validate the suggestion.
+ maxTrackedRecentChanges (int): The maximum number of recent file changes to track.
+ diffThreshold (int): The diff operations threshold.
+ derivativeThreshold (int): The derivative threshold for deletions
+ defaultSurroundingLines (int): The default number of surrounding lines to include in the recently visited context.
+ maxTrackedVisitChanges (int): The maximum number of recently visited ranges to track.
+ selectionCooldownMs (int): The cooldown time in milliseconds for selection changes.
+ viewingCooldownMs (int): The cooldown time in milliseconds for viewing changes.
+ maxLines (int): The maximum number of lines to include in recently visited context.
+ editCooldownMs (int): The cooldown time in milliseconds after last edit.
+ scrollDebounceMs (int): The debounce time in milliseconds for scroll events.
+ lspDeadline (int): The deadline in milliseconds for LSP context.
+ diagnosticsThreshold (int): The max number of diagnostics to show.
+ diagnosticEachThreshold (int): The max number of characters to show for each diagnostic type.
+ numVsCodeSuggestions (int): The number of VS Code suggestions to show.
+ minReindexingInterval (int): The minimum interval between reindexes in ms.
+        minRefreshSummaryInterval (int): The minimum interval between summary refreshes in ms.
+ summaryBatchSize (int): The batch size for code summary in autocomplete.
+ jobReorderInterval (int): The interval in ms to reorder jobs in the job queue for summary.
+ stopRapidChanges (bool): Whether to stop rapid changes in autocomplete.
+ delaySummaryBatches (int): The delay in ms between summary batches.
+ delaySummaryBatchesRateLimit (int): The delay in ms in case of rate limit for delay summary batches.
+ maxSymbolsFuzzyMatch (int): The max number of symbols to fuzzy match.
+ fuzzySymbolMatchThreshold (int): The threshold for fuzzy symbol match.
+ symbolsCacheUpdateInterval (int): The interval in ms to update the symbols cache.
+ symbolsStorageUpdateInterval (int): The interval in ms to update the symbols storage.
+ editPredictionSimilarityThreshold (int): The threshold for edit prediction similarity.
+ minSearchWordLength (int): The minimum length of the word to be searched.
+ maxOccurrencesPerWord (int): The maximum occurrences of a particular search word present in the file.
+ maxWordsContentMatches (int): The maximum number of content matches from the client.
+ editPredictionEnabled (bool): Whether to enable edit prediction.
+ snapshotIntervalMs (int): The interval in ms to snapshot the file for recent file changes.
+ linesForSnapshot (int): Limit of max number of lines to snapshot for recent file changes.
+        embeddingConstants (CodellmEmbeddingConstants): Embedding constants.
+ """
+
+ def __init__(self, client, maxPendingRequests=None, acceptanceDelay=None, debounceDelay=None, recordUserAction=None, validateSuggestion=None, validationLinesThreshold=None, maxTrackedRecentChanges=None, diffThreshold=None, derivativeThreshold=None, defaultSurroundingLines=None, maxTrackedVisitChanges=None, selectionCooldownMs=None, viewingCooldownMs=None, maxLines=None, editCooldownMs=None, scrollDebounceMs=None, lspDeadline=None, diagnosticsThreshold=None, diagnosticEachThreshold=None, numVsCodeSuggestions=None, minReindexingInterval=None, minRefreshSummaryInterval=None, summaryBatchSize=None, jobReorderInterval=None, stopRapidChanges=None, delaySummaryBatches=None, delaySummaryBatchesRateLimit=None, maxSymbolsFuzzyMatch=None, fuzzySymbolMatchThreshold=None, symbolsCacheUpdateInterval=None, symbolsStorageUpdateInterval=None, editPredictionSimilarityThreshold=None, minSearchWordLength=None, maxOccurrencesPerWord=None, maxWordsContentMatches=None, editPredictionEnabled=None, snapshotIntervalMs=None, linesForSnapshot=None, embeddingConstants=None):
+ super().__init__(client, None)
+ self.max_pending_requests = maxPendingRequests
+ self.acceptance_delay = acceptanceDelay
+ self.debounce_delay = debounceDelay
+ self.record_user_action = recordUserAction
+ self.validate_suggestion = validateSuggestion
+ self.validation_lines_threshold = validationLinesThreshold
+ self.max_tracked_recent_changes = maxTrackedRecentChanges
+ self.diff_threshold = diffThreshold
+ self.derivative_threshold = derivativeThreshold
+ self.default_surrounding_lines = defaultSurroundingLines
+ self.max_tracked_visit_changes = maxTrackedVisitChanges
+ self.selection_cooldown_ms = selectionCooldownMs
+ self.viewing_cooldown_ms = viewingCooldownMs
+ self.max_lines = maxLines
+ self.edit_cooldown_ms = editCooldownMs
+ self.scroll_debounce_ms = scrollDebounceMs
+ self.lsp_deadline = lspDeadline
+ self.diagnostics_threshold = diagnosticsThreshold
+ self.diagnostic_each_threshold = diagnosticEachThreshold
+ self.num_vs_code_suggestions = numVsCodeSuggestions
+ self.min_reindexing_interval = minReindexingInterval
+ self.min_refresh_summary_interval = minRefreshSummaryInterval
+ self.summary_batch_size = summaryBatchSize
+ self.job_reorder_interval = jobReorderInterval
+ self.stop_rapid_changes = stopRapidChanges
+ self.delay_summary_batches = delaySummaryBatches
+ self.delay_summary_batches_rate_limit = delaySummaryBatchesRateLimit
+ self.max_symbols_fuzzy_match = maxSymbolsFuzzyMatch
+ self.fuzzy_symbol_match_threshold = fuzzySymbolMatchThreshold
+ self.symbols_cache_update_interval = symbolsCacheUpdateInterval
+ self.symbols_storage_update_interval = symbolsStorageUpdateInterval
+ self.edit_prediction_similarity_threshold = editPredictionSimilarityThreshold
+ self.min_search_word_length = minSearchWordLength
+ self.max_occurrences_per_word = maxOccurrencesPerWord
+ self.max_words_content_matches = maxWordsContentMatches
+ self.edit_prediction_enabled = editPredictionEnabled
+ self.snapshot_interval_ms = snapshotIntervalMs
+ self.lines_for_snapshot = linesForSnapshot
+ self.embedding_constants = embeddingConstants
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'max_pending_requests': repr(self.max_pending_requests), f'acceptance_delay': repr(self.acceptance_delay), f'debounce_delay': repr(self.debounce_delay), f'record_user_action': repr(self.record_user_action), f'validate_suggestion': repr(self.validate_suggestion), f'validation_lines_threshold': repr(self.validation_lines_threshold), f'max_tracked_recent_changes': repr(self.max_tracked_recent_changes), f'diff_threshold': repr(self.diff_threshold), f'derivative_threshold': repr(self.derivative_threshold), f'default_surrounding_lines': repr(self.default_surrounding_lines), f'max_tracked_visit_changes': repr(self.max_tracked_visit_changes), f'selection_cooldown_ms': repr(self.selection_cooldown_ms), f'viewing_cooldown_ms': repr(self.viewing_cooldown_ms), f'max_lines': repr(self.max_lines), f'edit_cooldown_ms': repr(self.edit_cooldown_ms), f'scroll_debounce_ms': repr(self.scroll_debounce_ms), f'lsp_deadline': repr(self.lsp_deadline), f'diagnostics_threshold': repr(self.diagnostics_threshold), f'diagnostic_each_threshold': repr(self.diagnostic_each_threshold), f'num_vs_code_suggestions': repr(self.num_vs_code_suggestions), f'min_reindexing_interval': repr(
+ self.min_reindexing_interval), f'min_refresh_summary_interval': repr(self.min_refresh_summary_interval), f'summary_batch_size': repr(self.summary_batch_size), f'job_reorder_interval': repr(self.job_reorder_interval), f'stop_rapid_changes': repr(self.stop_rapid_changes), f'delay_summary_batches': repr(self.delay_summary_batches), f'delay_summary_batches_rate_limit': repr(self.delay_summary_batches_rate_limit), f'max_symbols_fuzzy_match': repr(self.max_symbols_fuzzy_match), f'fuzzy_symbol_match_threshold': repr(self.fuzzy_symbol_match_threshold), f'symbols_cache_update_interval': repr(self.symbols_cache_update_interval), f'symbols_storage_update_interval': repr(self.symbols_storage_update_interval), f'edit_prediction_similarity_threshold': repr(self.edit_prediction_similarity_threshold), f'min_search_word_length': repr(self.min_search_word_length), f'max_occurrences_per_word': repr(self.max_occurrences_per_word), f'max_words_content_matches': repr(self.max_words_content_matches), f'edit_prediction_enabled': repr(self.edit_prediction_enabled), f'snapshot_interval_ms': repr(self.snapshot_interval_ms), f'lines_for_snapshot': repr(self.lines_for_snapshot), f'embedding_constants': repr(self.embedding_constants)}
+ class_name = "ConstantsAutocompleteResponse"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'max_pending_requests': self.max_pending_requests, 'acceptance_delay': self.acceptance_delay, 'debounce_delay': self.debounce_delay, 'record_user_action': self.record_user_action, 'validate_suggestion': self.validate_suggestion, 'validation_lines_threshold': self.validation_lines_threshold, 'max_tracked_recent_changes': self.max_tracked_recent_changes, 'diff_threshold': self.diff_threshold, 'derivative_threshold': self.derivative_threshold, 'default_surrounding_lines': self.default_surrounding_lines, 'max_tracked_visit_changes': self.max_tracked_visit_changes, 'selection_cooldown_ms': self.selection_cooldown_ms, 'viewing_cooldown_ms': self.viewing_cooldown_ms, 'max_lines': self.max_lines, 'edit_cooldown_ms': self.edit_cooldown_ms, 'scroll_debounce_ms': self.scroll_debounce_ms, 'lsp_deadline': self.lsp_deadline, 'diagnostics_threshold': self.diagnostics_threshold, 'diagnostic_each_threshold': self.diagnostic_each_threshold, 'num_vs_code_suggestions': self.num_vs_code_suggestions, 'min_reindexing_interval': self.min_reindexing_interval,
+ 'min_refresh_summary_interval': self.min_refresh_summary_interval, 'summary_batch_size': self.summary_batch_size, 'job_reorder_interval': self.job_reorder_interval, 'stop_rapid_changes': self.stop_rapid_changes, 'delay_summary_batches': self.delay_summary_batches, 'delay_summary_batches_rate_limit': self.delay_summary_batches_rate_limit, 'max_symbols_fuzzy_match': self.max_symbols_fuzzy_match, 'fuzzy_symbol_match_threshold': self.fuzzy_symbol_match_threshold, 'symbols_cache_update_interval': self.symbols_cache_update_interval, 'symbols_storage_update_interval': self.symbols_storage_update_interval, 'edit_prediction_similarity_threshold': self.edit_prediction_similarity_threshold, 'min_search_word_length': self.min_search_word_length, 'max_occurrences_per_word': self.max_occurrences_per_word, 'max_words_content_matches': self.max_words_content_matches, 'edit_prediction_enabled': self.edit_prediction_enabled, 'snapshot_interval_ms': self.snapshot_interval_ms, 'lines_for_snapshot': self.lines_for_snapshot, 'embedding_constants': self.embedding_constants}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
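
# --- Illustrative usage sketch (not part of the patch) ----------------------
# How an editor-side consumer might read a few of these constants to pace its
# autocomplete requests. The object would normally be returned by the API;
# here it is built locally, and the chosen values are hypothetical.
from abacusai.constants_autocomplete_response import ConstantsAutocompleteResponse

constants = ConstantsAutocompleteResponse(
    client=None, maxPendingRequests=2, debounceDelay=150, editPredictionEnabled=True)
if constants.edit_prediction_enabled:
    # Wait debounce_delay ms after the last keystroke and keep at most
    # max_pending_requests completion requests in flight.
    print(constants.debounce_delay, constants.max_pending_requests)
# -----------------------------------------------------------------------------
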
diff --git a/abacusai/custom_chat_instructions.py b/abacusai/custom_chat_instructions.py
index 3200de717..fbfb75e0b 100644
--- a/abacusai/custom_chat_instructions.py
+++ b/abacusai/custom_chat_instructions.py
@@ -13,10 +13,11 @@ class CustomChatInstructions(AbstractApiClass):
enableImageGeneration (bool): Whether or not image generation is enabled.
enableWebSearch (bool): Whether or not web search is enabled.
enablePlayground (bool): Whether or not playground is enabled.
+ enableMemories (bool): Whether or not memories are enabled.
experimentalFeatures (dict): Experimental features.
"""
- def __init__(self, client, userInformationInstructions=None, responseInstructions=None, enableCodeExecution=None, enableImageGeneration=None, enableWebSearch=None, enablePlayground=None, experimentalFeatures=None):
+ def __init__(self, client, userInformationInstructions=None, responseInstructions=None, enableCodeExecution=None, enableImageGeneration=None, enableWebSearch=None, enablePlayground=None, enableMemories=None, experimentalFeatures=None):
super().__init__(client, None)
self.user_information_instructions = userInformationInstructions
self.response_instructions = responseInstructions
@@ -24,12 +25,13 @@ def __init__(self, client, userInformationInstructions=None, responseInstruction
self.enable_image_generation = enableImageGeneration
self.enable_web_search = enableWebSearch
self.enable_playground = enablePlayground
+ self.enable_memories = enableMemories
self.experimental_features = experimentalFeatures
self.deprecated_keys = {}
def __repr__(self):
repr_dict = {f'user_information_instructions': repr(self.user_information_instructions), f'response_instructions': repr(self.response_instructions), f'enable_code_execution': repr(self.enable_code_execution), f'enable_image_generation': repr(
- self.enable_image_generation), f'enable_web_search': repr(self.enable_web_search), f'enable_playground': repr(self.enable_playground), f'experimental_features': repr(self.experimental_features)}
+ self.enable_image_generation), f'enable_web_search': repr(self.enable_web_search), f'enable_playground': repr(self.enable_playground), f'enable_memories': repr(self.enable_memories), f'experimental_features': repr(self.experimental_features)}
class_name = "CustomChatInstructions"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -42,6 +44,6 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'user_information_instructions': self.user_information_instructions, 'response_instructions': self.response_instructions, 'enable_code_execution': self.enable_code_execution,
- 'enable_image_generation': self.enable_image_generation, 'enable_web_search': self.enable_web_search, 'enable_playground': self.enable_playground, 'experimental_features': self.experimental_features}
+ resp = {'user_information_instructions': self.user_information_instructions, 'response_instructions': self.response_instructions, 'enable_code_execution': self.enable_code_execution, 'enable_image_generation':
+ self.enable_image_generation, 'enable_web_search': self.enable_web_search, 'enable_playground': self.enable_playground, 'enable_memories': self.enable_memories, 'experimental_features': self.experimental_features}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/custom_domain.py b/abacusai/custom_domain.py
new file mode 100644
index 000000000..66c67b041
--- /dev/null
+++ b/abacusai/custom_domain.py
@@ -0,0 +1,41 @@
+from .return_class import AbstractApiClass
+
+
+class CustomDomain(AbstractApiClass):
+ """
+ Result of adding a custom domain to a hosted app
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ status (bool): Whether the custom domain was added successfully
+        message (str): A message describing the result of adding the custom domain
+ expectedNameservers (list): The expected nameservers for the custom domain
+ currentNameservers (list): The current nameservers for the custom domain
+ """
+
+ def __init__(self, client, status=None, message=None, expectedNameservers=None, currentNameservers=None):
+ super().__init__(client, None)
+ self.status = status
+ self.message = message
+ self.expected_nameservers = expectedNameservers
+ self.current_nameservers = currentNameservers
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'status': repr(self.status), f'message': repr(self.message), f'expected_nameservers': repr(
+ self.expected_nameservers), f'current_nameservers': repr(self.current_nameservers)}
+ class_name = "CustomDomain"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'status': self.status, 'message': self.message, 'expected_nameservers':
+ self.expected_nameservers, 'current_nameservers': self.current_nameservers}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
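
# --- Illustrative usage sketch (not part of the patch) ----------------------
# Interpreting a CustomDomain result: when the add did not succeed, the
# expected vs. current nameservers show which DNS change is still missing.
# The object is built locally here with hypothetical values; in practice it
# is returned by the client.
from abacusai.custom_domain import CustomDomain

result = CustomDomain(client=None, status=False, message='Nameservers not updated yet',
                      expectedNameservers=['ns1.example.net', 'ns2.example.net'],
                      currentNameservers=['ns1.oldhost.example'])
if not result.status:
    missing = set(result.expected_nameservers) - set(result.current_nameservers)
    print(result.message, '| still missing:', sorted(missing))
# -----------------------------------------------------------------------------
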
diff --git a/abacusai/daemon_task_conversation.py b/abacusai/daemon_task_conversation.py
new file mode 100644
index 000000000..c09903820
--- /dev/null
+++ b/abacusai/daemon_task_conversation.py
@@ -0,0 +1,43 @@
+from .return_class import AbstractApiClass
+
+
+class DaemonTaskConversation(AbstractApiClass):
+ """
+    A conversation associated with a daemon task.
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ deploymentConversationId (id): The ID of the deployment conversation
+ createdAt (str): The creation timestamp
+ updatedAt (str): The last update timestamp
+ deploymentConversationName (str): The name of the conversation
+ externalApplicationId (str): The external application ID
+ """
+
+ def __init__(self, client, deploymentConversationId=None, createdAt=None, updatedAt=None, deploymentConversationName=None, externalApplicationId=None):
+ super().__init__(client, None)
+ self.deployment_conversation_id = deploymentConversationId
+ self.created_at = createdAt
+ self.updated_at = updatedAt
+ self.deployment_conversation_name = deploymentConversationName
+ self.external_application_id = externalApplicationId
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'deployment_conversation_id': repr(self.deployment_conversation_id), f'created_at': repr(self.created_at), f'updated_at': repr(
+ self.updated_at), f'deployment_conversation_name': repr(self.deployment_conversation_name), f'external_application_id': repr(self.external_application_id)}
+ class_name = "DaemonTaskConversation"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'deployment_conversation_id': self.deployment_conversation_id, 'created_at': self.created_at, 'updated_at': self.updated_at,
+ 'deployment_conversation_name': self.deployment_conversation_name, 'external_application_id': self.external_application_id}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/database_connector.py b/abacusai/database_connector.py
index d574db35d..74c00af48 100644
--- a/abacusai/database_connector.py
+++ b/abacusai/database_connector.py
@@ -101,3 +101,27 @@ def query(self, query: str):
query (str): The query to be run in the database connector.
"""
return self.client.query_database_connector(self.database_connector_id, query)
+
+ def query_datallm(self, query: str):
+ """
+        Runs a read-only query in the specified database connector.
+
+        This API is specifically designed for DataLLM and enforces read-only access with user-specific access control.
+
+
+ Args:
+ query (str): The query to be run in the database connector (must be read-only).
+ """
+ return self.client.query_database_connector_datallm(self.database_connector_id, query)
+
+ def get_auth(self):
+ """
+ Get the authentication details for a given database connector.
+
+ Args:
+ database_connector_id (str): The unique ID associated with the database connector.
+
+ Returns:
+ DatabaseConnector: The database connector with the authentication details.
+ """
+ return self.client.get_database_connector_auth(self.database_connector_id)
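
# --- Illustrative usage sketch (not part of the patch) ----------------------
# The two new DatabaseConnector helpers delegate to the client methods shown
# above. `connector` is assumed to be a DatabaseConnector fetched from an
# authenticated ApiClient; the SQL and table name are hypothetical.
from abacusai.database_connector import DatabaseConnector

def preview_subscriptions(connector: DatabaseConnector):
    # Read-only query with user-specific access control (the DataLLM path).
    rows = connector.query_datallm('SELECT customer_id, plan FROM subscriptions LIMIT 10')
    # Connector returned with its authentication details populated.
    auth = connector.get_auth()
    return rows, auth
# -----------------------------------------------------------------------------
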
diff --git a/abacusai/dataset.py b/abacusai/dataset.py
index 2668946ad..00ce862aa 100644
--- a/abacusai/dataset.py
+++ b/abacusai/dataset.py
@@ -96,6 +96,18 @@ def to_dict(self):
'extract_bounding_boxes': self.extract_bounding_boxes, 'merge_file_schemas': self.merge_file_schemas, 'reference_only_documentset': self.reference_only_documentset, 'version_limit': self.version_limit, 'schema': self._get_attribute_as_dict(self.schema), 'refresh_schedules': self._get_attribute_as_dict(self.refresh_schedules), 'latest_dataset_version': self._get_attribute_as_dict(self.latest_dataset_version), 'parsing_config': self._get_attribute_as_dict(self.parsing_config), 'document_processing_config': self._get_attribute_as_dict(self.document_processing_config), 'attachment_parsing_config': self._get_attribute_as_dict(self.attachment_parsing_config)}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
+ def get_raw_data_from_realtime(self, check_permissions: bool = False, start_time: str = None, end_time: str = None, column_filter: dict = None):
+ """
+        Returns raw data from a realtime dataset. Only Microsoft Teams datasets are currently supported due to data size constraints in realtime datasets.
+
+ Args:
+ check_permissions (bool): If True, checks user permissions using session email.
+ start_time (str): Start time filter (inclusive) for created_date_time_t in ISO 8601 format (e.g. 2025-05-13T08:25:11Z or 2025-05-13T08:25:11+00:00).
+ end_time (str): End time filter (inclusive) for created_date_time_t in ISO 8601 format (e.g. 2025-05-13T08:25:11Z or 2025-05-13T08:25:11+00:00).
+ column_filter (dict): Dictionary mapping column names to filter values. Only rows matching all column filters will be returned.
+ """
+ return self.client.get_raw_data_from_realtime_dataset(self.dataset_id, check_permissions, start_time, end_time, column_filter)
+
def create_version_from_file_connector(self, location: str = None, file_format: str = None, csv_delimiter: str = None, merge_file_schemas: bool = None, parsing_config: Union[dict, ParsingConfig] = None, sql_query: str = None):
"""
Creates a new version of the specified dataset.
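
# --- Illustrative usage sketch (not part of the patch) ----------------------
# Pulling a filtered window of raw rows from a realtime (Microsoft Teams)
# dataset. The time window and the column filter key/value are hypothetical.
from abacusai.dataset import Dataset

def fetch_recent_teams_rows(dataset: Dataset):
    return dataset.get_raw_data_from_realtime(
        check_permissions=True,                     # enforce the session user's permissions
        start_time='2025-05-13T00:00:00Z',          # inclusive ISO 8601 bounds on created_date_time_t
        end_time='2025-05-14T00:00:00Z',
        column_filter={'channel_name': 'support'})  # only rows matching every filter are returned
# -----------------------------------------------------------------------------
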
diff --git a/abacusai/default_llm.py b/abacusai/default_llm.py
new file mode 100644
index 000000000..be621a4ab
--- /dev/null
+++ b/abacusai/default_llm.py
@@ -0,0 +1,35 @@
+from .return_class import AbstractApiClass
+
+
+class DefaultLlm(AbstractApiClass):
+ """
+ A default LLM.
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ name (str): The name of the LLM.
+ enum (str): The enum of the LLM.
+ """
+
+ def __init__(self, client, name=None, enum=None):
+ super().__init__(client, None)
+ self.name = name
+ self.enum = enum
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'name': repr(self.name), f'enum': repr(self.enum)}
+ class_name = "DefaultLlm"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'name': self.name, 'enum': self.enum}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/deployment.py b/abacusai/deployment.py
index ca4bea4fb..722086128 100644
--- a/abacusai/deployment.py
+++ b/abacusai/deployment.py
@@ -43,12 +43,13 @@ class Deployment(AbstractApiClass):
outputOnlineFeatureGroupId (id): The online feature group ID that the deployment is outputting results to
realtimeMonitorId (id): The realtime monitor ID of the realtime-monitor that is associated with the deployment
runtimeConfigs (dict): The runtime configurations of a deployment which is used by some of the usecases during prediction.
+ isSystemCreated (bool): Whether the deployment is system created.
refreshSchedules (RefreshSchedule): A list of refresh schedules that indicate when the deployment will be updated to the latest model version.
featureGroupExportConfig (FeatureGroupExportConfig): The export config (file connector or database connector information) for feature group deployment exports.
defaultPredictionArguments (PredictionArguments): The default prediction arguments for prediction APIs
"""
- def __init__(self, client, deploymentId=None, name=None, status=None, description=None, deployedAt=None, createdAt=None, projectId=None, modelId=None, modelVersion=None, featureGroupId=None, featureGroupVersion=None, callsPerSecond=None, autoDeploy=None, skipMetricsCheck=None, algoName=None, regions=None, error=None, batchStreamingUpdates=None, algorithm=None, pendingModelVersion=None, modelDeploymentConfig=None, predictionOperatorId=None, predictionOperatorVersion=None, pendingPredictionOperatorVersion=None, onlineFeatureGroupId=None, outputOnlineFeatureGroupId=None, realtimeMonitorId=None, runtimeConfigs=None, refreshSchedules={}, featureGroupExportConfig={}, defaultPredictionArguments={}):
+ def __init__(self, client, deploymentId=None, name=None, status=None, description=None, deployedAt=None, createdAt=None, projectId=None, modelId=None, modelVersion=None, featureGroupId=None, featureGroupVersion=None, callsPerSecond=None, autoDeploy=None, skipMetricsCheck=None, algoName=None, regions=None, error=None, batchStreamingUpdates=None, algorithm=None, pendingModelVersion=None, modelDeploymentConfig=None, predictionOperatorId=None, predictionOperatorVersion=None, pendingPredictionOperatorVersion=None, onlineFeatureGroupId=None, outputOnlineFeatureGroupId=None, realtimeMonitorId=None, runtimeConfigs=None, isSystemCreated=None, refreshSchedules={}, featureGroupExportConfig={}, defaultPredictionArguments={}):
super().__init__(client, deploymentId)
self.deployment_id = deploymentId
self.name = name
@@ -78,6 +79,7 @@ def __init__(self, client, deploymentId=None, name=None, status=None, descriptio
self.output_online_feature_group_id = outputOnlineFeatureGroupId
self.realtime_monitor_id = realtimeMonitorId
self.runtime_configs = runtimeConfigs
+ self.is_system_created = isSystemCreated
self.refresh_schedules = client._build_class(
RefreshSchedule, refreshSchedules)
self.feature_group_export_config = client._build_class(
@@ -87,8 +89,8 @@ def __init__(self, client, deploymentId=None, name=None, status=None, descriptio
self.deprecated_keys = {}
def __repr__(self):
- repr_dict = {f'deployment_id': repr(self.deployment_id), f'name': repr(self.name), f'status': repr(self.status), f'description': repr(self.description), f'deployed_at': repr(self.deployed_at), f'created_at': repr(self.created_at), f'project_id': repr(self.project_id), f'model_id': repr(self.model_id), f'model_version': repr(self.model_version), f'feature_group_id': repr(self.feature_group_id), f'feature_group_version': repr(self.feature_group_version), f'calls_per_second': repr(self.calls_per_second), f'auto_deploy': repr(self.auto_deploy), f'skip_metrics_check': repr(self.skip_metrics_check), f'algo_name': repr(self.algo_name), f'regions': repr(self.regions), f'error': repr(self.error), f'batch_streaming_updates': repr(self.batch_streaming_updates), f'algorithm': repr(
- self.algorithm), f'pending_model_version': repr(self.pending_model_version), f'model_deployment_config': repr(self.model_deployment_config), f'prediction_operator_id': repr(self.prediction_operator_id), f'prediction_operator_version': repr(self.prediction_operator_version), f'pending_prediction_operator_version': repr(self.pending_prediction_operator_version), f'online_feature_group_id': repr(self.online_feature_group_id), f'output_online_feature_group_id': repr(self.output_online_feature_group_id), f'realtime_monitor_id': repr(self.realtime_monitor_id), f'runtime_configs': repr(self.runtime_configs), f'refresh_schedules': repr(self.refresh_schedules), f'feature_group_export_config': repr(self.feature_group_export_config), f'default_prediction_arguments': repr(self.default_prediction_arguments)}
+ repr_dict = {f'deployment_id': repr(self.deployment_id), f'name': repr(self.name), f'status': repr(self.status), f'description': repr(self.description), f'deployed_at': repr(self.deployed_at), f'created_at': repr(self.created_at), f'project_id': repr(self.project_id), f'model_id': repr(self.model_id), f'model_version': repr(self.model_version), f'feature_group_id': repr(self.feature_group_id), f'feature_group_version': repr(self.feature_group_version), f'calls_per_second': repr(self.calls_per_second), f'auto_deploy': repr(self.auto_deploy), f'skip_metrics_check': repr(self.skip_metrics_check), f'algo_name': repr(self.algo_name), f'regions': repr(self.regions), f'error': repr(self.error), f'batch_streaming_updates': repr(self.batch_streaming_updates), f'algorithm': repr(self.algorithm), f'pending_model_version': repr(
+ self.pending_model_version), f'model_deployment_config': repr(self.model_deployment_config), f'prediction_operator_id': repr(self.prediction_operator_id), f'prediction_operator_version': repr(self.prediction_operator_version), f'pending_prediction_operator_version': repr(self.pending_prediction_operator_version), f'online_feature_group_id': repr(self.online_feature_group_id), f'output_online_feature_group_id': repr(self.output_online_feature_group_id), f'realtime_monitor_id': repr(self.realtime_monitor_id), f'runtime_configs': repr(self.runtime_configs), f'is_system_created': repr(self.is_system_created), f'refresh_schedules': repr(self.refresh_schedules), f'feature_group_export_config': repr(self.feature_group_export_config), f'default_prediction_arguments': repr(self.default_prediction_arguments)}
class_name = "Deployment"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -101,8 +103,8 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'deployment_id': self.deployment_id, 'name': self.name, 'status': self.status, 'description': self.description, 'deployed_at': self.deployed_at, 'created_at': self.created_at, 'project_id': self.project_id, 'model_id': self.model_id, 'model_version': self.model_version, 'feature_group_id': self.feature_group_id, 'feature_group_version': self.feature_group_version, 'calls_per_second': self.calls_per_second, 'auto_deploy': self.auto_deploy, 'skip_metrics_check': self.skip_metrics_check, 'algo_name': self.algo_name, 'regions': self.regions, 'error': self.error, 'batch_streaming_updates': self.batch_streaming_updates, 'algorithm': self.algorithm, 'pending_model_version': self.pending_model_version,
- 'model_deployment_config': self.model_deployment_config, 'prediction_operator_id': self.prediction_operator_id, 'prediction_operator_version': self.prediction_operator_version, 'pending_prediction_operator_version': self.pending_prediction_operator_version, 'online_feature_group_id': self.online_feature_group_id, 'output_online_feature_group_id': self.output_online_feature_group_id, 'realtime_monitor_id': self.realtime_monitor_id, 'runtime_configs': self.runtime_configs, 'refresh_schedules': self._get_attribute_as_dict(self.refresh_schedules), 'feature_group_export_config': self._get_attribute_as_dict(self.feature_group_export_config), 'default_prediction_arguments': self._get_attribute_as_dict(self.default_prediction_arguments)}
+ resp = {'deployment_id': self.deployment_id, 'name': self.name, 'status': self.status, 'description': self.description, 'deployed_at': self.deployed_at, 'created_at': self.created_at, 'project_id': self.project_id, 'model_id': self.model_id, 'model_version': self.model_version, 'feature_group_id': self.feature_group_id, 'feature_group_version': self.feature_group_version, 'calls_per_second': self.calls_per_second, 'auto_deploy': self.auto_deploy, 'skip_metrics_check': self.skip_metrics_check, 'algo_name': self.algo_name, 'regions': self.regions, 'error': self.error, 'batch_streaming_updates': self.batch_streaming_updates, 'algorithm': self.algorithm, 'pending_model_version': self.pending_model_version, 'model_deployment_config': self.model_deployment_config,
+ 'prediction_operator_id': self.prediction_operator_id, 'prediction_operator_version': self.prediction_operator_version, 'pending_prediction_operator_version': self.pending_prediction_operator_version, 'online_feature_group_id': self.online_feature_group_id, 'output_online_feature_group_id': self.output_online_feature_group_id, 'realtime_monitor_id': self.realtime_monitor_id, 'runtime_configs': self.runtime_configs, 'is_system_created': self.is_system_created, 'refresh_schedules': self._get_attribute_as_dict(self.refresh_schedules), 'feature_group_export_config': self._get_attribute_as_dict(self.feature_group_export_config), 'default_prediction_arguments': self._get_attribute_as_dict(self.default_prediction_arguments)}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
def create_webhook(self, endpoint: str, webhook_event_type: str, payload_template: dict = None):
@@ -343,7 +345,7 @@ def create_realtime_monitor(self, realtime_monitor_schedule: str = None, lookbac
"""
return self.client.create_realtime_monitor(self.deployment_id, realtime_monitor_schedule, lookback_time)
- def get_conversation_response(self, message: str, deployment_token: str, deployment_conversation_id: str = None, external_session_id: str = None, llm_name: str = None, num_completion_tokens: int = None, system_message: str = None, temperature: float = 0.0, filter_key_values: dict = None, search_score_cutoff: float = None, chat_config: dict = None, doc_infos: list = None):
+ def get_conversation_response(self, message: str, deployment_token: str, deployment_conversation_id: str = None, external_session_id: str = None, llm_name: str = None, num_completion_tokens: int = None, system_message: str = None, temperature: float = 0.0, filter_key_values: dict = None, search_score_cutoff: float = None, chat_config: dict = None, doc_infos: list = None, user_info: dict = None, execute_usercode_tool: bool = False):
"""
Return a conversation response which continues the conversation based on the input message and deployment conversation id (if exists).
@@ -360,8 +362,9 @@ def get_conversation_response(self, message: str, deployment_token: str, deploym
search_score_cutoff (float): Cutoff for the document retriever score. Matching search results below this score will be ignored.
             chat_config (dict): A dictionary specifying the query chat config override.
doc_infos (list): An optional list of documents use for the conversation. A keyword 'doc_id' is expected to be present in each document for retrieving contents from docstore.
+ execute_usercode_tool (bool): If True, will return the tool output in the response.
"""
- return self.client.get_conversation_response(self.deployment_id, message, deployment_token, deployment_conversation_id, external_session_id, llm_name, num_completion_tokens, system_message, temperature, filter_key_values, search_score_cutoff, chat_config, doc_infos)
+ return self.client.get_conversation_response(self.deployment_id, message, deployment_token, deployment_conversation_id, external_session_id, llm_name, num_completion_tokens, system_message, temperature, filter_key_values, search_score_cutoff, chat_config, doc_infos, user_info, execute_usercode_tool)
def get_conversation_response_with_binary_data(self, deployment_token: str, message: str, deployment_conversation_id: str = None, external_session_id: str = None, llm_name: str = None, num_completion_tokens: int = None, system_message: str = None, temperature: float = 0.0, filter_key_values: dict = None, search_score_cutoff: float = None, chat_config: dict = None, attachments: None = None):
"""
@@ -495,7 +498,7 @@ def create_conversation(self, name: str = None, external_application_id: str = N
"""
return self.client.create_deployment_conversation(self.deployment_id, name, external_application_id)
- def list_conversations(self, external_application_id: str = None, conversation_type: Union[dict, DeploymentConversationType] = None, fetch_last_llm_info: bool = False):
+ def list_conversations(self, external_application_id: str = None, conversation_type: Union[dict, DeploymentConversationType] = None, fetch_last_llm_info: bool = False, limit: int = None, search: str = None):
"""
Lists all conversations for the given deployment and current user.
@@ -503,11 +506,13 @@ def list_conversations(self, external_application_id: str = None, conversation_t
external_application_id (str): The external application id associated with the deployment conversation. If specified, only conversations created on that application will be listed.
conversation_type (DeploymentConversationType): The type of the conversation indicating its origin.
fetch_last_llm_info (bool): If true, the LLM info for the most recent conversation will be fetched. Only applicable for system-created bots.
+ limit (int): The number of conversations to return. Defaults to 600.
+ search (str): The search query to filter conversations by title.
Returns:
list[DeploymentConversation]: The deployment conversations.
"""
- return self.client.list_deployment_conversations(self.deployment_id, external_application_id, conversation_type, fetch_last_llm_info)
+ return self.client.list_deployment_conversations(self.deployment_id, external_application_id, conversation_type, fetch_last_llm_info, limit, search)
def create_external_application(self, name: str = None, description: str = None, logo: str = None, theme: dict = None):
"""
diff --git a/abacusai/deployment_conversation.py b/abacusai/deployment_conversation.py
index d211c1c89..591638817 100644
--- a/abacusai/deployment_conversation.py
+++ b/abacusai/deployment_conversation.py
@@ -1,4 +1,5 @@
from .deployment_conversation_event import DeploymentConversationEvent
+from .hosted_artifact import HostedArtifact
from .return_class import AbstractApiClass
@@ -11,6 +12,8 @@ class DeploymentConversation(AbstractApiClass):
deploymentConversationId (str): The unique identifier of the deployment conversation.
name (str): The name of the deployment conversation.
deploymentId (str): The deployment id associated with the deployment conversation.
+ ownerUserId (str): The user id of the owner of the deployment conversation.
+ ownerOrgId (str): The organization id of the owner of the deployment conversation.
createdAt (str): The timestamp at which the deployment conversation was created.
lastEventCreatedAt (str): The timestamp at which the most recent corresponding deployment conversation event was created at.
hasHistory (bool): Whether the deployment conversation has any history.
@@ -26,14 +29,28 @@ class DeploymentConversation(AbstractApiClass):
llmBotIcon (str): The icon location of the LLM model used to generate the most recent response. Only used for system-created bots.
searchSuggestions (list): The list of search suggestions for the conversation.
chatllmTaskId (str): The chatllm task id associated with the deployment conversation.
+ conversationStatus (str): The status of the deployment conversation (used for deep agent conversations).
+ computerStatus (str): The status of the computer associated with the deployment conversation (used for deep agent conversations).
+ filesystemStatus (str): The status of the filesystem associated with the deployment conversation (used for deep agent conversations).
+ totalEvents (int): The total number of events in the deployment conversation.
+ contestNames (list[str]): Names of contests that this deployment is a part of.
+ daemonTaskId (str): The daemon task id associated with the deployment conversation.
+ parentDeploymentConversationId (str): The parent deployment conversation id associated with the deployment conversation.
+ introMessage (str): The intro message for the deployment conversation.
+ previewInfo (dict): App preview info.
+        latestContext (dict): A dict containing a list of important segments used for pagination in Deep Agent.
+ shareType (str): The sharing status of the deployment conversation (PUBLIC or PRIVATE).
history (DeploymentConversationEvent): The history of the deployment conversation.
+ hostedArtifacts (HostedArtifact): Artifacts that have been deployed by this conversation.
"""
- def __init__(self, client, deploymentConversationId=None, name=None, deploymentId=None, createdAt=None, lastEventCreatedAt=None, hasHistory=None, externalSessionId=None, regenerateAttempt=None, externalApplicationId=None, unusedDocumentUploadIds=None, humanizeInstructions=None, conversationWarning=None, conversationType=None, metadata=None, llmDisplayName=None, llmBotIcon=None, searchSuggestions=None, chatllmTaskId=None, history={}):
+ def __init__(self, client, deploymentConversationId=None, name=None, deploymentId=None, ownerUserId=None, ownerOrgId=None, createdAt=None, lastEventCreatedAt=None, hasHistory=None, externalSessionId=None, regenerateAttempt=None, externalApplicationId=None, unusedDocumentUploadIds=None, humanizeInstructions=None, conversationWarning=None, conversationType=None, metadata=None, llmDisplayName=None, llmBotIcon=None, searchSuggestions=None, chatllmTaskId=None, conversationStatus=None, computerStatus=None, filesystemStatus=None, totalEvents=None, contestNames=None, daemonTaskId=None, parentDeploymentConversationId=None, introMessage=None, previewInfo=None, latestContext=None, shareType=None, history={}, hostedArtifacts={}):
super().__init__(client, deploymentConversationId)
self.deployment_conversation_id = deploymentConversationId
self.name = name
self.deployment_id = deploymentId
+ self.owner_user_id = ownerUserId
+ self.owner_org_id = ownerOrgId
self.created_at = createdAt
self.last_event_created_at = lastEventCreatedAt
self.has_history = hasHistory
@@ -49,13 +66,26 @@ def __init__(self, client, deploymentConversationId=None, name=None, deploymentI
self.llm_bot_icon = llmBotIcon
self.search_suggestions = searchSuggestions
self.chatllm_task_id = chatllmTaskId
+ self.conversation_status = conversationStatus
+ self.computer_status = computerStatus
+ self.filesystem_status = filesystemStatus
+ self.total_events = totalEvents
+ self.contest_names = contestNames
+ self.daemon_task_id = daemonTaskId
+ self.parent_deployment_conversation_id = parentDeploymentConversationId
+ self.intro_message = introMessage
+ self.preview_info = previewInfo
+ self.latest_context = latestContext
+ self.share_type = shareType
self.history = client._build_class(
DeploymentConversationEvent, history)
+ self.hosted_artifacts = client._build_class(
+ HostedArtifact, hostedArtifacts)
self.deprecated_keys = {}
def __repr__(self):
- repr_dict = {f'deployment_conversation_id': repr(self.deployment_conversation_id), f'name': repr(self.name), f'deployment_id': repr(self.deployment_id), f'created_at': repr(self.created_at), f'last_event_created_at': repr(self.last_event_created_at), f'has_history': repr(self.has_history), f'external_session_id': repr(self.external_session_id), f'regenerate_attempt': repr(self.regenerate_attempt), f'external_application_id': repr(self.external_application_id), f'unused_document_upload_ids': repr(
- self.unused_document_upload_ids), f'humanize_instructions': repr(self.humanize_instructions), f'conversation_warning': repr(self.conversation_warning), f'conversation_type': repr(self.conversation_type), f'metadata': repr(self.metadata), f'llm_display_name': repr(self.llm_display_name), f'llm_bot_icon': repr(self.llm_bot_icon), f'search_suggestions': repr(self.search_suggestions), f'chatllm_task_id': repr(self.chatllm_task_id), f'history': repr(self.history)}
+ repr_dict = {f'deployment_conversation_id': repr(self.deployment_conversation_id), f'name': repr(self.name), f'deployment_id': repr(self.deployment_id), f'owner_user_id': repr(self.owner_user_id), f'owner_org_id': repr(self.owner_org_id), f'created_at': repr(self.created_at), f'last_event_created_at': repr(self.last_event_created_at), f'has_history': repr(self.has_history), f'external_session_id': repr(self.external_session_id), f'regenerate_attempt': repr(self.regenerate_attempt), f'external_application_id': repr(self.external_application_id), f'unused_document_upload_ids': repr(self.unused_document_upload_ids), f'humanize_instructions': repr(self.humanize_instructions), f'conversation_warning': repr(self.conversation_warning), f'conversation_type': repr(self.conversation_type), f'metadata': repr(
+ self.metadata), f'llm_display_name': repr(self.llm_display_name), f'llm_bot_icon': repr(self.llm_bot_icon), f'search_suggestions': repr(self.search_suggestions), f'chatllm_task_id': repr(self.chatllm_task_id), f'conversation_status': repr(self.conversation_status), f'computer_status': repr(self.computer_status), f'filesystem_status': repr(self.filesystem_status), f'total_events': repr(self.total_events), f'contest_names': repr(self.contest_names), f'daemon_task_id': repr(self.daemon_task_id), f'parent_deployment_conversation_id': repr(self.parent_deployment_conversation_id), f'intro_message': repr(self.intro_message), f'preview_info': repr(self.preview_info), f'latest_context': repr(self.latest_context), f'share_type': repr(self.share_type), f'history': repr(self.history), f'hosted_artifacts': repr(self.hosted_artifacts)}
class_name = "DeploymentConversation"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -68,11 +98,11 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'deployment_conversation_id': self.deployment_conversation_id, 'name': self.name, 'deployment_id': self.deployment_id, 'created_at': self.created_at, 'last_event_created_at': self.last_event_created_at, 'has_history': self.has_history, 'external_session_id': self.external_session_id, 'regenerate_attempt': self.regenerate_attempt, 'external_application_id': self.external_application_id, 'unused_document_upload_ids':
- self.unused_document_upload_ids, 'humanize_instructions': self.humanize_instructions, 'conversation_warning': self.conversation_warning, 'conversation_type': self.conversation_type, 'metadata': self.metadata, 'llm_display_name': self.llm_display_name, 'llm_bot_icon': self.llm_bot_icon, 'search_suggestions': self.search_suggestions, 'chatllm_task_id': self.chatllm_task_id, 'history': self._get_attribute_as_dict(self.history)}
+ resp = {'deployment_conversation_id': self.deployment_conversation_id, 'name': self.name, 'deployment_id': self.deployment_id, 'owner_user_id': self.owner_user_id, 'owner_org_id': self.owner_org_id, 'created_at': self.created_at, 'last_event_created_at': self.last_event_created_at, 'has_history': self.has_history, 'external_session_id': self.external_session_id, 'regenerate_attempt': self.regenerate_attempt, 'external_application_id': self.external_application_id, 'unused_document_upload_ids': self.unused_document_upload_ids, 'humanize_instructions': self.humanize_instructions, 'conversation_warning': self.conversation_warning, 'conversation_type': self.conversation_type, 'metadata': self.metadata, 'llm_display_name': self.llm_display_name,
+ 'llm_bot_icon': self.llm_bot_icon, 'search_suggestions': self.search_suggestions, 'chatllm_task_id': self.chatllm_task_id, 'conversation_status': self.conversation_status, 'computer_status': self.computer_status, 'filesystem_status': self.filesystem_status, 'total_events': self.total_events, 'contest_names': self.contest_names, 'daemon_task_id': self.daemon_task_id, 'parent_deployment_conversation_id': self.parent_deployment_conversation_id, 'intro_message': self.intro_message, 'preview_info': self.preview_info, 'latest_context': self.latest_context, 'share_type': self.share_type, 'history': self._get_attribute_as_dict(self.history), 'hosted_artifacts': self._get_attribute_as_dict(self.hosted_artifacts)}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
- def get(self, external_session_id: str = None, deployment_id: str = None, filter_intermediate_conversation_events: bool = True, get_unused_document_uploads: bool = False):
+ def get(self, external_session_id: str = None, deployment_id: str = None, filter_intermediate_conversation_events: bool = True, get_unused_document_uploads: bool = False, start: int = None, limit: int = None, include_all_versions: bool = False):
"""
Gets a deployment conversation.
@@ -81,11 +111,23 @@ def get(self, external_session_id: str = None, deployment_id: str = None, filter
deployment_id (str): The deployment this conversation belongs to. This is required if not logged in.
filter_intermediate_conversation_events (bool): If true, intermediate conversation events will be filtered out. Default is true.
get_unused_document_uploads (bool): If true, unused document uploads will be returned. Default is false.
+            start (int): The start index into the conversation history to return.
+            limit (int): The maximum number of conversation history events to return.
+ include_all_versions (bool): If True, includes all versions of the last bot message for version switching functionality.
Returns:
DeploymentConversation: The deployment conversation.
"""
- return self.client.get_deployment_conversation(self.deployment_conversation_id, external_session_id, deployment_id, filter_intermediate_conversation_events, get_unused_document_uploads)
+ return self.client.get_deployment_conversation(self.deployment_conversation_id, external_session_id, deployment_id, filter_intermediate_conversation_events, get_unused_document_uploads, start, limit, include_all_versions)
+
+ def get_file(self, file_path: str):
+ """
+ Gets a deployment conversation file.
+
+ Args:
+ file_path (str): The path of the file to get.
+ """
+ return self.client.get_deployment_conversation_file(self.deployment_conversation_id, file_path)
def delete(self, deployment_id: str = None):
"""
diff --git a/abacusai/deployment_conversation_event.py b/abacusai/deployment_conversation_event.py
index 6fc6acd0e..e0f9587bf 100644
--- a/abacusai/deployment_conversation_event.py
+++ b/abacusai/deployment_conversation_event.py
@@ -32,16 +32,25 @@ class DeploymentConversationEvent(AbstractApiClass):
streamedSectionData (str): Aggregated streamed section outputs from the agent in a list.
highlights (dict): Chunks with bounding boxes for highlighting the result sources.
llmDisplayName (str): The display name of the LLM model used to generate the response. Only used for system-created bots.
+ deepagentMode (str): The Deep Agent run mode (e.g. normal or high) associated with the message.
llmBotIcon (str): The icon location of the LLM model used to generate the response. Only used for system-created bots.
formResponse (dict): Contains form data response from the user when a Form Segment is given out by the bot.
routedLlm (str): The LLM that was chosen by RouteLLM to generate the response.
computePointsUsed (int): The number of compute points used for the message.
computerFiles (list): The list of files that were created by the computer agent.
toolUseRequest (dict): The tool use request for the message.
+ toolUseResult (dict): The tool use response for the message.
verificationSummary (str): The summary of the verification process for the message.
+ attachedUserFileNames (list): The list of files attached by the user on the message.
+ oldFileContent (str): The content of the file associated with an edit tool request before the file was edited
+ allVersions (list): All versions of the last bot message for version switching functionality.
+ selectedVersionIndex (int): The index of the currently selected version of the last bot message.
+ totalVersions (int): The total number of versions available for the last bot message.
+ isLastMessage (bool): Whether this message is the last message in the conversation.
+ allowTaskAutomation (bool): Whether to show the automate task button. Only set for the last message in the conversation.
"""
- def __init__(self, client, role=None, text=None, timestamp=None, messageIndex=None, regenerateAttempt=None, modelVersion=None, searchResults=None, isUseful=None, feedback=None, feedbackType=None, docInfos=None, keywordArguments=None, inputParams=None, attachments=None, responseVersion=None, agentWorkflowNodeId=None, nextAgentWorkflowNodeId=None, chatType=None, agentResponse=None, error=None, segments=None, streamedData=None, streamedSectionData=None, highlights=None, llmDisplayName=None, llmBotIcon=None, formResponse=None, routedLlm=None, computePointsUsed=None, computerFiles=None, toolUseRequest=None, verificationSummary=None):
+ def __init__(self, client, role=None, text=None, timestamp=None, messageIndex=None, regenerateAttempt=None, modelVersion=None, searchResults=None, isUseful=None, feedback=None, feedbackType=None, docInfos=None, keywordArguments=None, inputParams=None, attachments=None, responseVersion=None, agentWorkflowNodeId=None, nextAgentWorkflowNodeId=None, chatType=None, agentResponse=None, error=None, segments=None, streamedData=None, streamedSectionData=None, highlights=None, llmDisplayName=None, deepagentMode=None, llmBotIcon=None, formResponse=None, routedLlm=None, computePointsUsed=None, computerFiles=None, toolUseRequest=None, toolUseResult=None, verificationSummary=None, attachedUserFileNames=None, oldFileContent=None, allVersions=None, selectedVersionIndex=None, totalVersions=None, isLastMessage=None, allowTaskAutomation=None):
super().__init__(client, None)
self.role = role
self.text = text
@@ -68,18 +77,27 @@ def __init__(self, client, role=None, text=None, timestamp=None, messageIndex=No
self.streamed_section_data = streamedSectionData
self.highlights = highlights
self.llm_display_name = llmDisplayName
+ self.deepagent_mode = deepagentMode
self.llm_bot_icon = llmBotIcon
self.form_response = formResponse
self.routed_llm = routedLlm
self.compute_points_used = computePointsUsed
self.computer_files = computerFiles
self.tool_use_request = toolUseRequest
+ self.tool_use_result = toolUseResult
self.verification_summary = verificationSummary
+ self.attached_user_file_names = attachedUserFileNames
+ self.old_file_content = oldFileContent
+ self.all_versions = allVersions
+ self.selected_version_index = selectedVersionIndex
+ self.total_versions = totalVersions
+ self.is_last_message = isLastMessage
+ self.allow_task_automation = allowTaskAutomation
self.deprecated_keys = {}
def __repr__(self):
- repr_dict = {f'role': repr(self.role), f'text': repr(self.text), f'timestamp': repr(self.timestamp), f'message_index': repr(self.message_index), f'regenerate_attempt': repr(self.regenerate_attempt), f'model_version': repr(self.model_version), f'search_results': repr(self.search_results), f'is_useful': repr(self.is_useful), f'feedback': repr(self.feedback), f'feedback_type': repr(self.feedback_type), f'doc_infos': repr(self.doc_infos), f'keyword_arguments': repr(self.keyword_arguments), f'input_params': repr(self.input_params), f'attachments': repr(self.attachments), f'response_version': repr(self.response_version), f'agent_workflow_node_id': repr(self.agent_workflow_node_id), f'next_agent_workflow_node_id': repr(
- self.next_agent_workflow_node_id), f'chat_type': repr(self.chat_type), f'agent_response': repr(self.agent_response), f'error': repr(self.error), f'segments': repr(self.segments), f'streamed_data': repr(self.streamed_data), f'streamed_section_data': repr(self.streamed_section_data), f'highlights': repr(self.highlights), f'llm_display_name': repr(self.llm_display_name), f'llm_bot_icon': repr(self.llm_bot_icon), f'form_response': repr(self.form_response), f'routed_llm': repr(self.routed_llm), f'compute_points_used': repr(self.compute_points_used), f'computer_files': repr(self.computer_files), f'tool_use_request': repr(self.tool_use_request), f'verification_summary': repr(self.verification_summary)}
+ repr_dict = {f'role': repr(self.role), f'text': repr(self.text), f'timestamp': repr(self.timestamp), f'message_index': repr(self.message_index), f'regenerate_attempt': repr(self.regenerate_attempt), f'model_version': repr(self.model_version), f'search_results': repr(self.search_results), f'is_useful': repr(self.is_useful), f'feedback': repr(self.feedback), f'feedback_type': repr(self.feedback_type), f'doc_infos': repr(self.doc_infos), f'keyword_arguments': repr(self.keyword_arguments), f'input_params': repr(self.input_params), f'attachments': repr(self.attachments), f'response_version': repr(self.response_version), f'agent_workflow_node_id': repr(self.agent_workflow_node_id), f'next_agent_workflow_node_id': repr(self.next_agent_workflow_node_id), f'chat_type': repr(self.chat_type), f'agent_response': repr(self.agent_response), f'error': repr(self.error), f'segments': repr(self.segments), f'streamed_data': repr(
+ self.streamed_data), f'streamed_section_data': repr(self.streamed_section_data), f'highlights': repr(self.highlights), f'llm_display_name': repr(self.llm_display_name), f'deepagent_mode': repr(self.deepagent_mode), f'llm_bot_icon': repr(self.llm_bot_icon), f'form_response': repr(self.form_response), f'routed_llm': repr(self.routed_llm), f'compute_points_used': repr(self.compute_points_used), f'computer_files': repr(self.computer_files), f'tool_use_request': repr(self.tool_use_request), f'tool_use_result': repr(self.tool_use_result), f'verification_summary': repr(self.verification_summary), f'attached_user_file_names': repr(self.attached_user_file_names), f'old_file_content': repr(self.old_file_content), f'all_versions': repr(self.all_versions), f'selected_version_index': repr(self.selected_version_index), f'total_versions': repr(self.total_versions), f'is_last_message': repr(self.is_last_message), f'allow_task_automation': repr(self.allow_task_automation)}
class_name = "DeploymentConversationEvent"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -92,6 +110,6 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'role': self.role, 'text': self.text, 'timestamp': self.timestamp, 'message_index': self.message_index, 'regenerate_attempt': self.regenerate_attempt, 'model_version': self.model_version, 'search_results': self.search_results, 'is_useful': self.is_useful, 'feedback': self.feedback, 'feedback_type': self.feedback_type, 'doc_infos': self.doc_infos, 'keyword_arguments': self.keyword_arguments, 'input_params': self.input_params, 'attachments': self.attachments, 'response_version': self.response_version, 'agent_workflow_node_id': self.agent_workflow_node_id, 'next_agent_workflow_node_id':
- self.next_agent_workflow_node_id, 'chat_type': self.chat_type, 'agent_response': self.agent_response, 'error': self.error, 'segments': self.segments, 'streamed_data': self.streamed_data, 'streamed_section_data': self.streamed_section_data, 'highlights': self.highlights, 'llm_display_name': self.llm_display_name, 'llm_bot_icon': self.llm_bot_icon, 'form_response': self.form_response, 'routed_llm': self.routed_llm, 'compute_points_used': self.compute_points_used, 'computer_files': self.computer_files, 'tool_use_request': self.tool_use_request, 'verification_summary': self.verification_summary}
+ resp = {'role': self.role, 'text': self.text, 'timestamp': self.timestamp, 'message_index': self.message_index, 'regenerate_attempt': self.regenerate_attempt, 'model_version': self.model_version, 'search_results': self.search_results, 'is_useful': self.is_useful, 'feedback': self.feedback, 'feedback_type': self.feedback_type, 'doc_infos': self.doc_infos, 'keyword_arguments': self.keyword_arguments, 'input_params': self.input_params, 'attachments': self.attachments, 'response_version': self.response_version, 'agent_workflow_node_id': self.agent_workflow_node_id, 'next_agent_workflow_node_id': self.next_agent_workflow_node_id, 'chat_type': self.chat_type, 'agent_response': self.agent_response, 'error': self.error, 'segments': self.segments, 'streamed_data': self.streamed_data,
+ 'streamed_section_data': self.streamed_section_data, 'highlights': self.highlights, 'llm_display_name': self.llm_display_name, 'deepagent_mode': self.deepagent_mode, 'llm_bot_icon': self.llm_bot_icon, 'form_response': self.form_response, 'routed_llm': self.routed_llm, 'compute_points_used': self.compute_points_used, 'computer_files': self.computer_files, 'tool_use_request': self.tool_use_request, 'tool_use_result': self.tool_use_result, 'verification_summary': self.verification_summary, 'attached_user_file_names': self.attached_user_file_names, 'old_file_content': self.old_file_content, 'all_versions': self.all_versions, 'selected_version_index': self.selected_version_index, 'total_versions': self.total_versions, 'is_last_message': self.is_last_message, 'allow_task_automation': self.allow_task_automation}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/document_retriever_lookup_result.py b/abacusai/document_retriever_lookup_result.py
index 648460075..f7dc64496 100644
--- a/abacusai/document_retriever_lookup_result.py
+++ b/abacusai/document_retriever_lookup_result.py
@@ -14,9 +14,10 @@ class DocumentRetrieverLookupResult(AbstractApiClass):
boundingBoxes (list): Bounding boxes of the retrieved text from the original document.
documentSource (str): Document source name.
imageIds (list): List of Image IDs for all the pages.
+ metadata (dict): Metadata column values for the retrieved documents.
"""
- def __init__(self, client, document=None, score=None, properties=None, pages=None, boundingBoxes=None, documentSource=None, imageIds=None):
+ def __init__(self, client, document=None, score=None, properties=None, pages=None, boundingBoxes=None, documentSource=None, imageIds=None, metadata=None):
super().__init__(client, None)
self.document = document
self.score = score
@@ -25,11 +26,12 @@ def __init__(self, client, document=None, score=None, properties=None, pages=Non
self.bounding_boxes = boundingBoxes
self.document_source = documentSource
self.image_ids = imageIds
+ self.metadata = metadata
self.deprecated_keys = {}
def __repr__(self):
- repr_dict = {f'document': repr(self.document), f'score': repr(self.score), f'properties': repr(self.properties), f'pages': repr(
- self.pages), f'bounding_boxes': repr(self.bounding_boxes), f'document_source': repr(self.document_source), f'image_ids': repr(self.image_ids)}
+ repr_dict = {f'document': repr(self.document), f'score': repr(self.score), f'properties': repr(self.properties), f'pages': repr(self.pages), f'bounding_boxes': repr(
+ self.bounding_boxes), f'document_source': repr(self.document_source), f'image_ids': repr(self.image_ids), f'metadata': repr(self.metadata)}
class_name = "DocumentRetrieverLookupResult"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -43,5 +45,5 @@ def to_dict(self):
dict: The dict value representation of the class parameters
"""
resp = {'document': self.document, 'score': self.score, 'properties': self.properties, 'pages': self.pages,
- 'bounding_boxes': self.bounding_boxes, 'document_source': self.document_source, 'image_ids': self.image_ids}
+ 'bounding_boxes': self.bounding_boxes, 'document_source': self.document_source, 'image_ids': self.image_ids, 'metadata': self.metadata}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
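
To make the new `metadata` field concrete, here is a minimal local sketch that constructs a `DocumentRetrieverLookupResult` directly (in practice these objects come back from retriever lookups) and inspects `to_dict()`. The sample values are hypothetical, and `None` stands in for the client since no API call is made.

```python
from abacusai.document_retriever_lookup_result import DocumentRetrieverLookupResult

# Hypothetical values; in real use these objects are returned by lookup calls.
result = DocumentRetrieverLookupResult(
    client=None,                      # no API call is made in this sketch
    document="Quarterly revenue grew 12%...",
    score=0.87,
    documentSource="reports/q3.pdf",
    metadata={"department": "finance", "year": 2024},
)

# to_dict() maps camelCase constructor args to snake_case keys and drops None values,
# so only the populated fields (including the new 'metadata') appear.
print(result.to_dict())
# {'document': 'Quarterly revenue grew 12%...', 'score': 0.87,
#  'document_source': 'reports/q3.pdf', 'metadata': {'department': 'finance', 'year': 2024}}
```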
diff --git a/abacusai/document_retriever_version_logs.py b/abacusai/document_retriever_version_logs.py
new file mode 100644
index 000000000..4d8722d47
--- /dev/null
+++ b/abacusai/document_retriever_version_logs.py
@@ -0,0 +1,33 @@
+from .return_class import AbstractApiClass
+
+
+class DocumentRetrieverVersionLogs(AbstractApiClass):
+ """
+ Logs from document retriever version.
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ logs (list[str]): List of logs from document retriever version.
+ """
+
+ def __init__(self, client, logs=None):
+ super().__init__(client, None)
+ self.logs = logs
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'logs': repr(self.logs)}
+ class_name = "DocumentRetrieverVersionLogs"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'logs': self.logs}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/domain_purchase_result.py b/abacusai/domain_purchase_result.py
new file mode 100644
index 000000000..8ab07d5a5
--- /dev/null
+++ b/abacusai/domain_purchase_result.py
@@ -0,0 +1,36 @@
+from .return_class import AbstractApiClass
+
+
+class DomainPurchaseResult(AbstractApiClass):
+ """
+ Domain purchase result
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ domain (str): The domain name being purchased.
+ billedCredits (int): The total credits used for the purchase.
+ """
+
+ def __init__(self, client, domain=None, billedCredits=None):
+ super().__init__(client, None)
+ self.domain = domain
+ self.billed_credits = billedCredits
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'domain': repr(
+ self.domain), f'billed_credits': repr(self.billed_credits)}
+ class_name = "DomainPurchaseResult"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'domain': self.domain, 'billed_credits': self.billed_credits}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/domain_search_result.py b/abacusai/domain_search_result.py
new file mode 100644
index 000000000..3af70ef93
--- /dev/null
+++ b/abacusai/domain_search_result.py
@@ -0,0 +1,43 @@
+from .return_class import AbstractApiClass
+
+
+class DomainSearchResult(AbstractApiClass):
+ """
+ Domain search result
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ domain (str): The name of the domain in a search result
+ registerCredits (int): The number of credits required for registration
+ registerYears (int): The number of years the domain will be registered for
+ renewalCredits (int): The number of credits required for renewal
+ renewalYears (int): The number of years the domain will be renewed for
+ """
+
+ def __init__(self, client, domain=None, registerCredits=None, registerYears=None, renewalCredits=None, renewalYears=None):
+ super().__init__(client, None)
+ self.domain = domain
+ self.register_credits = registerCredits
+ self.register_years = registerYears
+ self.renewal_credits = renewalCredits
+ self.renewal_years = renewalYears
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'domain': repr(self.domain), f'register_credits': repr(self.register_credits), f'register_years': repr(
+ self.register_years), f'renewal_credits': repr(self.renewal_credits), f'renewal_years': repr(self.renewal_years)}
+ class_name = "DomainSearchResult"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'domain': self.domain, 'register_credits': self.register_credits, 'register_years': self.register_years,
+ 'renewal_credits': self.renewal_credits, 'renewal_years': self.renewal_years}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/edit_image_models.py b/abacusai/edit_image_models.py
new file mode 100644
index 000000000..0e87f7a46
--- /dev/null
+++ b/abacusai/edit_image_models.py
@@ -0,0 +1,36 @@
+from .return_class import AbstractApiClass
+
+
+class EditImageModels(AbstractApiClass):
+ """
+ Edit image models
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ models (list): The models available for edit image.
+ default (str): The default model for edit image.
+ """
+
+ def __init__(self, client, models=None, default=None):
+ super().__init__(client, None)
+ self.models = models
+ self.default = default
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'models': repr(self.models),
+ f'default': repr(self.default)}
+ class_name = "EditImageModels"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'models': self.models, 'default': self.default}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/entri_auth_token.py b/abacusai/entri_auth_token.py
new file mode 100644
index 000000000..039a6e995
--- /dev/null
+++ b/abacusai/entri_auth_token.py
@@ -0,0 +1,39 @@
+from .return_class import AbstractApiClass
+
+
+class EntriAuthToken(AbstractApiClass):
+ """
+ Entri Auth Token
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ token (str): The authentication token for Entri
+ applicationId (str): The application ID from the Entri dashboard
+ ttl (int): The duration in milliseconds for which the token is valid
+ """
+
+ def __init__(self, client, token=None, applicationId=None, ttl=None):
+ super().__init__(client, None)
+ self.token = token
+ self.application_id = applicationId
+ self.ttl = ttl
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'token': repr(self.token), f'application_id': repr(
+ self.application_id), f'ttl': repr(self.ttl)}
+ class_name = "EntriAuthToken"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'token': self.token,
+ 'application_id': self.application_id, 'ttl': self.ttl}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/external_application.py b/abacusai/external_application.py
index 365b59dfe..4543c918d 100644
--- a/abacusai/external_application.py
+++ b/abacusai/external_application.py
@@ -24,9 +24,14 @@ class ExternalApplication(AbstractApiClass):
isCustomizable (bool): Whether the external application is customizable.
isDeprecated (bool): Whether the external application is deprecated. Only applicable for system created bots. Deprecated external applications will not show in the UI.
isVisible (bool): Whether the external application should be shown in the dropdown.
+ hasThinkingOption (bool): Whether to show the thinking option in the toolbar.
+ hasFastMode (bool): Whether the external application has fast mode.
+ onlyImageGenEnabled (bool): Whether the LLM only allows image generation.
+ projectId (str): The project id associated with the external application.
+ isCodellmChatmodeSupported (bool): Whether the external application supports CodeLLM chat mode.
"""
- def __init__(self, client, name=None, externalApplicationId=None, deploymentId=None, description=None, logo=None, theme=None, userGroupIds=None, useCase=None, isAgent=None, status=None, deploymentConversationRetentionHours=None, managedUserService=None, predictionOverrides=None, isSystemCreated=None, isCustomizable=None, isDeprecated=None, isVisible=None):
+ def __init__(self, client, name=None, externalApplicationId=None, deploymentId=None, description=None, logo=None, theme=None, userGroupIds=None, useCase=None, isAgent=None, status=None, deploymentConversationRetentionHours=None, managedUserService=None, predictionOverrides=None, isSystemCreated=None, isCustomizable=None, isDeprecated=None, isVisible=None, hasThinkingOption=None, hasFastMode=None, onlyImageGenEnabled=None, projectId=None, isCodellmChatmodeSupported=None):
super().__init__(client, externalApplicationId)
self.name = name
self.external_application_id = externalApplicationId
@@ -45,11 +50,16 @@ def __init__(self, client, name=None, externalApplicationId=None, deploymentId=N
self.is_customizable = isCustomizable
self.is_deprecated = isDeprecated
self.is_visible = isVisible
+ self.has_thinking_option = hasThinkingOption
+ self.has_fast_mode = hasFastMode
+ self.only_image_gen_enabled = onlyImageGenEnabled
+ self.project_id = projectId
+ self.is_codellm_chatmode_supported = isCodellmChatmodeSupported
self.deprecated_keys = {}
def __repr__(self):
- repr_dict = {f'name': repr(self.name), f'external_application_id': repr(self.external_application_id), f'deployment_id': repr(self.deployment_id), f'description': repr(self.description), f'logo': repr(self.logo), f'theme': repr(self.theme), f'user_group_ids': repr(self.user_group_ids), f'use_case': repr(self.use_case), f'is_agent': repr(self.is_agent), f'status': repr(
- self.status), f'deployment_conversation_retention_hours': repr(self.deployment_conversation_retention_hours), f'managed_user_service': repr(self.managed_user_service), f'prediction_overrides': repr(self.prediction_overrides), f'is_system_created': repr(self.is_system_created), f'is_customizable': repr(self.is_customizable), f'is_deprecated': repr(self.is_deprecated), f'is_visible': repr(self.is_visible)}
+ repr_dict = {f'name': repr(self.name), f'external_application_id': repr(self.external_application_id), f'deployment_id': repr(self.deployment_id), f'description': repr(self.description), f'logo': repr(self.logo), f'theme': repr(self.theme), f'user_group_ids': repr(self.user_group_ids), f'use_case': repr(self.use_case), f'is_agent': repr(self.is_agent), f'status': repr(self.status), f'deployment_conversation_retention_hours': repr(self.deployment_conversation_retention_hours), f'managed_user_service': repr(
+ self.managed_user_service), f'prediction_overrides': repr(self.prediction_overrides), f'is_system_created': repr(self.is_system_created), f'is_customizable': repr(self.is_customizable), f'is_deprecated': repr(self.is_deprecated), f'is_visible': repr(self.is_visible), f'has_thinking_option': repr(self.has_thinking_option), f'has_fast_mode': repr(self.has_fast_mode), f'only_image_gen_enabled': repr(self.only_image_gen_enabled), f'project_id': repr(self.project_id), f'is_codellm_chatmode_supported': repr(self.is_codellm_chatmode_supported)}
class_name = "ExternalApplication"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -62,10 +72,28 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'name': self.name, 'external_application_id': self.external_application_id, 'deployment_id': self.deployment_id, 'description': self.description, 'logo': self.logo, 'theme': self.theme, 'user_group_ids': self.user_group_ids, 'use_case': self.use_case, 'is_agent': self.is_agent, 'status': self.status,
- 'deployment_conversation_retention_hours': self.deployment_conversation_retention_hours, 'managed_user_service': self.managed_user_service, 'prediction_overrides': self.prediction_overrides, 'is_system_created': self.is_system_created, 'is_customizable': self.is_customizable, 'is_deprecated': self.is_deprecated, 'is_visible': self.is_visible}
+ resp = {'name': self.name, 'external_application_id': self.external_application_id, 'deployment_id': self.deployment_id, 'description': self.description, 'logo': self.logo, 'theme': self.theme, 'user_group_ids': self.user_group_ids, 'use_case': self.use_case, 'is_agent': self.is_agent, 'status': self.status, 'deployment_conversation_retention_hours': self.deployment_conversation_retention_hours, 'managed_user_service': self.managed_user_service,
+ 'prediction_overrides': self.prediction_overrides, 'is_system_created': self.is_system_created, 'is_customizable': self.is_customizable, 'is_deprecated': self.is_deprecated, 'is_visible': self.is_visible, 'has_thinking_option': self.has_thinking_option, 'has_fast_mode': self.has_fast_mode, 'only_image_gen_enabled': self.only_image_gen_enabled, 'project_id': self.project_id, 'is_codellm_chatmode_supported': self.is_codellm_chatmode_supported}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
+ def add_developers_to(self):
+ """
+ Adds a permission for the platform App User Group to access an External Application.
+
+ Args:
+ external_application_id (str): The ID of the External Application.
+ """
+ return self.client.add_developers_to_external_application(self.external_application_id)
+
+ def remove_developers_from(self):
+ """
+ Removes a permission for the platform App User Group to access an External Application.
+
+ Args:
+ external_application_id (str): The ID of the External Application.
+ """
+ return self.client.remove_developers_from_external_application(self.external_application_id)
+
def update(self, name: str = None, description: str = None, theme: dict = None, deployment_id: str = None, deployment_conversation_retention_hours: int = None, reset_retention_policy: bool = False):
"""
Updates an External Application.
diff --git a/abacusai/feature_group_version.py b/abacusai/feature_group_version.py
index bb29a3f4b..c415ac845 100644
--- a/abacusai/feature_group_version.py
+++ b/abacusai/feature_group_version.py
@@ -122,7 +122,7 @@ def export_to_database_connector(self, database_connector_id: str, object_name:
Args:
database_connector_id (str): Unique string identifier for the Database Connector to export to.
object_name (str): Name of the database object to write to.
- write_mode (str): Enum string indicating whether to use INSERT or UPSERT.
+ write_mode (str): Enum string indicating whether to use INSERT, UPSERT, or REPLACE.
database_feature_mapping (dict): Key/value pair JSON object of "database connector column" -> "feature name" pairs.
id_column (str): Required if write_mode is UPSERT. Indicates which database column should be used as the lookup key.
additional_id_columns (list): For database connectors which support it, additional ID columns to use as a complex key for upserting.
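
Since `REPLACE` is now listed as a valid `write_mode`, here is a hedged sketch of an export call using it. The identifiers and column mapping are placeholders, and the call assumes a `FeatureGroupVersion` object already retrieved through an authenticated client.

```python
# Sketch only: 'feature_group_version' is assumed to be an existing
# abacusai.FeatureGroupVersion previously fetched with an authenticated ApiClient.
# The connector ID, object name, and column mapping below are placeholders.
feature_group_version.export_to_database_connector(
    database_connector_id="database_connector_id",
    object_name="analytics.daily_metrics",
    write_mode="REPLACE",  # now documented alongside INSERT and UPSERT
    database_feature_mapping={"metric_date": "date", "metric_value": "value"},
)
```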
diff --git a/abacusai/feature_mapping.py b/abacusai/feature_mapping.py
index 166fef942..f8a4a9e03 100644
--- a/abacusai/feature_mapping.py
+++ b/abacusai/feature_mapping.py
@@ -7,7 +7,7 @@ class FeatureMapping(AbstractApiClass):
Args:
client (ApiClient): An authenticated API Client instance
- featureMapping (str): The mapping of the feature. The possible values will be based on the project's use-case. See the (Use Case Documentation)[https://api.abacus.ai/app/help/useCases] for more details.
+ featureMapping (str): The mapping of the feature. The possible values will be based on the project's use-case. See the [Use Case Documentation](https://api.abacus.ai/app/help/developer-platform/useCases) for more details.
featureName (str): The unique name of the feature.
"""
diff --git a/abacusai/file_connector.py b/abacusai/file_connector.py
index 6a56689da..07603ccdc 100644
--- a/abacusai/file_connector.py
+++ b/abacusai/file_connector.py
@@ -11,19 +11,21 @@ class FileConnector(AbstractApiClass):
verified (bool): `true` if the bucket has passed verification
writePermission (bool): `true` if Abacus.AI has permission to write to this bucket
authExpiresAt (str): The time when the file connector's auth expires, if applicable
+ createdAt (str): The timestamp at which the file connector was created
"""
- def __init__(self, client, bucket=None, verified=None, writePermission=None, authExpiresAt=None):
+ def __init__(self, client, bucket=None, verified=None, writePermission=None, authExpiresAt=None, createdAt=None):
super().__init__(client, None)
self.bucket = bucket
self.verified = verified
self.write_permission = writePermission
self.auth_expires_at = authExpiresAt
+ self.created_at = createdAt
self.deprecated_keys = {}
def __repr__(self):
repr_dict = {f'bucket': repr(self.bucket), f'verified': repr(self.verified), f'write_permission': repr(
- self.write_permission), f'auth_expires_at': repr(self.auth_expires_at)}
+ self.write_permission), f'auth_expires_at': repr(self.auth_expires_at), f'created_at': repr(self.created_at)}
class_name = "FileConnector"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -36,6 +38,6 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'bucket': self.bucket, 'verified': self.verified,
- 'write_permission': self.write_permission, 'auth_expires_at': self.auth_expires_at}
+ resp = {'bucket': self.bucket, 'verified': self.verified, 'write_permission': self.write_permission,
+ 'auth_expires_at': self.auth_expires_at, 'created_at': self.created_at}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/fs_entry.py b/abacusai/fs_entry.py
new file mode 100644
index 000000000..6104ce857
--- /dev/null
+++ b/abacusai/fs_entry.py
@@ -0,0 +1,47 @@
+from .return_class import AbstractApiClass
+
+
+class FsEntry(AbstractApiClass):
+ """
+ File system entry.
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ name (str): The name of the file/folder
+ type (str): The type of entry (file/folder)
+ path (str): The path of the entry
+ size (int): The size of the entry in bytes
+ modified (int): The last modified timestamp
+ isFolderEmpty (bool): Whether the folder is empty (only for folders)
+ children (list): List of child FSEntry objects (only for folders)
+ """
+
+ def __init__(self, client, name=None, type=None, path=None, size=None, modified=None, isFolderEmpty=None, children=None):
+ super().__init__(client, None)
+ self.name = name
+ self.type = type
+ self.path = path
+ self.size = size
+ self.modified = modified
+ self.isFolderEmpty = isFolderEmpty
+ self.children = children
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'name': repr(self.name), f'type': repr(self.type), f'path': repr(self.path), f'size': repr(
+ self.size), f'modified': repr(self.modified), f'isFolderEmpty': repr(self.isFolderEmpty), f'children': repr(self.children)}
+ class_name = "FsEntry"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'name': self.name, 'type': self.type, 'path': self.path, 'size': self.size,
+ 'modified': self.modified, 'isFolderEmpty': self.isFolderEmpty, 'children': self.children}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/hosted_app.py b/abacusai/hosted_app.py
new file mode 100644
index 000000000..c91937b83
--- /dev/null
+++ b/abacusai/hosted_app.py
@@ -0,0 +1,41 @@
+from .return_class import AbstractApiClass
+
+
+class HostedApp(AbstractApiClass):
+ """
+ Hosted App
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ hostedAppId (id): The ID of the hosted app
+ deploymentConversationId (id): The ID of the deployment conversation
+ name (str): The name of the hosted app
+ createdAt (str): The creation timestamp
+ """
+
+ def __init__(self, client, hostedAppId=None, deploymentConversationId=None, name=None, createdAt=None):
+ super().__init__(client, hostedAppId)
+ self.hosted_app_id = hostedAppId
+ self.deployment_conversation_id = deploymentConversationId
+ self.name = name
+ self.created_at = createdAt
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'hosted_app_id': repr(self.hosted_app_id), f'deployment_conversation_id': repr(
+ self.deployment_conversation_id), f'name': repr(self.name), f'created_at': repr(self.created_at)}
+ class_name = "HostedApp"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'hosted_app_id': self.hosted_app_id, 'deployment_conversation_id':
+ self.deployment_conversation_id, 'name': self.name, 'created_at': self.created_at}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/hosted_app_container.py b/abacusai/hosted_app_container.py
new file mode 100644
index 000000000..6c210ee49
--- /dev/null
+++ b/abacusai/hosted_app_container.py
@@ -0,0 +1,89 @@
+from .return_class import AbstractApiClass
+
+
+class HostedAppContainer(AbstractApiClass):
+ """
+ Hosted app + Deep agent container information.
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ hostedAppContainerId (id): The ID of the hosted app container
+ hostedAppId (id): The ID of the hosted app
+ deploymentConversationId (id): The deployment conversation ID
+ hostedAppVersion (id): The instance of the hosted app
+ name (str): The name of the hosted app
+ userId (id): The ID of the creation user
+ email (str): The email of the creation user
+ createdAt (str): Creation timestamp
+ updatedAt (str): Last update timestamp
+ containerImage (str): Container image name
+ route (str): Container route
+ appConfig (dict): App configuration
+ isDev (bool): Whether this is a dev container
+ isDeployable (bool): Can this version be deployed
+ isPreviewAvailable (bool): Is the dev preview available on the container
+ lifecycle (str): Container lifecycle status (PENDING/DEPLOYING/ACTIVE/FAILED/STOPPED/DELETING)
+ status (str): Container status (RUNNING/STOPPED/DEPLOYING/FAILED)
+ deployedStatus (str): Deployment status (PENDING/ACTIVE/STOPPED/NOT_DEPLOYED)
+ accessLevel (str): Access Level (PUBLIC/PRIVATE/DEDICATED)
+ hostname (str): Hostname of the deployed app
+ llmArtifactId (id): The ID of the LLM artifact
+ artifactType (str): The type of the artifact
+ deployedLlmArtifactId (id): The ID of the deployed LLM artifact
+ hasDatabase (bool): Whether the app has a database associated with it
+ hasStorage (bool): Whether the app has cloud storage associated with it
+ webAppProjectId (id): The ID of the web app project
+ parentConversationId (id): The ID of the parent conversation
+ projectMetadata (dict): The metadata of the web app project
+ """
+
+ def __init__(self, client, hostedAppContainerId=None, hostedAppId=None, deploymentConversationId=None, hostedAppVersion=None, name=None, userId=None, email=None, createdAt=None, updatedAt=None, containerImage=None, route=None, appConfig=None, isDev=None, isDeployable=None, isPreviewAvailable=None, lifecycle=None, status=None, deployedStatus=None, accessLevel=None, hostname=None, llmArtifactId=None, artifactType=None, deployedLlmArtifactId=None, hasDatabase=None, hasStorage=None, webAppProjectId=None, parentConversationId=None, projectMetadata=None):
+ super().__init__(client, hostedAppContainerId)
+ self.hosted_app_container_id = hostedAppContainerId
+ self.hosted_app_id = hostedAppId
+ self.deployment_conversation_id = deploymentConversationId
+ self.hosted_app_version = hostedAppVersion
+ self.name = name
+ self.user_id = userId
+ self.email = email
+ self.created_at = createdAt
+ self.updated_at = updatedAt
+ self.container_image = containerImage
+ self.route = route
+ self.app_config = appConfig
+ self.is_dev = isDev
+ self.is_deployable = isDeployable
+ self.is_preview_available = isPreviewAvailable
+ self.lifecycle = lifecycle
+ self.status = status
+ self.deployed_status = deployedStatus
+ self.access_level = accessLevel
+ self.hostname = hostname
+ self.llm_artifact_id = llmArtifactId
+ self.artifact_type = artifactType
+ self.deployed_llm_artifact_id = deployedLlmArtifactId
+ self.has_database = hasDatabase
+ self.has_storage = hasStorage
+ self.web_app_project_id = webAppProjectId
+ self.parent_conversation_id = parentConversationId
+ self.project_metadata = projectMetadata
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'hosted_app_container_id': repr(self.hosted_app_container_id), f'hosted_app_id': repr(self.hosted_app_id), f'deployment_conversation_id': repr(self.deployment_conversation_id), f'hosted_app_version': repr(self.hosted_app_version), f'name': repr(self.name), f'user_id': repr(self.user_id), f'email': repr(self.email), f'created_at': repr(self.created_at), f'updated_at': repr(self.updated_at), f'container_image': repr(self.container_image), f'route': repr(self.route), f'app_config': repr(self.app_config), f'is_dev': repr(self.is_dev), f'is_deployable': repr(self.is_deployable), f'is_preview_available': repr(
+ self.is_preview_available), f'lifecycle': repr(self.lifecycle), f'status': repr(self.status), f'deployed_status': repr(self.deployed_status), f'access_level': repr(self.access_level), f'hostname': repr(self.hostname), f'llm_artifact_id': repr(self.llm_artifact_id), f'artifact_type': repr(self.artifact_type), f'deployed_llm_artifact_id': repr(self.deployed_llm_artifact_id), f'has_database': repr(self.has_database), f'has_storage': repr(self.has_storage), f'web_app_project_id': repr(self.web_app_project_id), f'parent_conversation_id': repr(self.parent_conversation_id), f'project_metadata': repr(self.project_metadata)}
+ class_name = "HostedAppContainer"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'hosted_app_container_id': self.hosted_app_container_id, 'hosted_app_id': self.hosted_app_id, 'deployment_conversation_id': self.deployment_conversation_id, 'hosted_app_version': self.hosted_app_version, 'name': self.name, 'user_id': self.user_id, 'email': self.email, 'created_at': self.created_at, 'updated_at': self.updated_at, 'container_image': self.container_image, 'route': self.route, 'app_config': self.app_config, 'is_dev': self.is_dev, 'is_deployable': self.is_deployable, 'is_preview_available': self.is_preview_available,
+ 'lifecycle': self.lifecycle, 'status': self.status, 'deployed_status': self.deployed_status, 'access_level': self.access_level, 'hostname': self.hostname, 'llm_artifact_id': self.llm_artifact_id, 'artifact_type': self.artifact_type, 'deployed_llm_artifact_id': self.deployed_llm_artifact_id, 'has_database': self.has_database, 'has_storage': self.has_storage, 'web_app_project_id': self.web_app_project_id, 'parent_conversation_id': self.parent_conversation_id, 'project_metadata': self.project_metadata}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/hosted_app_file_read.py b/abacusai/hosted_app_file_read.py
new file mode 100644
index 000000000..7c19a96eb
--- /dev/null
+++ b/abacusai/hosted_app_file_read.py
@@ -0,0 +1,41 @@
+from .return_class import AbstractApiClass
+
+
+class HostedAppFileRead(AbstractApiClass):
+ """
+ Result of reading file content from a hosted app container.
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ content (str): The contents of the file or a portion of it.
+ start (int): If present, the starting position of the read.
+ end (int): If present, the last position in the file returned in this read.
+ retcode (int): If the read is associated with a log, the return code of the command.
+ """
+
+ def __init__(self, client, content=None, start=None, end=None, retcode=None):
+ super().__init__(client, None)
+ self.content = content
+ self.start = start
+ self.end = end
+ self.retcode = retcode
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'content': repr(self.content), f'start': repr(
+ self.start), f'end': repr(self.end), f'retcode': repr(self.retcode)}
+ class_name = "HostedAppFileRead"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'content': self.content, 'start': self.start,
+ 'end': self.end, 'retcode': self.retcode}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/hosted_artifact.py b/abacusai/hosted_artifact.py
new file mode 100644
index 000000000..b00792149
--- /dev/null
+++ b/abacusai/hosted_artifact.py
@@ -0,0 +1,47 @@
+from .return_class import AbstractApiClass
+
+
+class HostedArtifact(AbstractApiClass):
+ """
+ A hosted artifact being served by the platform.
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ hostname (str): The url at which the application is being hosted.
+ artifactType (str): The type of artifact being hosted.
+ llmArtifactId (str): The artifact id being hosted.
+ lifecycle (str): The lifecycle of the artifact.
+ externalApplicationId (str): Agent that deployed this application.
+ deploymentConversationId (str): Conversation that deployed this artifact, null if not applicable.
+ conversationSequenceNumber (int): Conversation event associated with this artifact, null if not applicable.
+ """
+
+ def __init__(self, client, hostname=None, artifactType=None, llmArtifactId=None, lifecycle=None, externalApplicationId=None, deploymentConversationId=None, conversationSequenceNumber=None):
+ super().__init__(client, None)
+ self.hostname = hostname
+ self.artifact_type = artifactType
+ self.llm_artifact_id = llmArtifactId
+ self.lifecycle = lifecycle
+ self.external_application_id = externalApplicationId
+ self.deployment_conversation_id = deploymentConversationId
+ self.conversation_sequence_number = conversationSequenceNumber
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'hostname': repr(self.hostname), f'artifact_type': repr(self.artifact_type), f'llm_artifact_id': repr(self.llm_artifact_id), f'lifecycle': repr(self.lifecycle), f'external_application_id': repr(
+ self.external_application_id), f'deployment_conversation_id': repr(self.deployment_conversation_id), f'conversation_sequence_number': repr(self.conversation_sequence_number)}
+ class_name = "HostedArtifact"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'hostname': self.hostname, 'artifact_type': self.artifact_type, 'llm_artifact_id': self.llm_artifact_id, 'lifecycle': self.lifecycle, 'external_application_id':
+ self.external_application_id, 'deployment_conversation_id': self.deployment_conversation_id, 'conversation_sequence_number': self.conversation_sequence_number}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/hosted_database.py b/abacusai/hosted_database.py
new file mode 100644
index 000000000..30a6e2542
--- /dev/null
+++ b/abacusai/hosted_database.py
@@ -0,0 +1,43 @@
+from .return_class import AbstractApiClass
+
+
+class HostedDatabase(AbstractApiClass):
+ """
+ Hosted Database
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ hostedDatabaseId (id): The ID of the hosted database
+ displayName (str): The name of the hosted database
+ createdAt (str): The creation timestamp
+ updatedAt (str): The last update timestamp
+ lifecycle (str): The lifecycle of the hosted database
+ """
+
+ def __init__(self, client, hostedDatabaseId=None, displayName=None, createdAt=None, updatedAt=None, lifecycle=None):
+ super().__init__(client, hostedDatabaseId)
+ self.hosted_database_id = hostedDatabaseId
+ self.display_name = displayName
+ self.created_at = createdAt
+ self.updated_at = updatedAt
+ self.lifecycle = lifecycle
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'hosted_database_id': repr(self.hosted_database_id), f'display_name': repr(self.display_name), f'created_at': repr(
+ self.created_at), f'updated_at': repr(self.updated_at), f'lifecycle': repr(self.lifecycle)}
+ class_name = "HostedDatabase"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'hosted_database_id': self.hosted_database_id, 'display_name': self.display_name,
+ 'created_at': self.created_at, 'updated_at': self.updated_at, 'lifecycle': self.lifecycle}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/hosted_database_snapshot.py b/abacusai/hosted_database_snapshot.py
new file mode 100644
index 000000000..0f213004c
--- /dev/null
+++ b/abacusai/hosted_database_snapshot.py
@@ -0,0 +1,43 @@
+from .return_class import AbstractApiClass
+
+
+class HostedDatabaseSnapshot(AbstractApiClass):
+ """
+ Hosted Database Snapshot
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ hostedDatabaseSnapshotId (id): The ID of the hosted database snapshot
+ srcHostedDatabaseId (id): The ID of the source hosted database
+ createdAt (str): The creation timestamp
+ updatedAt (str): The last update timestamp
+ lifecycle (str): The lifecycle of the hosted database snapshot
+ """
+
+ def __init__(self, client, hostedDatabaseSnapshotId=None, srcHostedDatabaseId=None, createdAt=None, updatedAt=None, lifecycle=None):
+ super().__init__(client, hostedDatabaseSnapshotId)
+ self.hosted_database_snapshot_id = hostedDatabaseSnapshotId
+ self.src_hosted_database_id = srcHostedDatabaseId
+ self.created_at = createdAt
+ self.updated_at = updatedAt
+ self.lifecycle = lifecycle
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'hosted_database_snapshot_id': repr(self.hosted_database_snapshot_id), f'src_hosted_database_id': repr(
+ self.src_hosted_database_id), f'created_at': repr(self.created_at), f'updated_at': repr(self.updated_at), f'lifecycle': repr(self.lifecycle)}
+ class_name = "HostedDatabaseSnapshot"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'hosted_database_snapshot_id': self.hosted_database_snapshot_id, 'src_hosted_database_id': self.src_hosted_database_id,
+ 'created_at': self.created_at, 'updated_at': self.updated_at, 'lifecycle': self.lifecycle}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/hostname_info.py b/abacusai/hostname_info.py
new file mode 100644
index 000000000..2122d4149
--- /dev/null
+++ b/abacusai/hostname_info.py
@@ -0,0 +1,37 @@
+from .return_class import AbstractApiClass
+
+
+class HostnameInfo(AbstractApiClass):
+ """
+ Hostname Info
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ isRootDomain (bool): Whether the hostname is a root domain
+ hasRootNameserversConfigured (bool): Whether the root domain has Abacus nameservers configured.
+ """
+
+ def __init__(self, client, isRootDomain=None, hasRootNameserversConfigured=None):
+ super().__init__(client, None)
+ self.is_root_domain = isRootDomain
+ self.has_root_nameservers_configured = hasRootNameserversConfigured
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'is_root_domain': repr(self.is_root_domain), f'has_root_nameservers_configured': repr(
+ self.has_root_nameservers_configured)}
+ class_name = "HostnameInfo"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'is_root_domain': self.is_root_domain,
+ 'has_root_nameservers_configured': self.has_root_nameservers_configured}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/image_gen_model.py b/abacusai/image_gen_model.py
new file mode 100644
index 000000000..952a43649
--- /dev/null
+++ b/abacusai/image_gen_model.py
@@ -0,0 +1,48 @@
+from .image_gen_model_options import ImageGenModelOptions
+from .return_class import AbstractApiClass
+
+
+class ImageGenModel(AbstractApiClass):
+ """
+ Image generation model
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ displayName (str): The display name for the UI component.
+ type (str): The type of the UI component.
+ valueType (str): The data type of the values within the UI component.
+ optional (bool): Whether the selection of a value is optional.
+ default (str): The default value for the image generation model.
+ helptext (str): The helptext for the UI component.
+ options (ImageGenModelOptions): The options of models available for image generation.
+ """
+
+ def __init__(self, client, displayName=None, type=None, valueType=None, optional=None, default=None, helptext=None, options={}):
+ super().__init__(client, None)
+ self.display_name = displayName
+ self.type = type
+ self.value_type = valueType
+ self.optional = optional
+ self.default = default
+ self.helptext = helptext
+ self.options = client._build_class(ImageGenModelOptions, options)
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'display_name': repr(self.display_name), f'type': repr(self.type), f'value_type': repr(self.value_type), f'optional': repr(
+ self.optional), f'default': repr(self.default), f'helptext': repr(self.helptext), f'options': repr(self.options)}
+ class_name = "ImageGenModel"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'display_name': self.display_name, 'type': self.type, 'value_type': self.value_type, 'optional': self.optional,
+ 'default': self.default, 'helptext': self.helptext, 'options': self._get_attribute_as_dict(self.options)}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/image_gen_model_options.py b/abacusai/image_gen_model_options.py
new file mode 100644
index 000000000..c96d48df7
--- /dev/null
+++ b/abacusai/image_gen_model_options.py
@@ -0,0 +1,35 @@
+from .return_class import AbstractApiClass
+
+
+class ImageGenModelOptions(AbstractApiClass):
+ """
+ Image generation model options
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ keys (list): The keys of the image generation model options represented as the enum values.
+ values (list): The display names of the image generation model options.
+ """
+
+ def __init__(self, client, keys=None, values=None):
+ super().__init__(client, None)
+ self.keys = keys
+ self.values = values
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'keys': repr(self.keys), f'values': repr(self.values)}
+ class_name = "ImageGenModelOptions"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'keys': self.keys, 'values': self.values}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/image_gen_settings.py b/abacusai/image_gen_settings.py
index c26226e9e..df6048bbe 100644
--- a/abacusai/image_gen_settings.py
+++ b/abacusai/image_gen_settings.py
@@ -1,3 +1,4 @@
+from .image_gen_model import ImageGenModel
from .return_class import AbstractApiClass
@@ -7,19 +8,21 @@ class ImageGenSettings(AbstractApiClass):
Args:
client (ApiClient): An authenticated API Client instance
- model (dict): Dropdown for models available for image generation.
settings (dict): The settings for each model.
+ warnings (dict): The warnings for each model.
+ model (ImageGenModel): Dropdown for models available for image generation.
"""
- def __init__(self, client, model=None, settings=None):
+ def __init__(self, client, settings=None, warnings=None, model={}):
super().__init__(client, None)
- self.model = model
self.settings = settings
+ self.warnings = warnings
+ self.model = client._build_class(ImageGenModel, model)
self.deprecated_keys = {}
def __repr__(self):
- repr_dict = {f'model': repr(self.model),
- f'settings': repr(self.settings)}
+ repr_dict = {f'settings': repr(self.settings), f'warnings': repr(
+ self.warnings), f'model': repr(self.model)}
class_name = "ImageGenSettings"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -32,5 +35,6 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'model': self.model, 'settings': self.settings}
+ resp = {'settings': self.settings, 'warnings': self.warnings,
+ 'model': self._get_attribute_as_dict(self.model)}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/lip_sync_gen_settings.py b/abacusai/lip_sync_gen_settings.py
new file mode 100644
index 000000000..ef056fd41
--- /dev/null
+++ b/abacusai/lip_sync_gen_settings.py
@@ -0,0 +1,36 @@
+from .return_class import AbstractApiClass
+
+
+class LipSyncGenSettings(AbstractApiClass):
+ """
+ Lip sync generation settings
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ model (dict): The model settings.
+ settings (dict): The settings for each model.
+ """
+
+ def __init__(self, client, model=None, settings=None):
+ super().__init__(client, None)
+ self.model = model
+ self.settings = settings
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'model': repr(self.model),
+ f'settings': repr(self.settings)}
+ class_name = "LipSyncGenSettings"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'model': self.model, 'settings': self.settings}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/llm_artifact.py b/abacusai/llm_artifact.py
new file mode 100644
index 000000000..744c1d323
--- /dev/null
+++ b/abacusai/llm_artifact.py
@@ -0,0 +1,61 @@
+from .return_class import AbstractApiClass
+
+
+class LlmArtifact(AbstractApiClass):
+ """
+ LLM Artifact
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ llmArtifactId (id): The ID of the LLM artifact
+ info (dict): The info of the LLM artifact
+ description (str): The description of the LLM artifact
+ createdAt (str): The creation timestamp
+ webAppDeploymentId (id): The ID of the associated web app deployment
+ deploymentStatus (str): The status of the associated web app deployment
+ isLatest (bool): Whether it is the most recent version of the artifact
+ deploymentConversationId (id): The ID of the deployment conversation
+ webAppProjectId (id): The ID of the web app project
+ hasDatabase (bool): Whether the app associated with the artifact has a database
+ hasStorage (bool): Whether the app associated with the artifact has storage
+ projectMetadata (dict): The metadata of the web app project
+ hasDatabaseSnapshot (bool): Whether the artifact database has a snapshot
+ supportsPreviewRestore (bool): Whether the artifact supports preview restore
+ """
+
+ def __init__(self, client, llmArtifactId=None, info=None, description=None, createdAt=None, webAppDeploymentId=None, deploymentStatus=None, isLatest=None, deploymentConversationId=None, webAppProjectId=None, hasDatabase=None, hasStorage=None, projectMetadata=None, hasDatabaseSnapshot=None, supportsPreviewRestore=None):
+ super().__init__(client, llmArtifactId)
+ self.llm_artifact_id = llmArtifactId
+ self.info = info
+ self.description = description
+ self.created_at = createdAt
+ self.web_app_deployment_id = webAppDeploymentId
+ self.deployment_status = deploymentStatus
+ self.is_latest = isLatest
+ self.deployment_conversation_id = deploymentConversationId
+ self.web_app_project_id = webAppProjectId
+ self.has_database = hasDatabase
+ self.has_storage = hasStorage
+ self.project_metadata = projectMetadata
+ self.has_database_snapshot = hasDatabaseSnapshot
+ self.supports_preview_restore = supportsPreviewRestore
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'llm_artifact_id': repr(self.llm_artifact_id), f'info': repr(self.info), f'description': repr(self.description), f'created_at': repr(self.created_at), f'web_app_deployment_id': repr(self.web_app_deployment_id), f'deployment_status': repr(self.deployment_status), f'is_latest': repr(self.is_latest), f'deployment_conversation_id': repr(
+ self.deployment_conversation_id), f'web_app_project_id': repr(self.web_app_project_id), f'has_database': repr(self.has_database), f'has_storage': repr(self.has_storage), f'project_metadata': repr(self.project_metadata), f'has_database_snapshot': repr(self.has_database_snapshot), f'supports_preview_restore': repr(self.supports_preview_restore)}
+ class_name = "LlmArtifact"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'llm_artifact_id': self.llm_artifact_id, 'info': self.info, 'description': self.description, 'created_at': self.created_at, 'web_app_deployment_id': self.web_app_deployment_id, 'deployment_status': self.deployment_status, 'is_latest': self.is_latest, 'deployment_conversation_id': self.deployment_conversation_id,
+ 'web_app_project_id': self.web_app_project_id, 'has_database': self.has_database, 'has_storage': self.has_storage, 'project_metadata': self.project_metadata, 'has_database_snapshot': self.has_database_snapshot, 'supports_preview_restore': self.supports_preview_restore}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/mcp_config.py b/abacusai/mcp_config.py
new file mode 100644
index 000000000..4718da66a
--- /dev/null
+++ b/abacusai/mcp_config.py
@@ -0,0 +1,33 @@
+from .return_class import AbstractApiClass
+
+
+class McpConfig(AbstractApiClass):
+ """
+ Model Context Protocol Config
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ mcpConfig (dict): The MCP configuration for the current user
+ """
+
+ def __init__(self, client, mcpConfig=None):
+ super().__init__(client, None)
+ self.mcp_config = mcpConfig
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'mcp_config': repr(self.mcp_config)}
+ class_name = "McpConfig"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'mcp_config': self.mcp_config}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/mcp_server.py b/abacusai/mcp_server.py
new file mode 100644
index 000000000..28da2a5a2
--- /dev/null
+++ b/abacusai/mcp_server.py
@@ -0,0 +1,49 @@
+from .return_class import AbstractApiClass
+
+
+class McpServer(AbstractApiClass):
+ """
+ Model Context Protocol Server
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ name (str): The name of the MCP server.
+ description (str): Description of what the MCP server does.
+ envVars (list): List of API keys or credentials required by the MCP server.
+ config (str): A JSON string containing the command and arguments for the MCP server.
+ envVarInstructions (str): Instructions for the user to obtain the environment variables.
+ url (str): The URL of the MCP server's GitHub repository or webpage.
+ isActive (bool): Whether the MCP server is active.
+ metadata (dict): Additional information about the MCP server, such as github_stars.
+ """
+
+ def __init__(self, client, name=None, description=None, envVars=None, config=None, envVarInstructions=None, url=None, isActive=None, metadata=None):
+ super().__init__(client, None)
+ self.name = name
+ self.description = description
+ self.env_vars = envVars
+ self.config = config
+ self.env_var_instructions = envVarInstructions
+ self.url = url
+ self.is_active = isActive
+ self.metadata = metadata
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'name': repr(self.name), f'description': repr(self.description), f'env_vars': repr(self.env_vars), f'config': repr(
+ self.config), f'env_var_instructions': repr(self.env_var_instructions), f'url': repr(self.url), f'is_active': repr(self.is_active), f'metadata': repr(self.metadata)}
+ class_name = "McpServer"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'name': self.name, 'description': self.description, 'env_vars': self.env_vars, 'config': self.config,
+ 'env_var_instructions': self.env_var_instructions, 'url': self.url, 'is_active': self.is_active, 'metadata': self.metadata}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
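`McpServer.config` is documented above as a JSON string holding the launch command and arguments. A sketch of constructing one locally; the command, package name, env var and metadata values are illustrative assumptions, not defined anywhere in this diff.

```python
# Illustrative only: building an McpServer object locally. The command,
# package, env var and metadata values below are hypothetical examples of
# what the documented fields might contain.
import json

from abacusai import ApiClient
from abacusai.mcp_server import McpServer

client = ApiClient("YOUR_API_KEY")  # placeholder key

server = McpServer(
    client,
    name="github",
    description="Exposes GitHub issues and pull requests as MCP tools.",
    envVars=["GITHUB_TOKEN"],
    config=json.dumps({"command": "npx", "args": ["-y", "@modelcontextprotocol/server-github"]}),
    envVarInstructions="Create a personal access token under GitHub developer settings.",
    url="https://github.com/modelcontextprotocol/servers",
    isActive=True,
    metadata={"github_stars": 1234},
)

print(server.to_dict()["config"])  # the config JSON string round-trips unchanged
```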
diff --git a/abacusai/mcp_server_connection.py b/abacusai/mcp_server_connection.py
new file mode 100644
index 000000000..51b8e74f8
--- /dev/null
+++ b/abacusai/mcp_server_connection.py
@@ -0,0 +1,59 @@
+from .return_class import AbstractApiClass
+
+
+class McpServerConnection(AbstractApiClass):
+ """
+ Model Context Protocol Server Connection
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ mcpServerConnectionId (id): The ID of the MCP server connection.
+ createdAt (str): The date and time the MCP server connection was created.
+ updatedAt (str): The date and time the MCP server connection was updated.
+ name (str): The name of the MCP server.
+ config (dict): A dictionary containing the config for the MCP server.
+ description (str): Description of what the MCP server does.
+ transport (str): The transport type for the MCP server.
+ authType (str): The auth type for the MCP server.
+ externalConnectionId (id): The external connection ID for the MCP server.
+ inactive (bool): Whether the MCP server is inactive.
+ tools (list): The tools for the MCP server.
+ errorMsg (str): The error message for the MCP server.
+ metadata (dict): The metadata for the MCP server.
+ """
+
+ def __init__(self, client, mcpServerConnectionId=None, createdAt=None, updatedAt=None, name=None, config=None, description=None, transport=None, authType=None, externalConnectionId=None, inactive=None, tools=None, errorMsg=None, metadata=None):
+ super().__init__(client, mcpServerConnectionId)
+ self.mcp_server_connection_id = mcpServerConnectionId
+ self.created_at = createdAt
+ self.updated_at = updatedAt
+ self.name = name
+ self.config = config
+ self.description = description
+ self.transport = transport
+ self.auth_type = authType
+ self.external_connection_id = externalConnectionId
+ self.inactive = inactive
+ self.tools = tools
+ self.error_msg = errorMsg
+ self.metadata = metadata
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'mcp_server_connection_id': repr(self.mcp_server_connection_id), f'created_at': repr(self.created_at), f'updated_at': repr(self.updated_at), f'name': repr(self.name), f'config': repr(self.config), f'description': repr(self.description), f'transport': repr(
+ self.transport), f'auth_type': repr(self.auth_type), f'external_connection_id': repr(self.external_connection_id), f'inactive': repr(self.inactive), f'tools': repr(self.tools), f'error_msg': repr(self.error_msg), f'metadata': repr(self.metadata)}
+ class_name = "McpServerConnection"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'mcp_server_connection_id': self.mcp_server_connection_id, 'created_at': self.created_at, 'updated_at': self.updated_at, 'name': self.name, 'config': self.config, 'description': self.description,
+ 'transport': self.transport, 'auth_type': self.auth_type, 'external_connection_id': self.external_connection_id, 'inactive': self.inactive, 'tools': self.tools, 'error_msg': self.error_msg, 'metadata': self.metadata}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/mcp_server_query_result.py b/abacusai/mcp_server_query_result.py
new file mode 100644
index 000000000..02c28fe42
--- /dev/null
+++ b/abacusai/mcp_server_query_result.py
@@ -0,0 +1,33 @@
+from .return_class import AbstractApiClass
+
+
+class McpServerQueryResult(AbstractApiClass):
+ """
+ Result of an MCP server query
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ output (dict): The execution logs as well as the final output of the MCP server query
+ """
+
+ def __init__(self, client, output=None):
+ super().__init__(client, None)
+ self.output = output
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'output': repr(self.output)}
+ class_name = "McpServerQueryResult"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'output': self.output}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/model.py b/abacusai/model.py
index 60f672e20..24c6bf4e8 100644
--- a/abacusai/model.py
+++ b/abacusai/model.py
@@ -23,8 +23,6 @@ class Model(AbstractApiClass):
modelPredictionConfig (dict): The prediction config options for the model.
createdAt (str): Date and time at which the model was created.
projectId (str): The project this model belongs to.
- shared (bool): If model is shared to the Abacus.AI model showcase.
- sharedAt (str): The date and time at which the model was shared to the model showcase
trainFunctionName (str): Name of the function found in the source code that will be executed to train the model. It is not executed when this function is run.
predictFunctionName (str): Name of the function found in the source code that will be executed run predictions through model. It is not executed when this function is run.
predictManyFunctionName (str): Name of the function found in the source code that will be executed to run batch predictions trhough the model.
@@ -54,7 +52,7 @@ class Model(AbstractApiClass):
modelConfig (TrainingConfig): The training config options used to train this model.
"""
- def __init__(self, client, name=None, modelId=None, modelConfigType=None, modelPredictionConfig=None, createdAt=None, projectId=None, shared=None, sharedAt=None, trainFunctionName=None, predictFunctionName=None, predictManyFunctionName=None, initializeFunctionName=None, trainingInputTables=None, sourceCode=None, cpuSize=None, memory=None, trainingFeatureGroupIds=None, algorithmModelConfigs=None, trainingVectorStoreVersions=None, documentRetrievers=None, documentRetrieverIds=None, isPythonModel=None, defaultAlgorithm=None, customAlgorithmConfigs=None, restrictedAlgorithms=None, useGpu=None, notebookId=None, trainingRequired=None, location={}, refreshSchedules={}, codeSource={}, databaseConnector={}, dataLlmFeatureGroups={}, latestModelVersion={}, modelConfig={}):
+ def __init__(self, client, name=None, modelId=None, modelConfigType=None, modelPredictionConfig=None, createdAt=None, projectId=None, trainFunctionName=None, predictFunctionName=None, predictManyFunctionName=None, initializeFunctionName=None, trainingInputTables=None, sourceCode=None, cpuSize=None, memory=None, trainingFeatureGroupIds=None, algorithmModelConfigs=None, trainingVectorStoreVersions=None, documentRetrievers=None, documentRetrieverIds=None, isPythonModel=None, defaultAlgorithm=None, customAlgorithmConfigs=None, restrictedAlgorithms=None, useGpu=None, notebookId=None, trainingRequired=None, location={}, refreshSchedules={}, codeSource={}, databaseConnector={}, dataLlmFeatureGroups={}, latestModelVersion={}, modelConfig={}):
super().__init__(client, modelId)
self.name = name
self.model_id = modelId
@@ -62,8 +60,6 @@ def __init__(self, client, name=None, modelId=None, modelConfigType=None, modelP
self.model_prediction_config = modelPredictionConfig
self.created_at = createdAt
self.project_id = projectId
- self.shared = shared
- self.shared_at = sharedAt
self.train_function_name = trainFunctionName
self.predict_function_name = predictFunctionName
self.predict_many_function_name = predictManyFunctionName
@@ -99,8 +95,8 @@ def __init__(self, client, name=None, modelId=None, modelConfigType=None, modelP
self.deprecated_keys = {}
def __repr__(self):
- repr_dict = {f'name': repr(self.name), f'model_id': repr(self.model_id), f'model_config_type': repr(self.model_config_type), f'model_prediction_config': repr(self.model_prediction_config), f'created_at': repr(self.created_at), f'project_id': repr(self.project_id), f'shared': repr(self.shared), f'shared_at': repr(self.shared_at), f'train_function_name': repr(self.train_function_name), f'predict_function_name': repr(self.predict_function_name), f'predict_many_function_name': repr(self.predict_many_function_name), f'initialize_function_name': repr(self.initialize_function_name), f'training_input_tables': repr(self.training_input_tables), f'source_code': repr(self.source_code), f'cpu_size': repr(self.cpu_size), f'memory': repr(self.memory), f'training_feature_group_ids': repr(self.training_feature_group_ids), f'algorithm_model_configs': repr(self.algorithm_model_configs),
- f'training_vector_store_versions': repr(self.training_vector_store_versions), f'document_retrievers': repr(self.document_retrievers), f'document_retriever_ids': repr(self.document_retriever_ids), f'is_python_model': repr(self.is_python_model), f'default_algorithm': repr(self.default_algorithm), f'custom_algorithm_configs': repr(self.custom_algorithm_configs), f'restricted_algorithms': repr(self.restricted_algorithms), f'use_gpu': repr(self.use_gpu), f'notebook_id': repr(self.notebook_id), f'training_required': repr(self.training_required), f'location': repr(self.location), f'refresh_schedules': repr(self.refresh_schedules), f'code_source': repr(self.code_source), f'database_connector': repr(self.database_connector), f'data_llm_feature_groups': repr(self.data_llm_feature_groups), f'latest_model_version': repr(self.latest_model_version), f'model_config': repr(self.model_config)}
+ repr_dict = {f'name': repr(self.name), f'model_id': repr(self.model_id), f'model_config_type': repr(self.model_config_type), f'model_prediction_config': repr(self.model_prediction_config), f'created_at': repr(self.created_at), f'project_id': repr(self.project_id), f'train_function_name': repr(self.train_function_name), f'predict_function_name': repr(self.predict_function_name), f'predict_many_function_name': repr(self.predict_many_function_name), f'initialize_function_name': repr(self.initialize_function_name), f'training_input_tables': repr(self.training_input_tables), f'source_code': repr(self.source_code), f'cpu_size': repr(self.cpu_size), f'memory': repr(self.memory), f'training_feature_group_ids': repr(self.training_feature_group_ids), f'algorithm_model_configs': repr(self.algorithm_model_configs), f'training_vector_store_versions': repr(
+ self.training_vector_store_versions), f'document_retrievers': repr(self.document_retrievers), f'document_retriever_ids': repr(self.document_retriever_ids), f'is_python_model': repr(self.is_python_model), f'default_algorithm': repr(self.default_algorithm), f'custom_algorithm_configs': repr(self.custom_algorithm_configs), f'restricted_algorithms': repr(self.restricted_algorithms), f'use_gpu': repr(self.use_gpu), f'notebook_id': repr(self.notebook_id), f'training_required': repr(self.training_required), f'location': repr(self.location), f'refresh_schedules': repr(self.refresh_schedules), f'code_source': repr(self.code_source), f'database_connector': repr(self.database_connector), f'data_llm_feature_groups': repr(self.data_llm_feature_groups), f'latest_model_version': repr(self.latest_model_version), f'model_config': repr(self.model_config)}
class_name = "Model"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -113,7 +109,7 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'name': self.name, 'model_id': self.model_id, 'model_config_type': self.model_config_type, 'model_prediction_config': self.model_prediction_config, 'created_at': self.created_at, 'project_id': self.project_id, 'shared': self.shared, 'shared_at': self.shared_at, 'train_function_name': self.train_function_name, 'predict_function_name': self.predict_function_name, 'predict_many_function_name': self.predict_many_function_name, 'initialize_function_name': self.initialize_function_name, 'training_input_tables': self.training_input_tables, 'source_code': self.source_code, 'cpu_size': self.cpu_size, 'memory': self.memory, 'training_feature_group_ids': self.training_feature_group_ids, 'algorithm_model_configs': self.algorithm_model_configs, 'training_vector_store_versions': self.training_vector_store_versions, 'document_retrievers': self.document_retrievers,
+ resp = {'name': self.name, 'model_id': self.model_id, 'model_config_type': self.model_config_type, 'model_prediction_config': self.model_prediction_config, 'created_at': self.created_at, 'project_id': self.project_id, 'train_function_name': self.train_function_name, 'predict_function_name': self.predict_function_name, 'predict_many_function_name': self.predict_many_function_name, 'initialize_function_name': self.initialize_function_name, 'training_input_tables': self.training_input_tables, 'source_code': self.source_code, 'cpu_size': self.cpu_size, 'memory': self.memory, 'training_feature_group_ids': self.training_feature_group_ids, 'algorithm_model_configs': self.algorithm_model_configs, 'training_vector_store_versions': self.training_vector_store_versions, 'document_retrievers': self.document_retrievers,
'document_retriever_ids': self.document_retriever_ids, 'is_python_model': self.is_python_model, 'default_algorithm': self.default_algorithm, 'custom_algorithm_configs': self.custom_algorithm_configs, 'restricted_algorithms': self.restricted_algorithms, 'use_gpu': self.use_gpu, 'notebook_id': self.notebook_id, 'training_required': self.training_required, 'location': self._get_attribute_as_dict(self.location), 'refresh_schedules': self._get_attribute_as_dict(self.refresh_schedules), 'code_source': self._get_attribute_as_dict(self.code_source), 'database_connector': self._get_attribute_as_dict(self.database_connector), 'data_llm_feature_groups': self._get_attribute_as_dict(self.data_llm_feature_groups), 'latest_model_version': self._get_attribute_as_dict(self.latest_model_version), 'model_config': self._get_attribute_as_dict(self.model_config)}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
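Since `shared` and `shared_at` are dropped from `Model`'s constructor, `__repr__`, and `to_dict()`, downstream code that still reads them will now fail with `AttributeError`. A defensive migration sketch, assuming a model fetched via `describe_model`; the id and key are placeholders.

```python
# Sketch of a defensive check for code that previously read model.shared /
# model.shared_at; both attributes are removed from Model by this diff.
from abacusai import ApiClient

client = ApiClient("YOUR_API_KEY")             # placeholder key
model = client.describe_model("model_id_123")  # placeholder model id

shared = getattr(model, "shared", None)       # always None after this change
shared_at = model.to_dict().get("shared_at")  # key is no longer emitted -> None

if shared is None:
    # Model-showcase sharing metadata is no longer exposed on this class.
    pass
```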
diff --git a/abacusai/organization_external_application_settings.py b/abacusai/organization_external_application_settings.py
index a6261d6aa..64df1e3eb 100644
--- a/abacusai/organization_external_application_settings.py
+++ b/abacusai/organization_external_application_settings.py
@@ -11,19 +11,21 @@ class OrganizationExternalApplicationSettings(AbstractApiClass):
theme (dict): The theme used for External Applications in this org.
managedUserService (str): The external service that is managing the user accounts.
passwordsDisabled (bool): Whether or not passwords are disabled for this organization's domain.
+ externalServiceForRoles (str): The external service that is managing the user roles.
"""
- def __init__(self, client, logo=None, theme=None, managedUserService=None, passwordsDisabled=None):
+ def __init__(self, client, logo=None, theme=None, managedUserService=None, passwordsDisabled=None, externalServiceForRoles=None):
super().__init__(client, None)
self.logo = logo
self.theme = theme
self.managed_user_service = managedUserService
self.passwords_disabled = passwordsDisabled
+ self.external_service_for_roles = externalServiceForRoles
self.deprecated_keys = {}
def __repr__(self):
repr_dict = {f'logo': repr(self.logo), f'theme': repr(self.theme), f'managed_user_service': repr(
- self.managed_user_service), f'passwords_disabled': repr(self.passwords_disabled)}
+ self.managed_user_service), f'passwords_disabled': repr(self.passwords_disabled), f'external_service_for_roles': repr(self.external_service_for_roles)}
class_name = "OrganizationExternalApplicationSettings"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -36,6 +38,6 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'logo': self.logo, 'theme': self.theme, 'managed_user_service':
- self.managed_user_service, 'passwords_disabled': self.passwords_disabled}
+ resp = {'logo': self.logo, 'theme': self.theme, 'managed_user_service': self.managed_user_service,
+ 'passwords_disabled': self.passwords_disabled, 'external_service_for_roles': self.external_service_for_roles}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/organization_secret.py b/abacusai/organization_secret.py
index fae95a9b8..dc1ebfe83 100644
--- a/abacusai/organization_secret.py
+++ b/abacusai/organization_secret.py
@@ -10,18 +10,20 @@ class OrganizationSecret(AbstractApiClass):
secretKey (str): The key of the secret
value (str): The value of the secret
createdAt (str): The date and time when the secret was created, in ISO-8601 format.
+ metadata (dict): Additional metadata for the secret (e.g., web_app_project_ids).
"""
- def __init__(self, client, secretKey=None, value=None, createdAt=None):
+ def __init__(self, client, secretKey=None, value=None, createdAt=None, metadata=None):
super().__init__(client, None)
self.secret_key = secretKey
self.value = value
self.created_at = createdAt
+ self.metadata = metadata
self.deprecated_keys = {}
def __repr__(self):
repr_dict = {f'secret_key': repr(self.secret_key), f'value': repr(
- self.value), f'created_at': repr(self.created_at)}
+ self.value), f'created_at': repr(self.created_at), f'metadata': repr(self.metadata)}
class_name = "OrganizationSecret"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -34,6 +36,6 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'secret_key': self.secret_key,
- 'value': self.value, 'created_at': self.created_at}
+ resp = {'secret_key': self.secret_key, 'value': self.value,
+ 'created_at': self.created_at, 'metadata': self.metadata}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
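`OrganizationSecret` gains an optional `metadata` dict, with `web_app_project_ids` cited as an example key. A brief local sketch of reading it; the secret values shown are placeholders.

```python
# Illustrative only: the new metadata field on OrganizationSecret.
from abacusai import ApiClient
from abacusai.organization_secret import OrganizationSecret

client = ApiClient("YOUR_API_KEY")  # placeholder key

secret = OrganizationSecret(
    client,
    secretKey="STRIPE_API_KEY",
    value="sk_test_placeholder",
    createdAt="2024-01-01T00:00:00+00:00",
    metadata={"web_app_project_ids": ["project_id_1"]},
)

# web_app_project_ids is the example key given in the docstring above.
print((secret.metadata or {}).get("web_app_project_ids", []))
```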
diff --git a/abacusai/prediction_client.py b/abacusai/prediction_client.py
index 03961c9b7..af5a9117c 100644
--- a/abacusai/prediction_client.py
+++ b/abacusai/prediction_client.py
@@ -345,7 +345,7 @@ def get_related_items(self, deployment_token: str, deployment_id: str, query_dat
deployment_id, deployment_token) if deployment_token else None
return self._call_api('getRelatedItems', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'queryData': query_data, 'numItems': num_items, 'page': page, 'scalingFactors': scaling_factors, 'restrictItems': restrict_items, 'excludeItems': exclude_items}, server_override=prediction_url)
- def get_chat_response(self, deployment_token: str, deployment_id: str, messages: list, llm_name: str = None, num_completion_tokens: int = None, system_message: str = None, temperature: float = 0.0, filter_key_values: dict = None, search_score_cutoff: float = None, chat_config: dict = None) -> Dict:
+ def get_chat_response(self, deployment_token: str, deployment_id: str, messages: list, llm_name: str = None, num_completion_tokens: int = None, system_message: str = None, temperature: float = 0.0, filter_key_values: dict = None, search_score_cutoff: float = None, chat_config: dict = None, user_info: dict = None) -> Dict:
"""Return a chat response which continues the conversation based on the input messages and search results.
Args:
@@ -361,7 +361,7 @@ def get_chat_response(self, deployment_token: str, deployment_id: str, messages:
chat_config (dict): A dictionary specifying the query chat config override."""
prediction_url = self._get_prediction_endpoint(
deployment_id, deployment_token) if deployment_token else None
- return self._call_api('getChatResponse', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'messages': messages, 'llmName': llm_name, 'numCompletionTokens': num_completion_tokens, 'systemMessage': system_message, 'temperature': temperature, 'filterKeyValues': filter_key_values, 'searchScoreCutoff': search_score_cutoff, 'chatConfig': chat_config}, server_override=prediction_url)
+ return self._call_api('getChatResponse', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'messages': messages, 'llmName': llm_name, 'numCompletionTokens': num_completion_tokens, 'systemMessage': system_message, 'temperature': temperature, 'filterKeyValues': filter_key_values, 'searchScoreCutoff': search_score_cutoff, 'chatConfig': chat_config, 'userInfo': user_info}, server_override=prediction_url)
def get_chat_response_with_binary_data(self, deployment_token: str, deployment_id: str, messages: list, llm_name: str = None, num_completion_tokens: int = None, system_message: str = None, temperature: float = 0.0, filter_key_values: dict = None, search_score_cutoff: float = None, chat_config: dict = None, attachments: None = None) -> Dict:
"""Return a chat response which continues the conversation based on the input messages and search results.
@@ -382,7 +382,7 @@ def get_chat_response_with_binary_data(self, deployment_token: str, deployment_i
deployment_id, deployment_token) if deployment_token else None
return self._call_api('getChatResponseWithBinaryData', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, data={'messages': json.dumps(messages) if (messages is not None and not isinstance(messages, str)) else messages, 'llmName': json.dumps(llm_name) if (llm_name is not None and not isinstance(llm_name, str)) else llm_name, 'numCompletionTokens': json.dumps(num_completion_tokens) if (num_completion_tokens is not None and not isinstance(num_completion_tokens, str)) else num_completion_tokens, 'systemMessage': json.dumps(system_message) if (system_message is not None and not isinstance(system_message, str)) else system_message, 'temperature': json.dumps(temperature) if (temperature is not None and not isinstance(temperature, str)) else temperature, 'filterKeyValues': json.dumps(filter_key_values) if (filter_key_values is not None and not isinstance(filter_key_values, str)) else filter_key_values, 'searchScoreCutoff': json.dumps(search_score_cutoff) if (search_score_cutoff is not None and not isinstance(search_score_cutoff, str)) else search_score_cutoff, 'chatConfig': json.dumps(chat_config) if (chat_config is not None and not isinstance(chat_config, str)) else chat_config}, files=attachments, server_override=prediction_url)
- def get_conversation_response(self, deployment_id: str, message: str, deployment_token: str, deployment_conversation_id: str = None, external_session_id: str = None, llm_name: str = None, num_completion_tokens: int = None, system_message: str = None, temperature: float = 0.0, filter_key_values: dict = None, search_score_cutoff: float = None, chat_config: dict = None, doc_infos: list = None) -> Dict:
+ def get_conversation_response(self, deployment_id: str, message: str, deployment_token: str, deployment_conversation_id: str = None, external_session_id: str = None, llm_name: str = None, num_completion_tokens: int = None, system_message: str = None, temperature: float = 0.0, filter_key_values: dict = None, search_score_cutoff: float = None, chat_config: dict = None, doc_infos: list = None, user_info: dict = None, execute_usercode_tool: bool = False) -> Dict:
"""Return a conversation response which continues the conversation based on the input message and deployment conversation id (if exists).
Args:
@@ -398,10 +398,11 @@ def get_conversation_response(self, deployment_id: str, message: str, deployment
filter_key_values (dict): A dictionary mapping column names to a list of values to restrict the retrived search results.
search_score_cutoff (float): Cutoff for the document retriever score. Matching search results below this score will be ignored.
chat_config (dict): A dictionary specifiying the query chat config override.
- doc_infos (list): An optional list of documents use for the conversation. A keyword 'doc_id' is expected to be present in each document for retrieving contents from docstore."""
+ doc_infos (list): An optional list of documents used for the conversation. A keyword 'doc_id' is expected to be present in each document for retrieving contents from docstore.
+ execute_usercode_tool (bool): If True, will return the tool output in the response."""
prediction_url = self._get_prediction_endpoint(
deployment_id, deployment_token) if deployment_token else None
- return self._call_api('getConversationResponse', 'POST', query_params={'deploymentId': deployment_id, 'deploymentToken': deployment_token}, body={'message': message, 'deploymentConversationId': deployment_conversation_id, 'externalSessionId': external_session_id, 'llmName': llm_name, 'numCompletionTokens': num_completion_tokens, 'systemMessage': system_message, 'temperature': temperature, 'filterKeyValues': filter_key_values, 'searchScoreCutoff': search_score_cutoff, 'chatConfig': chat_config, 'docInfos': doc_infos}, server_override=prediction_url)
+ return self._call_api('getConversationResponse', 'POST', query_params={'deploymentId': deployment_id, 'deploymentToken': deployment_token}, body={'message': message, 'deploymentConversationId': deployment_conversation_id, 'externalSessionId': external_session_id, 'llmName': llm_name, 'numCompletionTokens': num_completion_tokens, 'systemMessage': system_message, 'temperature': temperature, 'filterKeyValues': filter_key_values, 'searchScoreCutoff': search_score_cutoff, 'chatConfig': chat_config, 'docInfos': doc_infos, 'userInfo': user_info, 'executeUsercodeTool': execute_usercode_tool}, server_override=prediction_url)
def get_conversation_response_with_binary_data(self, deployment_id: str, deployment_token: str, message: str, deployment_conversation_id: str = None, external_session_id: str = None, llm_name: str = None, num_completion_tokens: int = None, system_message: str = None, temperature: float = 0.0, filter_key_values: dict = None, search_score_cutoff: float = None, chat_config: dict = None, attachments: None = None) -> Dict:
"""Return a conversation response which continues the conversation based on the input message and deployment conversation id (if exists).
@@ -424,6 +425,14 @@ def get_conversation_response_with_binary_data(self, deployment_id: str, deploym
deployment_id, deployment_token) if deployment_token else None
return self._call_api('getConversationResponseWithBinaryData', 'POST', query_params={'deploymentId': deployment_id, 'deploymentToken': deployment_token}, data={'message': json.dumps(message) if (message is not None and not isinstance(message, str)) else message, 'deploymentConversationId': json.dumps(deployment_conversation_id) if (deployment_conversation_id is not None and not isinstance(deployment_conversation_id, str)) else deployment_conversation_id, 'externalSessionId': json.dumps(external_session_id) if (external_session_id is not None and not isinstance(external_session_id, str)) else external_session_id, 'llmName': json.dumps(llm_name) if (llm_name is not None and not isinstance(llm_name, str)) else llm_name, 'numCompletionTokens': json.dumps(num_completion_tokens) if (num_completion_tokens is not None and not isinstance(num_completion_tokens, str)) else num_completion_tokens, 'systemMessage': json.dumps(system_message) if (system_message is not None and not isinstance(system_message, str)) else system_message, 'temperature': json.dumps(temperature) if (temperature is not None and not isinstance(temperature, str)) else temperature, 'filterKeyValues': json.dumps(filter_key_values) if (filter_key_values is not None and not isinstance(filter_key_values, str)) else filter_key_values, 'searchScoreCutoff': json.dumps(search_score_cutoff) if (search_score_cutoff is not None and not isinstance(search_score_cutoff, str)) else search_score_cutoff, 'chatConfig': json.dumps(chat_config) if (chat_config is not None and not isinstance(chat_config, str)) else chat_config}, files=attachments, server_override=prediction_url)
+ def get_deep_agent_response(self, message: str, deployment_conversation_id: str = None) -> Dict:
+ """Return a DeepAgent response with generated files if any, based on the input message.
+
+ Args:
+ message (str): The user's message/task for DeepAgent to complete
+ deployment_conversation_id (str): The unique identifier of a deployment conversation to continue. If not specified, a new one will be created."""
+ return self._proxy_request('getDeepAgentResponse', 'POST', query_params={}, body={'message': message, 'deploymentConversationId': deployment_conversation_id}, is_sync=True, timeout=3600)
+
def get_search_results(self, deployment_token: str, deployment_id: str, query_data: dict, num: int = 15) -> Dict:
"""Return the most relevant search results to the search query from the uploaded documents.
@@ -519,6 +528,30 @@ def get_alternative_assignments(self, deployment_token: str, deployment_id: str,
deployment_id, deployment_token) if deployment_token else None
return self._call_api('getAlternativeAssignments', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'queryData': query_data, 'addConstraints': add_constraints, 'solveTimeLimitSeconds': solve_time_limit_seconds, 'bestAlternateOnly': best_alternate_only}, server_override=prediction_url)
+ def get_optimization_inputs_from_serialized(self, deployment_token: str, deployment_id: str, query_data: dict = None) -> Dict:
+ """Get optimization inputs for a given query, recomputed from new serialized inputs
+
+ Args:
+ deployment_token (str): The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be safely embedded in an application or website.
+ deployment_id (str): The unique identifier of a deployment created under the project.
+ query_data (dict): A dictionary mapping names to serialized dataframes for the updated feature groups (FGs) in the FG tree; these are used to recompute the new top-level FGs for online solve."""
+ prediction_url = self._get_prediction_endpoint(
+ deployment_id, deployment_token) if deployment_token else None
+ return self._call_api('getOptimizationInputsFromSerialized', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'queryData': query_data}, server_override=prediction_url)
+
+ def get_assignments_online_with_new_serialized_inputs(self, deployment_token: str, deployment_id: str, query_data: dict = None, solve_time_limit_seconds: float = None, optimality_gap_limit: float = None) -> Dict:
+ """Get assignments for a given query, with new serialized inputs
+
+ Args:
+ deployment_token (str): The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be safely embedded in an application or website.
+ deployment_id (str): The unique identifier of a deployment created under the project.
+ query_data (dict): A dictionary containing assignment, constraint and constraint_equations_df (under these specific keys)
+ solve_time_limit_seconds (float): Maximum time in seconds to spend solving the query.
+ optimality_gap_limit (float): The optimality gap within which a solution is accepted as valid, computed as abs(best_solution_found - best_bound) / abs(best_solution_found). A value of 0 means only an optimal solution is accepted."""
+ prediction_url = self._get_prediction_endpoint(
+ deployment_id, deployment_token) if deployment_token else None
+ return self._call_api('getAssignmentsOnlineWithNewSerializedInputs', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'queryData': query_data, 'solveTimeLimitSeconds': solve_time_limit_seconds, 'optimalityGapLimit': optimality_gap_limit}, server_override=prediction_url)
+
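Taken together, the prediction-client changes above add `user_info`/`execute_usercode_tool` to the conversation endpoint, a synchronous `get_deep_agent_response` proxy call, and two optimization endpoints that accept serialized dataframes. A usage sketch under stated assumptions: the tokens, ids, `user_info` shape, and the use of `pandas.DataFrame.to_json()` as the serialization format are illustrative; only the method names and parameters come from this diff.

```python
# Usage sketch for the new/changed PredictionClient endpoints. Ids, tokens,
# the user_info shape and the DataFrame serialization are assumptions made
# for illustration only.
import pandas as pd

from abacusai import PredictionClient

client = PredictionClient()

# get_conversation_response now forwards user_info and execute_usercode_tool.
resp = client.get_conversation_response(
    deployment_id="deployment_id_123",
    message="Summarize last month's support tickets.",
    deployment_token="deployment_token_abc",
    user_info={"email": "user@example.com"},  # shape assumed; not documented in the diff
    execute_usercode_tool=True,               # return tool output in the response
)

# New DeepAgent endpoint (synchronous proxy request with a 1-hour timeout).
deep = client.get_deep_agent_response(message="Build a CSV report of weekly signups.")

# New optimization endpoints accept serialized dataframes keyed by name.
assignments_df = pd.DataFrame({"task": ["t1", "t2"], "worker": ["w1", "w2"]})
inputs = client.get_optimization_inputs_from_serialized(
    deployment_token="deployment_token_abc",
    deployment_id="deployment_id_123",
    query_data={"assignments": assignments_df.to_json()},  # serialization format assumed
)
result = client.get_assignments_online_with_new_serialized_inputs(
    deployment_token="deployment_token_abc",
    deployment_id="deployment_id_123",
    query_data={  # documented keys; the contents here are placeholders
        "assignment": assignments_df.to_json(),
        "constraint": None,
        "constraint_equations_df": None,
    },
    solve_time_limit_seconds=30.0,
    optimality_gap_limit=0.01,
)
```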
def check_constraints(self, deployment_token: str, deployment_id: str, query_data: dict) -> Dict:
"""Check for any constraints violated by the overrides.
@@ -647,29 +680,6 @@ def transfer_style(self, deployment_token: str, deployment_id: str, source_image
deployment_id, deployment_token) if deployment_token else None
return self._call_api('transferStyle', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, data={}, files={'sourceImage': source_image, 'styleImage': style_image}, streamable_response=True, server_override=prediction_url)
- def generate_image(self, deployment_token: str, deployment_id: str, query_data: dict) -> io.BytesIO:
- """Generate an image from text prompt.
-
- Args:
- deployment_token (str): The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model within an application or website.
- deployment_id (str): A unique identifier to a deployment created under the project.
- query_data (dict): Specifies the text prompt. For example, {'prompt': 'a cat'}"""
- prediction_url = self._get_prediction_endpoint(
- deployment_id, deployment_token) if deployment_token else None
- return self._call_api('generateImage', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'queryData': query_data}, streamable_response=True, server_override=prediction_url)
-
- def execute_agent(self, deployment_token: str, deployment_id: str, arguments: list = None, keyword_arguments: dict = None) -> Dict:
- """Executes a deployed AI agent function using the arguments as keyword arguments to the agent execute function.
-
- Args:
- deployment_token (str): The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
- deployment_id (str): A unique string identifier for the deployment created under the project.
- arguments (list): Positional arguments to the agent execute function.
- keyword_arguments (dict): A dictionary where each 'key' represents the paramter name and its corresponding 'value' represents the value of that parameter for the agent execute function."""
- prediction_url = self._get_prediction_endpoint(
- deployment_id, deployment_token) if deployment_token else None
- return self._call_api('executeAgent', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'arguments': arguments, 'keywordArguments': keyword_arguments}, server_override=prediction_url, timeout=1500)
-
def get_matrix_agent_schema(self, deployment_token: str, deployment_id: str, query: str, doc_infos: list = None, deployment_conversation_id: str = None, external_session_id: str = None) -> Dict:
"""Executes a deployed AI agent function using the arguments as keyword arguments to the agent execute function.
@@ -684,23 +694,6 @@ def get_matrix_agent_schema(self, deployment_token: str, deployment_id: str, que
deployment_id, deployment_token) if deployment_token else None
return self._call_api('getMatrixAgentSchema', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'query': query, 'docInfos': doc_infos, 'deploymentConversationId': deployment_conversation_id, 'externalSessionId': external_session_id}, server_override=prediction_url, timeout=1500)
- def execute_conversation_agent(self, deployment_token: str, deployment_id: str, arguments: list = None, keyword_arguments: dict = None, deployment_conversation_id: str = None, external_session_id: str = None, regenerate: bool = False, doc_infos: list = None, agent_workflow_node_id: str = None) -> Dict:
- """Executes a deployed AI agent function using the arguments as keyword arguments to the agent execute function.
-
- Args:
- deployment_token (str): The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
- deployment_id (str): A unique string identifier for the deployment created under the project.
- arguments (list): Positional arguments to the agent execute function.
- keyword_arguments (dict): A dictionary where each 'key' represents the paramter name and its corresponding 'value' represents the value of that parameter for the agent execute function.
- deployment_conversation_id (str): A unique string identifier for the deployment conversation used for the conversation.
- external_session_id (str): A unique string identifier for the session used for the conversation. If both deployment_conversation_id and external_session_id are not provided, a new session will be created.
- regenerate (bool): If True, will regenerate the response from the last query.
- doc_infos (list): An optional list of documents use for the conversation. A keyword 'doc_id' is expected to be present in each document for retrieving contents from docstore.
- agent_workflow_node_id (str): An optional agent workflow node id to trigger agent execution from an intermediate node."""
- prediction_url = self._get_prediction_endpoint(
- deployment_id, deployment_token) if deployment_token else None
- return self._call_api('executeConversationAgent', 'POST', query_params={'deploymentToken': deployment_token, 'deploymentId': deployment_id}, body={'arguments': arguments, 'keywordArguments': keyword_arguments, 'deploymentConversationId': deployment_conversation_id, 'externalSessionId': external_session_id, 'regenerate': regenerate, 'docInfos': doc_infos, 'agentWorkflowNodeId': agent_workflow_node_id}, server_override=prediction_url)
-
def lookup_matches(self, deployment_token: str, deployment_id: str, data: str = None, filters: dict = None, num: int = None, result_columns: list = None, max_words: int = None, num_retrieval_margin_words: int = None, max_words_per_chunk: int = None, score_multiplier_column: str = None, min_score: float = None, required_phrases: list = None, filter_clause: str = None, crowding_limits: dict = None, include_text_search: bool = False) -> List[DocumentRetrieverLookupResult]:
"""Lookup document retrievers and return the matching documents from the document retriever deployed with given query.
diff --git a/abacusai/presentation_export_result.py b/abacusai/presentation_export_result.py
new file mode 100644
index 000000000..edb6f463c
--- /dev/null
+++ b/abacusai/presentation_export_result.py
@@ -0,0 +1,37 @@
+from .return_class import AbstractApiClass
+
+
+class PresentationExportResult(AbstractApiClass):
+ """
+ Export Presentation
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ filePath (str): The path to the exported presentation
+ webViewLink (str): The web view link to the exported presentation (if applicable)
+ """
+
+ def __init__(self, client, filePath=None, webViewLink=None):
+ super().__init__(client, None)
+ self.file_path = filePath
+ self.web_view_link = webViewLink
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'file_path': repr(
+ self.file_path), f'web_view_link': repr(self.web_view_link)}
+ class_name = "PresentationExportResult"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'file_path': self.file_path,
+ 'web_view_link': self.web_view_link}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/private_web_app_deployment.py b/abacusai/private_web_app_deployment.py
new file mode 100644
index 000000000..a14a084d0
--- /dev/null
+++ b/abacusai/private_web_app_deployment.py
@@ -0,0 +1,57 @@
+from .return_class import AbstractApiClass
+
+
+class PrivateWebAppDeployment(AbstractApiClass):
+ """
+ Private web app deployment list.
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ hostname (str): The hostname of the web app deployment.
+ llmArtifactId (id): The ID of the LLM artifact.
+ hostedAppVersion (id): The version of the hosted app.
+ applicationType (str): The type of application.
+ lifecycle (str): The lifecycle of the web app deployment.
+ deploymentConversationId (id): The ID of the deployment conversation.
+ conversationName (str): The name of the conversation.
+ userId (id): The ID of the user who created the deployment.
+ email (str): The email of the user who created the deployment.
+ conversationType (str): The type of conversation.
+ source (str): The source of the conversation.
+ conversationCreatedAt (str): The timestamp when the conversation was created.
+ """
+
+ def __init__(self, client, hostname=None, llmArtifactId=None, hostedAppVersion=None, applicationType=None, lifecycle=None, deploymentConversationId=None, conversationName=None, userId=None, email=None, conversationType=None, source=None, conversationCreatedAt=None):
+ super().__init__(client, None)
+ self.hostname = hostname
+ self.llm_artifact_id = llmArtifactId
+ self.hosted_app_version = hostedAppVersion
+ self.application_type = applicationType
+ self.lifecycle = lifecycle
+ self.deployment_conversation_id = deploymentConversationId
+ self.conversation_name = conversationName
+ self.user_id = userId
+ self.email = email
+ self.conversation_type = conversationType
+ self.source = source
+ self.conversation_created_at = conversationCreatedAt
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'hostname': repr(self.hostname), f'llm_artifact_id': repr(self.llm_artifact_id), f'hosted_app_version': repr(self.hosted_app_version), f'application_type': repr(self.application_type), f'lifecycle': repr(self.lifecycle), f'deployment_conversation_id': repr(
+ self.deployment_conversation_id), f'conversation_name': repr(self.conversation_name), f'user_id': repr(self.user_id), f'email': repr(self.email), f'conversation_type': repr(self.conversation_type), f'source': repr(self.source), f'conversation_created_at': repr(self.conversation_created_at)}
+ class_name = "PrivateWebAppDeployment"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'hostname': self.hostname, 'llm_artifact_id': self.llm_artifact_id, 'hosted_app_version': self.hosted_app_version, 'application_type': self.application_type, 'lifecycle': self.lifecycle, 'deployment_conversation_id': self.deployment_conversation_id,
+ 'conversation_name': self.conversation_name, 'user_id': self.user_id, 'email': self.email, 'conversation_type': self.conversation_type, 'source': self.source, 'conversation_created_at': self.conversation_created_at}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/project.py b/abacusai/project.py
index 543dc54d5..a4301eb9d 100644
--- a/abacusai/project.py
+++ b/abacusai/project.py
@@ -154,6 +154,16 @@ def infer_feature_mappings(self, feature_group_id: str):
"""
return self.client.infer_feature_mappings(self.project_id, feature_group_id)
+ def add_scope_for_user(self, email: str, scope: list):
+ """
+ Add a user to the project with the specified scopes.
+
+ Args:
+ email (str): The user's email.
+ scope (list): The list of project scopes.
+ """
+ return self.client.add_project_scope_for_user(self.project_id, email, scope)
+
def describe_feature_group(self, feature_group_id: str):
"""
Describe a feature group associated with a project
@@ -326,17 +336,17 @@ def create_model_monitor(self, prediction_feature_group_id: str, training_featur
"""
return self.client.create_model_monitor(self.project_id, prediction_feature_group_id, training_feature_group_id, name, refresh_schedule, target_value, target_value_bias, target_value_performance, feature_mappings, model_id, training_feature_mappings, feature_group_base_monitor_config, feature_group_comparison_monitor_config, exclude_interactive_performance_analysis, exclude_bias_analysis, exclude_performance_analysis, exclude_feature_drift_analysis, exclude_data_integrity_analysis)
- def list_model_monitors(self):
+ def list_model_monitors(self, limit: int = None):
"""
Retrieves the list of model monitors in the specified project.
Args:
- project_id (str): Unique string identifier associated with the project.
+ limit (int): Maximum number of model monitors to return. An internal default limit is applied if not set.
Returns:
list[ModelMonitor]: A list of model monitors.
"""
- return self.client.list_model_monitors(self.project_id)
+ return self.client.list_model_monitors(self.project_id, limit)
def create_vision_drift_monitor(self, prediction_feature_group_id: str, training_feature_group_id: str, name: str, feature_mappings: dict, training_feature_mappings: dict, target_value_performance: str = None, refresh_schedule: str = None):
"""
@@ -535,17 +545,17 @@ def list_refresh_policies(self, dataset_ids: List = [], feature_group_id: str =
"""
return self.client.list_refresh_policies(self.project_id, dataset_ids, feature_group_id, model_ids, deployment_ids, batch_prediction_ids, model_monitor_ids, notebook_ids)
- def list_batch_predictions(self):
+ def list_batch_predictions(self, limit: int = None):
"""
Retrieves a list of batch predictions in the project.
Args:
- project_id (str): Unique string identifier of the project.
+ limit (int): Maximum number of batch predictions to return. An internal default limit is applied if not set.
Returns:
list[BatchPrediction]: List of batch prediction jobs.
"""
- return self.client.list_batch_predictions(self.project_id)
+ return self.client.list_batch_predictions(self.project_id, limit)
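The `Project` changes above add `add_scope_for_user` and an optional `limit` on `list_model_monitors` and `list_batch_predictions`. A short usage sketch; the project id, email, and scope value are placeholders, and valid scope strings are not enumerated in this diff.

```python
# Sketch of the new Project helpers in this diff; ids, email and scope values
# are placeholders.
from abacusai import ApiClient

client = ApiClient("YOUR_API_KEY")                   # placeholder key
project = client.describe_project("project_id_123")  # placeholder project id

# New: grant a user scopes on this project (delegates to add_project_scope_for_user).
project.add_scope_for_user(email="teammate@example.com", scope=["CHAT"])  # scope value assumed

# New: both listings accept an optional limit; an internal default applies when unset.
monitors = project.list_model_monitors(limit=10)
batch_predictions = project.list_batch_predictions(limit=25)
```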
def list_pipelines(self):
"""
diff --git a/abacusai/python_function.py b/abacusai/python_function.py
index b6390741f..af9e8ea02 100644
--- a/abacusai/python_function.py
+++ b/abacusai/python_function.py
@@ -21,10 +21,12 @@ class PythonFunction(AbstractApiClass):
packageRequirements (list): The pip package dependencies required to run the code
description (str): Description of the Python function.
examples (dict[str, list[str]]): Dictionary containing example use cases and anti-patterns. Includes 'positive' examples showing recommended usage and 'negative' examples showing cases to avoid.
+ connectors (dict): Dictionary containing user-level and organization-level connectors.
+ configurations (dict): Dictionary containing configurations for the Python function.
codeSource (CodeSource): Information about the source code of the Python function.
"""
- def __init__(self, client, notebookId=None, name=None, createdAt=None, functionVariableMappings=None, outputVariableMappings=None, functionName=None, pythonFunctionId=None, functionType=None, packageRequirements=None, description=None, examples=None, codeSource={}):
+ def __init__(self, client, notebookId=None, name=None, createdAt=None, functionVariableMappings=None, outputVariableMappings=None, functionName=None, pythonFunctionId=None, functionType=None, packageRequirements=None, description=None, examples=None, connectors=None, configurations=None, codeSource={}):
super().__init__(client, pythonFunctionId)
self.notebook_id = notebookId
self.name = name
@@ -37,12 +39,14 @@ def __init__(self, client, notebookId=None, name=None, createdAt=None, functionV
self.package_requirements = packageRequirements
self.description = description
self.examples = examples
+ self.connectors = connectors
+ self.configurations = configurations
self.code_source = client._build_class(CodeSource, codeSource)
self.deprecated_keys = {}
def __repr__(self):
- repr_dict = {f'notebook_id': repr(self.notebook_id), f'name': repr(self.name), f'created_at': repr(self.created_at), f'function_variable_mappings': repr(self.function_variable_mappings), f'output_variable_mappings': repr(self.output_variable_mappings), f'function_name': repr(
- self.function_name), f'python_function_id': repr(self.python_function_id), f'function_type': repr(self.function_type), f'package_requirements': repr(self.package_requirements), f'description': repr(self.description), f'examples': repr(self.examples), f'code_source': repr(self.code_source)}
+ repr_dict = {f'notebook_id': repr(self.notebook_id), f'name': repr(self.name), f'created_at': repr(self.created_at), f'function_variable_mappings': repr(self.function_variable_mappings), f'output_variable_mappings': repr(self.output_variable_mappings), f'function_name': repr(self.function_name), f'python_function_id': repr(
+ self.python_function_id), f'function_type': repr(self.function_type), f'package_requirements': repr(self.package_requirements), f'description': repr(self.description), f'examples': repr(self.examples), f'connectors': repr(self.connectors), f'configurations': repr(self.configurations), f'code_source': repr(self.code_source)}
class_name = "PythonFunction"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -55,8 +59,8 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'notebook_id': self.notebook_id, 'name': self.name, 'created_at': self.created_at, 'function_variable_mappings': self.function_variable_mappings, 'output_variable_mappings': self.output_variable_mappings, 'function_name': self.function_name,
- 'python_function_id': self.python_function_id, 'function_type': self.function_type, 'package_requirements': self.package_requirements, 'description': self.description, 'examples': self.examples, 'code_source': self._get_attribute_as_dict(self.code_source)}
+ resp = {'notebook_id': self.notebook_id, 'name': self.name, 'created_at': self.created_at, 'function_variable_mappings': self.function_variable_mappings, 'output_variable_mappings': self.output_variable_mappings, 'function_name': self.function_name, 'python_function_id': self.python_function_id,
+ 'function_type': self.function_type, 'package_requirements': self.package_requirements, 'description': self.description, 'examples': self.examples, 'connectors': self.connectors, 'configurations': self.configurations, 'code_source': self._get_attribute_as_dict(self.code_source)}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
def add_graph_to_dashboard(self, graph_dashboard_id: str, function_variable_mappings: List = None, name: str = None):
diff --git a/abacusai/regenerate_llm_external_application.py b/abacusai/regenerate_llm_external_application.py
index 009a6f93c..71538bb26 100644
--- a/abacusai/regenerate_llm_external_application.py
+++ b/abacusai/regenerate_llm_external_application.py
@@ -7,19 +7,21 @@ class RegenerateLlmExternalApplication(AbstractApiClass):
Args:
client (ApiClient): An authenticated API Client instance
- name (str): The external name of the LLM.
- externalApplicationId (str): The unique identifier of the external application.
+ name (str): The display name of the LLM (e.g. GPT-5).
+ llmBotIcon (str): The bot icon of the LLM.
+ llmName (str): The external name of the LLM (e.g. OPENAI_GPT5).
"""
- def __init__(self, client, name=None, externalApplicationId=None):
+ def __init__(self, client, name=None, llmBotIcon=None, llmName=None):
super().__init__(client, None)
self.name = name
- self.external_application_id = externalApplicationId
+ self.llm_bot_icon = llmBotIcon
+ self.llm_name = llmName
self.deprecated_keys = {}
def __repr__(self):
- repr_dict = {f'name': repr(self.name), f'external_application_id': repr(
- self.external_application_id)}
+ repr_dict = {f'name': repr(self.name), f'llm_bot_icon': repr(
+ self.llm_bot_icon), f'llm_name': repr(self.llm_name)}
class_name = "RegenerateLlmExternalApplication"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -32,6 +34,6 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'name': self.name,
- 'external_application_id': self.external_application_id}
+ resp = {'name': self.name, 'llm_bot_icon': self.llm_bot_icon,
+ 'llm_name': self.llm_name}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/regenerate_llm_option.py b/abacusai/regenerate_llm_option.py
new file mode 100644
index 000000000..66efc9866
--- /dev/null
+++ b/abacusai/regenerate_llm_option.py
@@ -0,0 +1,39 @@
+from .return_class import AbstractApiClass
+
+
+class RegenerateLlmOption(AbstractApiClass):
+ """
+ An option that specifies an LLM user can regenerate with in RouteLLM.
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ name (str): The display name of the LLM (e.g. GPT-5).
+ llmBotIcon (str): The bot icon of the LLM.
+ llmName (str): The external name of the LLM (e.g. OPENAI_GPT5).
+ """
+
+ def __init__(self, client, name=None, llmBotIcon=None, llmName=None):
+ super().__init__(client, None)
+ self.name = name
+ self.llm_bot_icon = llmBotIcon
+ self.llm_name = llmName
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'name': repr(self.name), f'llm_bot_icon': repr(
+ self.llm_bot_icon), f'llm_name': repr(self.llm_name)}
+ class_name = "RegenerateLlmOption"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'name': self.name, 'llm_bot_icon': self.llm_bot_icon,
+ 'llm_name': self.llm_name}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/schema.py b/abacusai/schema.py
index 93a0f6f29..ff2755c9a 100644
--- a/abacusai/schema.py
+++ b/abacusai/schema.py
@@ -10,7 +10,7 @@ class Schema(AbstractApiClass):
Args:
client (ApiClient): An authenticated API Client instance
name (str): The unique name of the feature.
- featureMapping (str): The mapping of the feature. The possible values will be based on the project's use-case. See the (Use Case Documentation)[https://api.abacus.ai/app/help/useCases] for more details.
+            featureMapping (str): The mapping of the feature. The possible values will be based on the project's use-case. See the [Use Case Documentation](https://api.abacus.ai/app/help/developer-platform/useCases) for more details.
detectedFeatureMapping (str): Detected feature mapping for this feature
featureType (str): The underlying data type of each feature: CATEGORICAL, CATEGORICAL_LIST, NUMERICAL, TIMESTAMP, TEXT, EMAIL, LABEL_LIST, ENTITY_LABEL_LIST, PAGE_LABEL_LIST, JSON, OBJECT_REFERENCE, MULTICATEGORICAL_LIST, COORDINATE_LIST, NUMERICAL_LIST, TIMESTAMP_LIST, ZIPCODE, URL, PAGE_INFOS, PAGES_DOCUMENT, TOKENS_DOCUMENT, MESSAGE_LIST.
detectedFeatureType (str): The detected feature type for this feature
diff --git a/abacusai/feature_record.py b/abacusai/session_summary.py
similarity index 70%
rename from abacusai/feature_record.py
rename to abacusai/session_summary.py
index 19c6e43a7..a5facfd51 100644
--- a/abacusai/feature_record.py
+++ b/abacusai/session_summary.py
@@ -1,23 +1,23 @@
from .return_class import AbstractApiClass
-class FeatureRecord(AbstractApiClass):
+class SessionSummary(AbstractApiClass):
"""
- A feature record
+ A session summary
Args:
client (ApiClient): An authenticated API Client instance
- data (dict): the record's current data
+ summary (str): The summary of the session.
"""
- def __init__(self, client, data=None):
+ def __init__(self, client, summary=None):
super().__init__(client, None)
- self.data = data
+ self.summary = summary
self.deprecated_keys = {}
def __repr__(self):
- repr_dict = {f'data': repr(self.data)}
- class_name = "FeatureRecord"
+ repr_dict = {f'summary': repr(self.summary)}
+ class_name = "SessionSummary"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
return f"{class_name}({repr_str})"
@@ -29,5 +29,5 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'data': self.data}
+ resp = {'summary': self.summary}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/session_transcript.py b/abacusai/session_transcript.py
new file mode 100644
index 000000000..5bd3e3ead
--- /dev/null
+++ b/abacusai/session_transcript.py
@@ -0,0 +1,35 @@
+from .return_class import AbstractApiClass
+
+
+class SessionTranscript(AbstractApiClass):
+ """
+ A session transcript
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ role (str): The role of the transcript.
+ content (str): The content of the transcript.
+ """
+
+ def __init__(self, client, role=None, content=None):
+ super().__init__(client, None)
+ self.role = role
+ self.content = content
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'role': repr(self.role), f'content': repr(self.content)}
+ class_name = "SessionTranscript"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'role': self.role, 'content': self.content}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/session_transcripts.py b/abacusai/session_transcripts.py
new file mode 100644
index 000000000..6c2ddadb8
--- /dev/null
+++ b/abacusai/session_transcripts.py
@@ -0,0 +1,33 @@
+from .return_class import AbstractApiClass
+
+
+class SessionTranscripts(AbstractApiClass):
+ """
+ A list of session transcripts
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+        transcripts (list[SessionTranscript]): A list of session transcripts.
+ """
+
+ def __init__(self, client, transcripts=None):
+ super().__init__(client, None)
+ self.transcripts = transcripts
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'transcripts': repr(self.transcripts)}
+ class_name = "SessionTranscripts"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'transcripts': self.transcripts}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
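A SessionTranscripts object simply wraps a list of SessionTranscript entries, each carrying a role and content. A small sketch of walking such a list; the role strings and text are illustrative, and None again stands in for the client:

```python
from abacusai.session_transcript import SessionTranscript

# Illustrative entries; in practice these come back from the API.
turns = [
    SessionTranscript(None, role="USER", content="Summarize my last session."),
    SessionTranscript(None, role="ASSISTANT", content="You reviewed the forecasting model."),
]
for turn in turns:
    print(f"{turn.role}: {turn.content}")
```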
diff --git a/abacusai/sts_gen_settings.py b/abacusai/sts_gen_settings.py
new file mode 100644
index 000000000..6b79a3dc0
--- /dev/null
+++ b/abacusai/sts_gen_settings.py
@@ -0,0 +1,36 @@
+from .return_class import AbstractApiClass
+
+
+class StsGenSettings(AbstractApiClass):
+ """
+ STS generation settings
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ model (dict): The model settings.
+ settings (dict): The settings for each model.
+ """
+
+ def __init__(self, client, model=None, settings=None):
+ super().__init__(client, None)
+ self.model = model
+ self.settings = settings
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'model': repr(self.model),
+ f'settings': repr(self.settings)}
+ class_name = "StsGenSettings"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'model': self.model, 'settings': self.settings}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/stt_gen_model.py b/abacusai/stt_gen_model.py
new file mode 100644
index 000000000..f2bcbdfa1
--- /dev/null
+++ b/abacusai/stt_gen_model.py
@@ -0,0 +1,48 @@
+from .return_class import AbstractApiClass
+from .stt_gen_model_options import SttGenModelOptions
+
+
+class SttGenModel(AbstractApiClass):
+ """
+ STT generation model
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ displayName (str): The display name for the UI component.
+ type (str): The type of the UI component.
+ valueType (str): The data type of the values within the UI component.
+ optional (bool): Whether the selection of a value is optional.
+ default (str): The default value for the STT generation model.
+ helptext (str): The helptext for the UI component.
+ options (SttGenModelOptions): The options of models available for STT generation.
+ """
+
+ def __init__(self, client, displayName=None, type=None, valueType=None, optional=None, default=None, helptext=None, options={}):
+ super().__init__(client, None)
+ self.display_name = displayName
+ self.type = type
+ self.value_type = valueType
+ self.optional = optional
+ self.default = default
+ self.helptext = helptext
+ self.options = client._build_class(SttGenModelOptions, options)
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'display_name': repr(self.display_name), f'type': repr(self.type), f'value_type': repr(self.value_type), f'optional': repr(
+ self.optional), f'default': repr(self.default), f'helptext': repr(self.helptext), f'options': repr(self.options)}
+ class_name = "SttGenModel"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'display_name': self.display_name, 'type': self.type, 'value_type': self.value_type, 'optional': self.optional,
+ 'default': self.default, 'helptext': self.helptext, 'options': self._get_attribute_as_dict(self.options)}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/stt_gen_model_options.py b/abacusai/stt_gen_model_options.py
new file mode 100644
index 000000000..f0f023fdf
--- /dev/null
+++ b/abacusai/stt_gen_model_options.py
@@ -0,0 +1,35 @@
+from .return_class import AbstractApiClass
+
+
+class SttGenModelOptions(AbstractApiClass):
+ """
+ STT generation model options
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+        keys (list): The keys of the STT generation model options represented as the enum values.
+        values (list): The display names of the STT generation model options.
+ """
+
+ def __init__(self, client, keys=None, values=None):
+ super().__init__(client, None)
+ self.keys = keys
+ self.values = values
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'keys': repr(self.keys), f'values': repr(self.values)}
+ class_name = "SttGenModelOptions"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'keys': self.keys, 'values': self.values}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
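The options classes (SttGenModelOptions, VideoGenModelOptions) expose parallel keys and values lists, where keys are enum values and values are their display names. When a lookup map is more convenient, zipping them works; the entries below are made up for illustration:

```python
# Illustrative enum values and display names; real ones come from the API response.
keys = ["WHISPER_LARGE", "DEEPGRAM_NOVA"]
values = ["Whisper Large", "Deepgram Nova"]

display_name_by_enum = dict(zip(keys, values))
assert display_name_by_enum["WHISPER_LARGE"] == "Whisper Large"
```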
diff --git a/abacusai/stt_gen_settings.py b/abacusai/stt_gen_settings.py
new file mode 100644
index 000000000..3dbf96a5a
--- /dev/null
+++ b/abacusai/stt_gen_settings.py
@@ -0,0 +1,38 @@
+from .return_class import AbstractApiClass
+from .stt_gen_model import SttGenModel
+
+
+class SttGenSettings(AbstractApiClass):
+ """
+ STT generation settings
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ settings (dict): The settings for each model.
+ model (SttGenModel): Dropdown for models available for STT generation.
+ """
+
+ def __init__(self, client, settings=None, model={}):
+ super().__init__(client, None)
+ self.settings = settings
+ self.model = client._build_class(SttGenModel, model)
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'settings': repr(
+ self.settings), f'model': repr(self.model)}
+ class_name = "SttGenSettings"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'settings': self.settings,
+ 'model': self._get_attribute_as_dict(self.model)}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/tts_gen_settings.py b/abacusai/tts_gen_settings.py
new file mode 100644
index 000000000..ba45aa98d
--- /dev/null
+++ b/abacusai/tts_gen_settings.py
@@ -0,0 +1,36 @@
+from .return_class import AbstractApiClass
+
+
+class TtsGenSettings(AbstractApiClass):
+ """
+ TTS generation settings
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ model (dict): The model settings.
+ settings (dict): The settings for each model.
+ """
+
+ def __init__(self, client, model=None, settings=None):
+ super().__init__(client, None)
+ self.model = model
+ self.settings = settings
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'model': repr(self.model),
+ f'settings': repr(self.settings)}
+ class_name = "TtsGenSettings"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'model': self.model, 'settings': self.settings}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/unified_connector.py b/abacusai/unified_connector.py
new file mode 100644
index 000000000..95fa16e21
--- /dev/null
+++ b/abacusai/unified_connector.py
@@ -0,0 +1,49 @@
+from .return_class import AbstractApiClass
+
+
+class UnifiedConnector(AbstractApiClass):
+ """
+ Lightweight unified connector filter that skips expensive auth transformations.
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+        applicationConnectorId (str): The unique ID of the application connector.
+        databaseConnectorId (str): The unique ID of the database connector.
+        service (str): The service this connector connects to.
+        name (str): A user-friendly name for the connector.
+        createdAt (str): When the connector was created.
+        status (str): The status of the Application Connector.
+        auth (dict): Non-secret connection information for this connector.
+        isUserLevel (bool): Whether this is a user-level connector.
+ """
+
+ def __init__(self, client, applicationConnectorId=None, databaseConnectorId=None, service=None, name=None, createdAt=None, status=None, auth=None, isUserLevel=None):
+ super().__init__(client, None)
+ self.application_connector_id = applicationConnectorId
+ self.database_connector_id = databaseConnectorId
+ self.service = service
+ self.name = name
+ self.created_at = createdAt
+ self.status = status
+ self.auth = auth
+ self.is_user_level = isUserLevel
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'application_connector_id': repr(self.application_connector_id), f'database_connector_id': repr(self.database_connector_id), f'service': repr(
+ self.service), f'name': repr(self.name), f'created_at': repr(self.created_at), f'status': repr(self.status), f'auth': repr(self.auth), f'is_user_level': repr(self.is_user_level)}
+ class_name = "UnifiedConnector"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'application_connector_id': self.application_connector_id, 'database_connector_id': self.database_connector_id, 'service': self.service,
+ 'name': self.name, 'created_at': self.created_at, 'status': self.status, 'auth': self.auth, 'is_user_level': self.is_user_level}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/upload.py b/abacusai/upload.py
index d1e8f071f..2a535b86f 100644
--- a/abacusai/upload.py
+++ b/abacusai/upload.py
@@ -121,19 +121,25 @@ def upload_part(self, upload_args):
(part_number, part_data) = upload_args
return self.part(part_number, part_data)
- def upload_file(self, file, threads=10, chunksize=1024 * 1024 * 10, wait_timeout=600):
+ def upload_file(self, file, threads=10, chunksize=None, wait_timeout=600):
"""
Uploads the file in the specified chunk size using the specified number of workers.
Args:
file (IOBase): A bytesIO or StringIO object to upload to Abacus.AI
threads (int): The max number of workers to use while uploading the file
- chunksize (int): The number of bytes to use for each chunk while uploading the file. Defaults to 10 MB
+            chunksize (int): The number of bytes to use for each chunk while uploading the file. If None, the chunk size is calculated from the file size, with a minimum of 20 MB and a maximum of 100 MB per part.
wait_timeout (int): The max number of seconds to wait for the file parts to be joined on Abacus.AI. Defaults to 600.
Returns:
Upload: The upload file object.
"""
+ if chunksize is None:
+ file.seek(0, 2)
+ file_size = file.tell()
+ file.seek(0)
+ chunksize = self._calculate_chunk_size(file_size)
+
with ThreadPoolExecutor(max_workers=threads) as pool:
pool.map(self.upload_part, self._yield_upload_part(file, chunksize))
upload_object = self.mark_complete()
@@ -146,6 +152,13 @@ def upload_file(self, file, threads=10, chunksize=1024 * 1024 * 10, wait_timeout
return self.client.describe_model(upload_object.model_id)
return upload_object
+ def _calculate_chunk_size(self, file_size):
+ MIN_CHUNK_SIZE = 20 * 1024 * 1024
+ MAX_CHUNK_SIZE = 100 * 1024 * 1024
+ MAX_PARTS = 10000
+ calculated_chunk_size = (file_size + MAX_PARTS - 1) // MAX_PARTS
+ return min(max(MIN_CHUNK_SIZE, calculated_chunk_size), MAX_CHUNK_SIZE)
+
def _yield_upload_part(self, file, chunksize):
part_number = 0
while True:
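The new chunking logic targets at most 10,000 parts per upload, clamped between 20 MB and 100 MB per part. A standalone sketch that mirrors `_calculate_chunk_size` to show how the bounds behave:

```python
def calculate_chunk_size(file_size: int) -> int:
    # Mirrors Upload._calculate_chunk_size for illustration purposes only.
    MIN_CHUNK_SIZE = 20 * 1024 * 1024    # 20 MB floor
    MAX_CHUNK_SIZE = 100 * 1024 * 1024   # 100 MB ceiling
    MAX_PARTS = 10000                    # uploads are split into at most 10,000 parts
    calculated = (file_size + MAX_PARTS - 1) // MAX_PARTS  # ceil(file_size / MAX_PARTS)
    return min(max(MIN_CHUNK_SIZE, calculated), MAX_CHUNK_SIZE)

# Small files fall back to the 20 MB floor; very large files are capped at 100 MB per part.
assert calculate_chunk_size(5 * 1024 * 1024) == 20 * 1024 * 1024
assert calculate_chunk_size(2 * 1024 ** 4) == 100 * 1024 * 1024
```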
diff --git a/abacusai/user.py b/abacusai/user.py
index 196815361..337dc33b4 100644
--- a/abacusai/user.py
+++ b/abacusai/user.py
@@ -4,10 +4,11 @@
class User(AbstractApiClass):
"""
- An Abacus.AI User
+ An Abacus.AI User with a unique user_id.
Args:
client (ApiClient): An authenticated API Client instance
+ userId (id): The unique identifier of the user.
name (str): The User's name.
email (str): The User's primary email address.
createdAt (str): The date and time when the user joined Abacus.AI.
@@ -15,8 +16,9 @@ class User(AbstractApiClass):
organizationGroups (OrganizationGroup): List of Organization Groups this user belongs to.
"""
- def __init__(self, client, name=None, email=None, createdAt=None, status=None, organizationGroups={}):
- super().__init__(client, None)
+ def __init__(self, client, userId=None, name=None, email=None, createdAt=None, status=None, organizationGroups={}):
+ super().__init__(client, userId)
+ self.user_id = userId
self.name = name
self.email = email
self.created_at = createdAt
@@ -26,7 +28,7 @@ def __init__(self, client, name=None, email=None, createdAt=None, status=None, o
self.deprecated_keys = {}
def __repr__(self):
- repr_dict = {f'name': repr(self.name), f'email': repr(self.email), f'created_at': repr(
+ repr_dict = {f'user_id': repr(self.user_id), f'name': repr(self.name), f'email': repr(self.email), f'created_at': repr(
self.created_at), f'status': repr(self.status), f'organization_groups': repr(self.organization_groups)}
class_name = "User"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
@@ -40,6 +42,6 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'name': self.name, 'email': self.email, 'created_at': self.created_at, 'status': self.status,
- 'organization_groups': self._get_attribute_as_dict(self.organization_groups)}
+ resp = {'user_id': self.user_id, 'name': self.name, 'email': self.email, 'created_at': self.created_at,
+ 'status': self.status, 'organization_groups': self._get_attribute_as_dict(self.organization_groups)}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/user_group_object_permission.py b/abacusai/user_group_object_permission.py
new file mode 100644
index 000000000..269ebe669
--- /dev/null
+++ b/abacusai/user_group_object_permission.py
@@ -0,0 +1,41 @@
+from .return_class import AbstractApiClass
+
+
+class UserGroupObjectPermission(AbstractApiClass):
+ """
+ A user group object permission
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ userGroupId (str): The unique identifier of the user group.
+ userGroupName (str): The name of the user group.
+ objectId (str): The unique identifier of the object.
+ permission (str): The permission level (e.g., 'ALL').
+ """
+
+ def __init__(self, client, userGroupId=None, userGroupName=None, objectId=None, permission=None):
+ super().__init__(client, None)
+ self.user_group_id = userGroupId
+ self.user_group_name = userGroupName
+ self.object_id = objectId
+ self.permission = permission
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'user_group_id': repr(self.user_group_id), f'user_group_name': repr(
+ self.user_group_name), f'object_id': repr(self.object_id), f'permission': repr(self.permission)}
+ class_name = "UserGroupObjectPermission"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'user_group_id': self.user_group_id, 'user_group_name': self.user_group_name,
+ 'object_id': self.object_id, 'permission': self.permission}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/video_gen_costs.py b/abacusai/video_gen_costs.py
new file mode 100644
index 000000000..c4bf16618
--- /dev/null
+++ b/abacusai/video_gen_costs.py
@@ -0,0 +1,37 @@
+from .return_class import AbstractApiClass
+
+
+class VideoGenCosts(AbstractApiClass):
+ """
+    The maximum cost, in credits, for each video generation model.
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+        modelCosts (dict): The cost, in credits, of each video generation model.
+        expensiveModels (list): The video generation models that are considered expensive.
+ """
+
+ def __init__(self, client, modelCosts=None, expensiveModels=None):
+ super().__init__(client, None)
+ self.model_costs = modelCosts
+ self.expensive_models = expensiveModels
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'model_costs': repr(
+ self.model_costs), f'expensive_models': repr(self.expensive_models)}
+ class_name = "VideoGenCosts"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'model_costs': self.model_costs,
+ 'expensive_models': self.expensive_models}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/video_gen_model.py b/abacusai/video_gen_model.py
new file mode 100644
index 000000000..b6a19552f
--- /dev/null
+++ b/abacusai/video_gen_model.py
@@ -0,0 +1,48 @@
+from .return_class import AbstractApiClass
+from .video_gen_model_options import VideoGenModelOptions
+
+
+class VideoGenModel(AbstractApiClass):
+ """
+ Video generation model
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ displayName (str): The display name for the UI component.
+ type (str): The type of the UI component.
+ valueType (str): The data type of the values within the UI component.
+ optional (bool): Whether the selection of a value is optional.
+ default (str): The default value for the video generation model.
+ helptext (str): The helptext for the UI component.
+ options (VideoGenModelOptions): The options of models available for video generation.
+ """
+
+ def __init__(self, client, displayName=None, type=None, valueType=None, optional=None, default=None, helptext=None, options={}):
+ super().__init__(client, None)
+ self.display_name = displayName
+ self.type = type
+ self.value_type = valueType
+ self.optional = optional
+ self.default = default
+ self.helptext = helptext
+ self.options = client._build_class(VideoGenModelOptions, options)
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'display_name': repr(self.display_name), f'type': repr(self.type), f'value_type': repr(self.value_type), f'optional': repr(
+ self.optional), f'default': repr(self.default), f'helptext': repr(self.helptext), f'options': repr(self.options)}
+ class_name = "VideoGenModel"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'display_name': self.display_name, 'type': self.type, 'value_type': self.value_type, 'optional': self.optional,
+ 'default': self.default, 'helptext': self.helptext, 'options': self._get_attribute_as_dict(self.options)}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/video_gen_model_options.py b/abacusai/video_gen_model_options.py
new file mode 100644
index 000000000..995d71fd9
--- /dev/null
+++ b/abacusai/video_gen_model_options.py
@@ -0,0 +1,35 @@
+from .return_class import AbstractApiClass
+
+
+class VideoGenModelOptions(AbstractApiClass):
+ """
+ Video generation model options
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ keys (list): The keys of the video generation model options represented as the enum values.
+ values (list): The display names of the video generation model options.
+ """
+
+ def __init__(self, client, keys=None, values=None):
+ super().__init__(client, None)
+ self.keys = keys
+ self.values = values
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'keys': repr(self.keys), f'values': repr(self.values)}
+ class_name = "VideoGenModelOptions"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'keys': self.keys, 'values': self.values}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/video_gen_settings.py b/abacusai/video_gen_settings.py
index 2275fd8a7..5b951e427 100644
--- a/abacusai/video_gen_settings.py
+++ b/abacusai/video_gen_settings.py
@@ -1,4 +1,5 @@
from .return_class import AbstractApiClass
+from .video_gen_model import VideoGenModel
class VideoGenSettings(AbstractApiClass):
@@ -7,19 +8,21 @@ class VideoGenSettings(AbstractApiClass):
Args:
client (ApiClient): An authenticated API Client instance
- model (dict): The model settings.
settings (dict): The settings for each model.
+ warnings (dict): The warnings for each model.
+ model (VideoGenModel): Dropdown for models available for video generation.
"""
- def __init__(self, client, model=None, settings=None):
+ def __init__(self, client, settings=None, warnings=None, model={}):
super().__init__(client, None)
- self.model = model
self.settings = settings
+ self.warnings = warnings
+ self.model = client._build_class(VideoGenModel, model)
self.deprecated_keys = {}
def __repr__(self):
- repr_dict = {f'model': repr(self.model),
- f'settings': repr(self.settings)}
+ repr_dict = {f'settings': repr(self.settings), f'warnings': repr(
+ self.warnings), f'model': repr(self.model)}
class_name = "VideoGenSettings"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -32,5 +35,6 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'model': self.model, 'settings': self.settings}
+ resp = {'settings': self.settings, 'warnings': self.warnings,
+ 'model': self._get_attribute_as_dict(self.model)}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/web_app_conversation.py b/abacusai/web_app_conversation.py
new file mode 100644
index 000000000..d16bb549d
--- /dev/null
+++ b/abacusai/web_app_conversation.py
@@ -0,0 +1,43 @@
+from .return_class import AbstractApiClass
+
+
+class WebAppConversation(AbstractApiClass):
+ """
+ Web App Conversation
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ deploymentConversationId (id): The ID of the deployment conversation
+ llmArtifactId (id): The ID of the LLM artifact
+ deploymentConversationName (str): The name of the conversation
+ externalApplicationId (str): The external application ID
+ createdAt (str): The creation timestamp
+ """
+
+ def __init__(self, client, deploymentConversationId=None, llmArtifactId=None, deploymentConversationName=None, externalApplicationId=None, createdAt=None):
+ super().__init__(client, None)
+ self.deployment_conversation_id = deploymentConversationId
+ self.llm_artifact_id = llmArtifactId
+ self.deployment_conversation_name = deploymentConversationName
+ self.external_application_id = externalApplicationId
+ self.created_at = createdAt
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'deployment_conversation_id': repr(self.deployment_conversation_id), f'llm_artifact_id': repr(self.llm_artifact_id), f'deployment_conversation_name': repr(
+ self.deployment_conversation_name), f'external_application_id': repr(self.external_application_id), f'created_at': repr(self.created_at)}
+ class_name = "WebAppConversation"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'deployment_conversation_id': self.deployment_conversation_id, 'llm_artifact_id': self.llm_artifact_id,
+ 'deployment_conversation_name': self.deployment_conversation_name, 'external_application_id': self.external_application_id, 'created_at': self.created_at}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/web_app_deployment_permission_dict.py b/abacusai/web_app_deployment_permission_dict.py
new file mode 100644
index 000000000..38e47f11c
--- /dev/null
+++ b/abacusai/web_app_deployment_permission_dict.py
@@ -0,0 +1,34 @@
+from .return_class import AbstractApiClass
+
+
+class WebAppDeploymentPermissionDict(AbstractApiClass):
+ """
+ Web app deployment permission dict.
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+        deploymentPermissions (dict): Dictionary mapping each deployment ID to a list of (user_group_id, permission) tuples.
+ """
+
+ def __init__(self, client, deploymentPermissions=None):
+ super().__init__(client, None)
+ self.deployment_permissions = deploymentPermissions
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'deployment_permissions': repr(
+ self.deployment_permissions)}
+ class_name = "WebAppDeploymentPermissionDict"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'deployment_permissions': self.deployment_permissions}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
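The deploymentPermissions payload described above is a plain dict keyed by deployment ID. A sketch of its expected shape; the IDs and the "READ" level are made up, only "ALL" appears in the docstring:

```python
# Illustrative shape of WebAppDeploymentPermissionDict.deployment_permissions.
deployment_permissions = {
    "deployment_123": [("user_group_1", "ALL"), ("user_group_2", "READ")],
    "deployment_456": [("user_group_1", "ALL")],
}

for deployment_id, grants in deployment_permissions.items():
    for user_group_id, permission in grants:
        print(deployment_id, user_group_id, permission)
```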
diff --git a/abacusai/web_app_domain.py b/abacusai/web_app_domain.py
new file mode 100644
index 000000000..bc6dbc0ea
--- /dev/null
+++ b/abacusai/web_app_domain.py
@@ -0,0 +1,51 @@
+from .return_class import AbstractApiClass
+
+
+class WebAppDomain(AbstractApiClass):
+ """
+ Web App Domain
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ webAppDomainId (id): The ID of the web app domain
+ hostname (str): The hostname of the web app domain
+ domainType (str): The type of the web app domain
+ lifecycle (str): The lifecycle of the web app domain
+ nameservers (list): The nameservers of the web app domain
+ dnsRecords (list): The DNS records of the web app domain
+ metadata (dict): The metadata of the web app domain
+ isRootDomain (bool): Whether the web app domain is a root domain
+ isDeployed (bool): Whether the web app domain is deployed
+ """
+
+ def __init__(self, client, webAppDomainId=None, hostname=None, domainType=None, lifecycle=None, nameservers=None, dnsRecords=None, metadata=None, isRootDomain=None, isDeployed=None):
+ super().__init__(client, webAppDomainId)
+ self.web_app_domain_id = webAppDomainId
+ self.hostname = hostname
+ self.domain_type = domainType
+ self.lifecycle = lifecycle
+ self.nameservers = nameservers
+ self.dns_records = dnsRecords
+ self.metadata = metadata
+ self.is_root_domain = isRootDomain
+ self.is_deployed = isDeployed
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'web_app_domain_id': repr(self.web_app_domain_id), f'hostname': repr(self.hostname), f'domain_type': repr(self.domain_type), f'lifecycle': repr(self.lifecycle), f'nameservers': repr(
+ self.nameservers), f'dns_records': repr(self.dns_records), f'metadata': repr(self.metadata), f'is_root_domain': repr(self.is_root_domain), f'is_deployed': repr(self.is_deployed)}
+ class_name = "WebAppDomain"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'web_app_domain_id': self.web_app_domain_id, 'hostname': self.hostname, 'domain_type': self.domain_type, 'lifecycle': self.lifecycle,
+ 'nameservers': self.nameservers, 'dns_records': self.dns_records, 'metadata': self.metadata, 'is_root_domain': self.is_root_domain, 'is_deployed': self.is_deployed}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/web_service_trigger_run.py b/abacusai/web_service_trigger_run.py
new file mode 100644
index 000000000..eeb7f24ae
--- /dev/null
+++ b/abacusai/web_service_trigger_run.py
@@ -0,0 +1,51 @@
+from .return_class import AbstractApiClass
+
+
+class WebServiceTriggerRun(AbstractApiClass):
+ """
+ A web service trigger run
+
+ Args:
+ client (ApiClient): An authenticated API Client instance
+ endpoint (str): The endpoint of the web service trigger run.
+ createdAt (str): The creation time of the web service trigger run.
+ payload (dict): The payload of the web service trigger run.
+ headers (dict): The headers of the web service trigger run.
+ method (str): The method of the web service trigger run.
+ responseStatus (int): The HTTP response status code.
+ responseBody (str): The HTTP response body.
+ error (str): Error message if the request failed.
+ lifecycle (str): The lifecycle status of the run.
+ """
+
+ def __init__(self, client, endpoint=None, createdAt=None, payload=None, headers=None, method=None, responseStatus=None, responseBody=None, error=None, lifecycle=None):
+ super().__init__(client, None)
+ self.endpoint = endpoint
+ self.created_at = createdAt
+ self.payload = payload
+ self.headers = headers
+ self.method = method
+ self.response_status = responseStatus
+ self.response_body = responseBody
+ self.error = error
+ self.lifecycle = lifecycle
+ self.deprecated_keys = {}
+
+ def __repr__(self):
+ repr_dict = {f'endpoint': repr(self.endpoint), f'created_at': repr(self.created_at), f'payload': repr(self.payload), f'headers': repr(self.headers), f'method': repr(
+ self.method), f'response_status': repr(self.response_status), f'response_body': repr(self.response_body), f'error': repr(self.error), f'lifecycle': repr(self.lifecycle)}
+ class_name = "WebServiceTriggerRun"
+ repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
+ ) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
+ return f"{class_name}({repr_str})"
+
+ def to_dict(self):
+ """
+ Get a dict representation of the parameters in this class
+
+ Returns:
+ dict: The dict value representation of the class parameters
+ """
+ resp = {'endpoint': self.endpoint, 'created_at': self.created_at, 'payload': self.payload, 'headers': self.headers, 'method': self.method,
+ 'response_status': self.response_status, 'response_body': self.response_body, 'error': self.error, 'lifecycle': self.lifecycle}
+ return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/abacusai/workflow_graph_node_details.py b/abacusai/workflow_graph_node_details.py
index 9607b3824..398e52666 100644
--- a/abacusai/workflow_graph_node_details.py
+++ b/abacusai/workflow_graph_node_details.py
@@ -9,19 +9,21 @@ class WorkflowGraphNodeDetails(AbstractApiClass):
Args:
client (ApiClient): An authenticated API Client instance
packageRequirements (list[str]): A list of package requirements that the node source code will need.
+ connectors (dict): A dictionary of connectors that the node source code will need.
workflowGraphNode (WorkflowGraphNode): The workflow graph node object.
"""
- def __init__(self, client, packageRequirements=None, workflowGraphNode={}):
+ def __init__(self, client, packageRequirements=None, connectors=None, workflowGraphNode={}):
super().__init__(client, None)
self.package_requirements = packageRequirements
+ self.connectors = connectors
self.workflow_graph_node = client._build_class(
WorkflowGraphNode, workflowGraphNode)
self.deprecated_keys = {}
def __repr__(self):
- repr_dict = {f'package_requirements': repr(
- self.package_requirements), f'workflow_graph_node': repr(self.workflow_graph_node)}
+ repr_dict = {f'package_requirements': repr(self.package_requirements), f'connectors': repr(
+ self.connectors), f'workflow_graph_node': repr(self.workflow_graph_node)}
class_name = "WorkflowGraphNodeDetails"
repr_str = ',\n '.join([f'{key}={value}' for key, value in repr_dict.items(
) if getattr(self, key, None) is not None and key not in self.deprecated_keys])
@@ -34,6 +36,6 @@ def to_dict(self):
Returns:
dict: The dict value representation of the class parameters
"""
- resp = {'package_requirements': self.package_requirements,
+ resp = {'package_requirements': self.package_requirements, 'connectors': self.connectors,
'workflow_graph_node': self._get_attribute_as_dict(self.workflow_graph_node)}
return {key: value for key, value in resp.items() if value is not None and key not in self.deprecated_keys}
diff --git a/build_sdk/bin/python b/build_sdk/bin/python
new file mode 120000
index 000000000..b8a0adbbb
--- /dev/null
+++ b/build_sdk/bin/python
@@ -0,0 +1 @@
+python3
\ No newline at end of file
diff --git a/build_sdk/bin/python3 b/build_sdk/bin/python3
new file mode 120000
index 000000000..ae65fdaa1
--- /dev/null
+++ b/build_sdk/bin/python3
@@ -0,0 +1 @@
+/usr/bin/python3
\ No newline at end of file
diff --git a/build_sdk/bin/python3.10 b/build_sdk/bin/python3.10
new file mode 120000
index 000000000..b8a0adbbb
--- /dev/null
+++ b/build_sdk/bin/python3.10
@@ -0,0 +1 @@
+python3
\ No newline at end of file
diff --git a/build_sdk/lib64 b/build_sdk/lib64
new file mode 120000
index 000000000..7951405f8
--- /dev/null
+++ b/build_sdk/lib64
@@ -0,0 +1 @@
+lib
\ No newline at end of file
diff --git a/build_sdk/pyvenv.cfg b/build_sdk/pyvenv.cfg
new file mode 100644
index 000000000..13f3678cd
--- /dev/null
+++ b/build_sdk/pyvenv.cfg
@@ -0,0 +1,3 @@
+home = /usr/bin
+include-system-site-packages = false
+version = 3.10.18
diff --git a/docs/.buildinfo b/docs/.buildinfo
index d2eadcf7b..3297d19ed 100644
--- a/docs/.buildinfo
+++ b/docs/.buildinfo
@@ -1,4 +1,4 @@
# Sphinx build info version 1
# This file records the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 787e52a6c82af449544cb6a1253baab3
+config: df4b3fc78cc9b7bc30e9104def812ac3
tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/docs/.buildinfo.bak b/docs/.buildinfo.bak
index 5b1654b3d..c56024fd6 100644
--- a/docs/.buildinfo.bak
+++ b/docs/.buildinfo.bak
@@ -1,4 +1,4 @@
# Sphinx build info version 1
-# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 12535596649b36dd1c7e0cddf2d1e3b2
+# This file records the configuration used when building these files. When it is not found, a full rebuild will be done.
+config: 1807596735468513042c35ba1a2f25ef
tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/docs/_sources/autoapi/abacusai/api_class/ai_agents/index.rst.txt b/docs/_sources/autoapi/abacusai/api_class/ai_agents/index.rst.txt
index a668e9f3d..1cd382616 100644
--- a/docs/_sources/autoapi/abacusai/api_class/ai_agents/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/api_class/ai_agents/index.rst.txt
@@ -4,6 +4,14 @@ abacusai.api_class.ai_agents
.. py:module:: abacusai.api_class.ai_agents
+Attributes
+----------
+
+.. autoapisummary::
+
+ abacusai.api_class.ai_agents.MIN_AGENT_SLEEP_TIME
+
+
Classes
-------
@@ -17,6 +25,9 @@ Classes
abacusai.api_class.ai_agents.WorkflowNodeOutputSchema
abacusai.api_class.ai_agents.TriggerConfig
abacusai.api_class.ai_agents.WorkflowGraphNode
+ abacusai.api_class.ai_agents.DecisionNode
+ abacusai.api_class.ai_agents.LLMAgentNode
+ abacusai.api_class.ai_agents.InputNode
abacusai.api_class.ai_agents.WorkflowGraphEdge
abacusai.api_class.ai_agents.WorkflowGraph
abacusai.api_class.ai_agents.AgentConversationMessage
@@ -36,7 +47,11 @@ Functions
Module Contents
---------------
-.. py:function:: validate_input_dict_param(dict_object, friendly_class_name, must_contain=[])
+.. py:data:: MIN_AGENT_SLEEP_TIME
+ :value: 300
+
+
+.. py:function:: validate_input_dict_param(dict_object, friendly_class_name, must_contain=[], schema={})
.. py:class:: FieldDescriptor
@@ -107,6 +122,8 @@ Module Contents
:type is_required: bool
:param description: The description of this input.
:type description: str
+ :param long_description: The detailed description of this input.
+ :type long_description: str
:param constant_value: The constant value of this input if variable type is CONSTANT. Only applicable for template nodes.
:type constant_value: str
@@ -143,6 +160,12 @@ Module Contents
+ .. py:attribute:: long_description
+ :type: str
+ :value: None
+
+
+
.. py:attribute:: constant_value
:type: str
:value: None
@@ -154,11 +177,6 @@ Module Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:method:: from_dict(mapping)
:classmethod:
@@ -208,11 +226,6 @@ Module Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:method:: from_dict(schema)
:classmethod:
@@ -243,6 +256,17 @@ Module Contents
+ .. py:method:: from_tool_variable_mappings(tool_variable_mappings)
+ :classmethod:
+
+
+ Creates a WorkflowNodeInputSchema for the given tool variable mappings.
+
+ :param tool_variable_mappings: The tool variable mappings for the node.
+ :type tool_variable_mappings: List[dict]
+
+
+
.. py:class:: WorkflowNodeOutputMapping
Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
@@ -256,6 +280,8 @@ Module Contents
:type variable_type: Union[WorkflowNodeOutputType, str]
:param description: The description of this output.
:type description: str
+ :param long_description: The detailed description of this output.
+ :type long_description: str
.. py:attribute:: name
@@ -272,15 +298,16 @@ Module Contents
- .. py:method:: __post_init__()
+ .. py:attribute:: long_description
+ :type: str
+ :value: None
- .. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
+ .. py:method:: __post_init__()
+
+ .. py:method:: to_dict()
.. py:method:: from_dict(mapping)
@@ -305,11 +332,6 @@ Module Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:method:: from_dict(schema)
:classmethod:
@@ -335,18 +357,13 @@ Module Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:method:: from_dict(configs)
:classmethod:
-.. py:class:: WorkflowGraphNode(name, function = None, input_mappings = None, output_mappings = None, function_name = None, source_code = None, input_schema = None, output_schema = None, template_metadata = None, trigger_config = None)
+.. py:class:: WorkflowGraphNode(name, function = None, input_mappings = None, output_mappings = None, function_name = None, source_code = None, input_schema = None, output_schema = None, template_metadata = None, trigger_config = None, description = None)
Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
@@ -357,14 +374,16 @@ Module Contents
:type name: str
:param input_mappings: List of input mappings for the node. Each arg/kwarg of the node function should have a corresponding input mapping.
:type input_mappings: List[WorkflowNodeInputMapping]
- :param output_mappings: List of output mappings for the node. Each field in the returned dict/AgentResponse must have a corresponding output mapping.
- :type output_mappings: List[WorkflowNodeOutputMapping]
+ :param output_mappings: List of outputs for the node. Each field in the returned dict/AgentResponse must have a corresponding output in the list.
+ :type output_mappings: List[str]
:param function: The callable node function reference.
:type function: callable
- :param input_schema: The react json schema for the user input variables.
+ :param input_schema: The react json schema for the user input variables. This should be empty for CHAT interface.
:type input_schema: WorkflowNodeInputSchema
- :param output_schema: The react json schema for the output to be shown on UI.
- :type output_schema: WorkflowNodeOutputSchema
+ :param output_schema: The list of outputs to be shown on UI. Each output corresponds to a field in the output mappings of the node.
+ :type output_schema: List[str]
+ :param description: A description of the workflow node.
+ :type description: str
Additional Attributes:
function_name (str): The name of the function.
@@ -372,6 +391,17 @@ Module Contents
trigger_config (TriggerConfig): The configuration for a trigger workflow node.
+ .. py:method:: is_special_node_type()
+ :classmethod:
+
+
+ Returns True if this node type skips source code validation.
+
+ Special node types (like InputNode and LLMAgentNode) are executed
+ directly without source code validation.
+
+
+
.. py:attribute:: template_metadata
:value: None
@@ -382,7 +412,17 @@ Module Contents
- .. py:method:: _raw_init(name, input_mappings = None, output_mappings = None, function = None, function_name = None, source_code = None, input_schema = None, output_schema = None, template_metadata = None, trigger_config = None)
+ .. py:attribute:: node_type
+ :value: 'workflow_node'
+
+
+
+ .. py:attribute:: description
+ :value: None
+
+
+
+ .. py:method:: _raw_init(name, input_mappings = None, output_mappings = None, function = None, function_name = None, source_code = None, input_schema = None, output_schema = None, template_metadata = None, trigger_config = None, description = None)
:classmethod:
@@ -392,12 +432,47 @@ Module Contents
- .. py:method:: to_dict()
+ .. py:method:: from_tool(tool_name, name, configs = None, input_mappings = None, input_schema = None, output_schema = None)
+ :classmethod:
+
+
+ Creates and returns a WorkflowGraphNode based on an available user created tool.
+ Note: DO NOT specify the output mapping for the tool; it will be inferred automatically. Doing so will raise an error.
+
+ :param tool_name: The name of the tool. There should already be a tool created in the platform with tool_name.
+ :param name: The name to assign to the WorkflowGraphNode instance.
+ :param configs: The configuration state of the tool to use (if necessary). If not specified, will use the tool's default configuration.
+ :type configs: optional
+ :param input_mappings: The WorkflowNodeInputMappings for this node.
+ :type input_mappings: optional
+ :param input_schema: The WorkflowNodeInputSchema for this node.
+ :type input_schema: optional
+ :param output_schema: The WorkflowNodeOutputSchema for this node.
+ :type output_schema: optional
+
+
+
+ .. py:method:: from_system_tool(tool_name, name, configs = None, input_mappings = None, input_schema = None, output_schema = None)
+ :classmethod:
+
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
+ Creates and returns a WorkflowGraphNode based on the name of an available system tool.
+ Note: DO NOT specify the output mapping for the tool; it will be inferred automatically. Doing so will raise an error.
+ :param tool_name: The name of the tool. There should already be a tool created in the platform with tool_name.
+ :param name: The name to assign to the WorkflowGraphNode instance.
+ :param configs: The configuration state of the tool to use (if necessary). If not specified, will use the tool's default configuration.
+ :type configs: optional
+ :param input_mappings: The WorkflowNodeInputMappings for this node.
+ :type input_mappings: optional
+ :param input_schema: The WorkflowNodeInputSchema for this node.
+ :type input_schema: optional
+ :param output_schema: The WorkflowNodeOutputSchema for this node.
+ :type output_schema: optional
+
+
+
+ .. py:method:: to_dict()
.. py:method:: is_template_node()
@@ -429,6 +504,262 @@ Module Contents
.. py:property:: outputs
+.. py:class:: DecisionNode(name, condition, input_mappings, description = None)
+
+ Bases: :py:obj:`WorkflowGraphNode`
+
+
+ Represents a decision node in an Agent workflow graph. It is connected between two workflow nodes and is used to determine if subsequent nodes should be executed.
+
+
+ .. py:attribute:: node_type
+ :value: 'decision_node'
+
+
+
+ .. py:attribute:: name
+
+
+ .. py:attribute:: source_code
+
+
+ .. py:attribute:: description
+ :value: None
+
+
+
+ .. py:attribute:: output_mappings
+
+
+ .. py:attribute:: template_metadata
+ :value: None
+
+
+
+ .. py:attribute:: trigger_config
+ :value: None
+
+
+
+ .. py:attribute:: input_schema
+ :value: None
+
+
+
+ .. py:attribute:: output_schema
+ :value: None
+
+
+
+ .. py:attribute:: function_name
+ :value: None
+
+
+
+ .. py:method:: to_dict()
+
+
+ .. py:method:: from_dict(node)
+ :classmethod:
+
+
+
+.. py:class:: LLMAgentNode(name, chatbot_deployment_id = None, chatbot_parameters = None, input_schema = None, output_schema = None)
+
+ Bases: :py:obj:`WorkflowGraphNode`
+
+
+    Represents an LLM agent node in an Agent workflow graph. The LLM Agent Node can be initialized using either chatbot_deployment_id or chatbot_parameters.
+
+ :param name: A unique name for the LLM agent node.
+ :type name: str
+ :param chatbot_deployment_id: The deployment ID of the chatbot to use for this node. If not provided, a new chatbot will be created.
+ :type chatbot_deployment_id: str
+ :param chatbot_parameters: Parameters for configuring the chatbot, such as data_feature_group_ids and document_retrievers.
+ :type chatbot_parameters: dict
+ :param uid: A unique identifier for the LLM agent node. If not provided, a unique ID will be auto-generated.
+ :type uid: str
+
+
+ .. py:attribute:: name
+ :type: str
+
+
+ .. py:attribute:: chatbot_deployment_id
+ :type: str
+ :value: None
+
+
+
+ .. py:attribute:: chatbot_parameters
+ :type: dict
+
+
+ .. py:attribute:: uid
+ :type: str
+ :value: None
+
+
+
+ .. py:method:: is_special_node_type()
+ :classmethod:
+
+
+ LLM agent nodes skip source code validation.
+
+
+
+ .. py:attribute:: node_type
+ :value: 'llm_agent_node'
+
+
+
+ .. py:attribute:: source_code
+ :value: None
+
+
+
+ .. py:method:: get_source_code()
+
+
+ .. py:method:: get_function_name()
+
+
+ .. py:method:: to_dict()
+
+
+ .. py:method:: from_dict(node)
+ :classmethod:
+
+
+
+.. py:class:: InputNode(name, input_mappings = None, output_mappings = None, input_schema = None, output_schema = None, description = None, function_name = None, source_code = None, uid = None)
+
+ Bases: :py:obj:`WorkflowGraphNode`
+
+
+ Represents an input node in an agent workflow graph.
+
+ Accepts multiple user inputs (text and file) and processes them into a single combined text output.
+ File inputs are processed using OCR/document extraction when applicable.
+
+ :param name: The name of the input node.
+ :type name: str
+ :param input_mappings: The input mappings for the input node. If not provided, defaults to a text_input and file_input.
+ :type input_mappings: Union[Dict[str, WorkflowNodeInputMapping], List[WorkflowNodeInputMapping]]
+ :param output_mappings: The output mappings for the input node. If not provided, defaults to a single 'answer' output.
+ :type output_mappings: Union[List[str], Dict[str, str], List[WorkflowNodeOutputMapping]]
+ :param input_schema: The input schema for the input node. If not provided, will be generated from input_mappings.
+ :type input_schema: WorkflowNodeInputSchema
+ :param output_schema: The output schema for the input node. If not provided, will be generated from output_mappings.
+ :type output_schema: WorkflowNodeOutputSchema
+ :param description: The description of the input node.
+ :type description: str
+ :param uid: A unique identifier for the input node. Auto-generated if not provided.
+ :type uid: str
+
+
+ .. py:attribute:: name
+ :type: str
+
+
+ .. py:attribute:: input_mappings
+ :type: Union[Dict[str, WorkflowNodeInputMapping], List[WorkflowNodeInputMapping]]
+ :value: None
+
+
+
+ .. py:attribute:: output_mappings
+ :type: Union[List[str], Dict[str, str], List[WorkflowNodeOutputMapping]]
+ :value: None
+
+
+
+ .. py:attribute:: input_schema
+ :type: WorkflowNodeInputSchema
+ :value: None
+
+
+
+ .. py:attribute:: output_schema
+ :type: WorkflowNodeOutputSchema
+ :value: None
+
+
+
+ .. py:attribute:: description
+ :type: str
+ :value: None
+
+
+
+ .. py:attribute:: uid
+ :type: str
+ :value: None
+
+
+
+ .. py:method:: is_special_node_type()
+ :classmethod:
+
+
+ Input nodes skip source code validation.
+
+
+
+ .. py:method:: _mark_file_inputs_in_schema(input_schema)
+ :staticmethod:
+
+
+ Mark file inputs in the schema based on format and ui:widget indicators.
+
+
+
+ .. py:attribute:: node_type
+ :value: 'input_node'
+
+
+
+ .. py:attribute:: source_code
+ :value: None
+
+
+
+ .. py:attribute:: template_metadata
+ :value: None
+
+
+
+ .. py:attribute:: trigger_config
+ :value: None
+
+
+
+ .. py:method:: to_dict()
+
+ Return snake_case keys to match return filter expectations.
+ (The base ApiClass.to_dict() camelCases keys, which causes the
+ WorkflowGraphNodeDetails return filter to miss fields.)
+
+
+
+ .. py:method:: from_dict(node)
+ :classmethod:
+
+
+
+ .. py:method:: get_function_name()
+
+
+ .. py:method:: get_source_code()
+
+ Return the stored source code, if any.
+
+
+
+ .. py:method:: get_input_schema()
+
+
.. py:class:: WorkflowGraphEdge(source, target, details = None)
Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
@@ -462,6 +793,11 @@ Module Contents
.. py:method:: to_nx_edge()
+ .. py:method:: from_dict(input_dict)
+ :classmethod:
+
+
+
.. py:class:: WorkflowGraph
Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
@@ -469,20 +805,16 @@ Module Contents
Represents an Agent workflow graph.
- The edges define the node invocation order.
-
:param nodes: A list of nodes in the workflow graph.
- :type nodes: List[WorkflowGraphNode]
- :param edges: A list of edges in the workflow graph, where each edge is a tuple of source, target, and details.
- :type edges: List[WorkflowGraphEdge]
+ :type nodes: List[Union[WorkflowGraphNode, DecisionNode, LLMAgentNode]]
:param primary_start_node: The primary node to start the workflow from.
- :type primary_start_node: Union[str, WorkflowGraphNode]
+ :type primary_start_node: Union[str, WorkflowGraphNode, LLMAgentNode]
:param common_source_code: Common source code that can be used across all nodes.
:type common_source_code: str
.. py:attribute:: nodes
- :type: List[WorkflowGraphNode]
+ :type: List[Union[WorkflowGraphNode, DecisionNode, LLMAgentNode]]
:value: []
@@ -494,7 +826,7 @@ Module Contents
.. py:attribute:: primary_start_node
- :type: Union[str, WorkflowGraphNode]
+ :type: Union[str, WorkflowGraphNode, LLMAgentNode]
:value: None
@@ -516,11 +848,6 @@ Module Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:method:: from_dict(graph)
:classmethod:
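+
+   A construction sketch for the graph documented above, assuming keyword
+   construction of the dataclass fields; node and edge names are illustrative.
+
+   .. code-block:: python
+
+      from abacusai.api_class import InputNode, WorkflowGraph, WorkflowGraphEdge
+
+      entry = InputNode(name='user_input')
+
+      # primary_start_node accepts the node object or its name.
+      graph = WorkflowGraph(nodes=[entry], primary_start_node=entry)
+
+      # Where explicit ordering is needed, an edge is still described by
+      # WorkflowGraphEdge; 'details' is optional.
+      edge = WorkflowGraphEdge(source='user_input', target='summarize')
+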
@@ -562,11 +889,6 @@ Module Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:class:: WorkflowNodeTemplateConfig
@@ -609,11 +931,6 @@ Module Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:method:: from_dict(mapping)
:classmethod:
@@ -633,6 +950,8 @@ Module Contents
:type is_required: bool
:param description: The description of this input.
:type description: str
+ :param long_description: The detailed description of this input.
+ :type long_description: str
.. py:attribute:: name
@@ -651,12 +970,13 @@ Module Contents
- .. py:method:: to_dict()
+ .. py:attribute:: long_description
+ :type: str
+ :value: ''
+
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
+ .. py:method:: to_dict()
.. py:method:: from_dict(mapping)
@@ -677,6 +997,8 @@ Module Contents
:type variable_type: WorkflowNodeOutputType
:param description: The description of this output.
:type description: str
+ :param long_description: The detailed description of this output.
+ :type long_description: str
.. py:attribute:: name
@@ -693,13 +1015,14 @@ Module Contents
- .. py:method:: to_dict()
+ .. py:attribute:: long_description
+ :type: str
+ :value: ''
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
+ .. py:method:: to_dict()
+
.. py:method:: from_dict(mapping)
:classmethod:
diff --git a/docs/_sources/autoapi/abacusai/api_class/batch_prediction/index.rst.txt b/docs/_sources/autoapi/abacusai/api_class/batch_prediction/index.rst.txt
index bebc36f13..a7e8905ac 100644
--- a/docs/_sources/autoapi/abacusai/api_class/batch_prediction/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/api_class/batch_prediction/index.rst.txt
@@ -312,6 +312,8 @@ Module Contents
   :param for_eval: If True, the test fold which was created during training and used for metrics calculation will be used as input data. These predictions are, hence, used for model evaluation.
:type for_eval: bool
+ :param files_input_location: The input location for the files.
+ :type files_input_location: str
:param files_output_location_prefix: The output location prefix for the files.
:type files_output_location_prefix: str
:param channel_id_to_label_map: JSON string for the map from channel ids to their labels.
@@ -324,6 +326,12 @@ Module Contents
+ .. py:attribute:: files_input_location
+ :type: str
+ :value: None
+
+
+
.. py:attribute:: files_output_location_prefix
:type: str
:value: None
@@ -475,10 +483,6 @@ Module Contents
Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`
- Helper class that provides a standard way to create an ABC using
- inheritance.
-
-
.. py:attribute:: config_abstract_class
diff --git a/docs/_sources/autoapi/abacusai/api_class/connectors/index.rst.txt b/docs/_sources/autoapi/abacusai/api_class/connectors/index.rst.txt
index 30cba51af..fc829719a 100644
--- a/docs/_sources/autoapi/abacusai/api_class/connectors/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/api_class/connectors/index.rst.txt
@@ -64,10 +64,6 @@ Module Contents
Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`
- Helper class that provides a standard way to create an ABC using
- inheritance.
-
-
.. py:attribute:: config_abstract_class
diff --git a/docs/_sources/autoapi/abacusai/api_class/dataset_application_connector/index.rst.txt b/docs/_sources/autoapi/abacusai/api_class/dataset_application_connector/index.rst.txt
index 088cdaf6f..397403c9d 100644
--- a/docs/_sources/autoapi/abacusai/api_class/dataset_application_connector/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/api_class/dataset_application_connector/index.rst.txt
@@ -20,7 +20,10 @@ Classes
abacusai.api_class.dataset_application_connector.ZendeskDatasetConfig
abacusai.api_class.dataset_application_connector.AbacusUsageMetricsDatasetConfig
abacusai.api_class.dataset_application_connector.TeamsScraperDatasetConfig
+ abacusai.api_class.dataset_application_connector.OutlookDatasetConfig
+ abacusai.api_class.dataset_application_connector.AzureStorageDatasetConfig
abacusai.api_class.dataset_application_connector.FreshserviceDatasetConfig
+ abacusai.api_class.dataset_application_connector.SftpDatasetConfig
abacusai.api_class.dataset_application_connector._ApplicationConnectorDatasetConfigFactory
@@ -79,6 +82,8 @@ Module Contents
:type pull_attachments: bool
:param extract_bounding_boxes: Whether to extract bounding boxes from the documents
:type extract_bounding_boxes: bool
+ :param location_type: The type of location to be fetched. Maps values in `location` to content type, example: 'spaceKey/folderTitle/*' -> 'folder'
+ :type location_type: str
.. py:attribute:: location
@@ -105,6 +110,12 @@ Module Contents
+ .. py:attribute:: location_type
+ :type: str
+ :value: None
+
+
+
.. py:method:: __post_init__()
@@ -194,6 +205,8 @@ Module Contents
:type extract_bounding_boxes: bool
:param merge_file_schemas: Signifies if the merge file schema policy is enabled. Not applicable if is_documentset is True
:type merge_file_schemas: bool
+   :param location_type: The type of path to fetch. Use 'shared' for shared files. If not provided, files will be fetched from the root folder.
+ :type location_type: str
.. py:attribute:: location
@@ -220,6 +233,12 @@ Module Contents
+ .. py:attribute:: location_type
+ :type: str
+ :value: None
+
+
+
.. py:method:: __post_init__()
@@ -326,6 +345,8 @@ Module Contents
:type extract_bounding_boxes: bool
:param merge_file_schemas: Signifies if the merge file schema policy is enabled. Not applicable if is_documentset is True
:type merge_file_schemas: bool
+ :param add_file_metadata: Signifies if the file metadata should be added to the dataset
+ :type add_file_metadata: bool
.. py:attribute:: location
@@ -352,6 +373,12 @@ Module Contents
+ .. py:attribute:: add_file_metadata
+ :type: bool
+ :value: False
+
+
+
.. py:method:: __post_init__()
@@ -425,6 +452,8 @@ Module Contents
:type pull_channel_posts: bool
:param pull_transcripts: Whether to pull transcripts for calendar meetings
:type pull_transcripts: bool
+ :param max_days_to_lookback: The maximum number of days to look back for data
+ :type max_days_to_lookback: int
.. py:attribute:: pull_chat_messages
@@ -445,6 +474,51 @@ Module Contents
+ .. py:attribute:: max_days_to_lookback
+ :type: int
+ :value: 365
+
+
+
+ .. py:method:: __post_init__()
+
+
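+
+   A usage sketch; the flag values are illustrative and every field shown falls
+   back to the defaults documented above when omitted.
+
+   .. code-block:: python
+
+      from abacusai.api_class import TeamsScraperDatasetConfig
+
+      config = TeamsScraperDatasetConfig(
+          pull_channel_posts=True,
+          pull_transcripts=True,
+          max_days_to_lookback=90,  # tighter window than the 365-day default
+      )
+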
+.. py:class:: OutlookDatasetConfig
+
+ Bases: :py:obj:`ApplicationConnectorDatasetConfig`
+
+
+ Dataset config for Outlook Application Connector
+
+
+ .. py:method:: __post_init__()
+
+
+.. py:class:: AzureStorageDatasetConfig
+
+ Bases: :py:obj:`ApplicationConnectorDatasetConfig`
+
+
+ Dataset config for Azure Storage Application Connector
+
+ :param location: The location of the files to fetch
+ :type location: str
+ :param merge_file_schemas: Signifies if the merge file schema policy is enabled. Not applicable if is_documentset is True
+ :type merge_file_schemas: bool
+
+
+ .. py:attribute:: location
+ :type: str
+ :value: None
+
+
+
+ .. py:attribute:: merge_file_schemas
+ :type: bool
+ :value: False
+
+
+
.. py:method:: __post_init__()
@@ -459,13 +533,53 @@ Module Contents
.. py:method:: __post_init__()
-.. py:class:: _ApplicationConnectorDatasetConfigFactory
+.. py:class:: SftpDatasetConfig
+
+ Bases: :py:obj:`ApplicationConnectorDatasetConfig`
+
+
+ Dataset config for SFTP Application Connector
+
+ :param location: The regex location of the files to fetch
+ :type location: str
+ :param csv_delimiter: If the file format is CSV, use a specific csv delimiter
+ :type csv_delimiter: str
+   :param extract_bounding_boxes: Signifies whether to extract bounding boxes out of the documents. Only valid if is_documentset is True
+ :type extract_bounding_boxes: bool
+ :param merge_file_schemas: Signifies if the merge file schema policy is enabled. Not applicable if is_documentset is True
+ :type merge_file_schemas: bool
+
+
+ .. py:attribute:: location
+ :type: str
+ :value: None
- Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`
- Helper class that provides a standard way to create an ABC using
- inheritance.
+ .. py:attribute:: csv_delimiter
+ :type: str
+ :value: None
+
+
+
+ .. py:attribute:: extract_bounding_boxes
+ :type: bool
+ :value: False
+
+
+
+ .. py:attribute:: merge_file_schemas
+ :type: bool
+ :value: False
+
+
+
+ .. py:method:: __post_init__()
+
+
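+
+   Construction sketches for the two connector configs above; the locations,
+   delimiter and flags are illustrative values.
+
+   .. code-block:: python
+
+      from abacusai.api_class import AzureStorageDatasetConfig, SftpDatasetConfig
+
+      azure = AzureStorageDatasetConfig(
+          location='container/exports/*.csv',
+          merge_file_schemas=True,
+      )
+
+      sftp = SftpDatasetConfig(
+          location=r'uploads/.*\.csv',  # documented as a regex location
+          csv_delimiter='|',
+          merge_file_schemas=True,
+      )
+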
+.. py:class:: _ApplicationConnectorDatasetConfigFactory
+
+ Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`
.. py:attribute:: config_abstract_class
diff --git a/docs/_sources/autoapi/abacusai/api_class/deployment/index.rst.txt b/docs/_sources/autoapi/abacusai/api_class/deployment/index.rst.txt
index f9a9d4b6e..e0b399cb2 100644
--- a/docs/_sources/autoapi/abacusai/api_class/deployment/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/api_class/deployment/index.rst.txt
@@ -402,10 +402,6 @@ Module Contents
Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`
- Helper class that provides a standard way to create an ABC using
- inheritance.
-
-
.. py:attribute:: config_abstract_class
diff --git a/docs/_sources/autoapi/abacusai/api_class/enums/index.rst.txt b/docs/_sources/autoapi/abacusai/api_class/enums/index.rst.txt
index d2f1ec33d..983b3e1be 100644
--- a/docs/_sources/autoapi/abacusai/api_class/enums/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/api_class/enums/index.rst.txt
@@ -75,6 +75,8 @@ Classes
abacusai.api_class.enums.ResponseSectionType
abacusai.api_class.enums.CodeLanguage
abacusai.api_class.enums.DeploymentConversationType
+ abacusai.api_class.enums.AgentClientType
+ abacusai.api_class.enums.OrganizationSecretType
Functions
@@ -989,6 +991,16 @@ Module Contents
+ .. py:attribute:: TS
+ :value: 'TS'
+
+
+
+ .. py:attribute:: TSX
+ :value: 'TSX'
+
+
+
.. py:class:: ExperimentationMode
Bases: :py:obj:`ApiEnum`
@@ -1689,6 +1701,11 @@ Module Contents
+ .. py:attribute:: GOOGLECALENDAR
+ :value: 'GOOGLECALENDAR'
+
+
+
.. py:attribute:: GIT
:value: 'GIT'
@@ -1764,11 +1781,6 @@ Module Contents
- .. py:attribute:: GOOGLECALENDAR
- :value: 'GOOGLECALENDAR'
-
-
-
.. py:attribute:: GOOGLESHEETS
:value: 'GOOGLESHEETS'
@@ -1799,6 +1811,71 @@ Module Contents
+ .. py:attribute:: SFTPAPPLICATION
+ :value: 'SFTPAPPLICATION'
+
+
+
+ .. py:attribute:: OAUTH
+ :value: 'OAUTH'
+
+
+
+ .. py:attribute:: SALESFORCE
+ :value: 'SALESFORCE'
+
+
+
+ .. py:attribute:: TWITTER
+ :value: 'TWITTER'
+
+
+
+ .. py:attribute:: MCP
+ :value: 'MCP'
+
+
+
+ .. py:attribute:: ODBC
+ :value: 'ODBC'
+
+
+
+ .. py:attribute:: DBC
+ :value: 'DBC'
+
+
+
+ .. py:attribute:: GENERIC_OAUTH
+ :value: 'GENERIC_OAUTH'
+
+
+
+ .. py:attribute:: OUTLOOK
+ :value: 'OUTLOOK'
+
+
+
+ .. py:attribute:: BIGQUERY
+ :value: 'BIGQUERY'
+
+
+
+ .. py:attribute:: AZURESTORAGE
+ :value: 'AZURESTORAGE'
+
+
+
+ .. py:method:: user_connectors()
+ :classmethod:
+
+
+
+ .. py:method:: database_connectors()
+ :classmethod:
+
+
+
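+
+   A hedged sketch, assuming these helpers hang off the ApplicationConnectorType
+   enum and return the subset of members for user-facing and database connectors
+   respectively.
+
+   .. code-block:: python
+
+      from abacusai.api_class.enums import ApplicationConnectorType
+
+      user_types = ApplicationConnectorType.user_connectors()
+      db_types = ApplicationConnectorType.database_connectors()
+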
.. py:class:: StreamingConnectorType
Bases: :py:obj:`ApiEnum`
@@ -1889,6 +1966,16 @@ Module Contents
+ .. py:attribute:: ATTACHMENT
+ :value: 'ATTACHMENT'
+
+
+
+ .. py:method:: to_json_type(type)
+ :staticmethod:
+
+
+
.. py:class:: PythonFunctionOutputArgumentType
Bases: :py:obj:`ApiEnum`
@@ -1964,6 +2051,11 @@ Module Contents
+ .. py:attribute:: ATTACHMENT
+ :value: 'ATTACHMENT'
+
+
+
.. py:class:: VectorStoreTextEncoder
Bases: :py:obj:`ApiEnum`
@@ -2019,6 +2111,16 @@ Module Contents
Derive from this class to define new enumerations.
+ .. py:attribute:: OPENAI_GPT3_5
+ :value: 'OPENAI_GPT3_5'
+
+
+
+ .. py:attribute:: OPENAI_GPT3_5_TEXT
+ :value: 'OPENAI_GPT3_5_TEXT'
+
+
+
.. py:attribute:: OPENAI_GPT4
:value: 'OPENAI_GPT4'
@@ -2054,48 +2156,78 @@ Module Contents
- .. py:attribute:: OPENAI_GPT3_5
- :value: 'OPENAI_GPT3_5'
+ .. py:attribute:: OPENAI_O3
+ :value: 'OPENAI_O3'
- .. py:attribute:: OPENAI_GPT3_5_TEXT
- :value: 'OPENAI_GPT3_5_TEXT'
+ .. py:attribute:: OPENAI_O3_HIGH
+ :value: 'OPENAI_O3_HIGH'
- .. py:attribute:: LLAMA3_1_405B
- :value: 'LLAMA3_1_405B'
+ .. py:attribute:: OPENAI_O4_MINI
+ :value: 'OPENAI_O4_MINI'
- .. py:attribute:: LLAMA3_1_70B
- :value: 'LLAMA3_1_70B'
+ .. py:attribute:: OPENAI_O4_MINI_HIGH
+ :value: 'OPENAI_O4_MINI_HIGH'
- .. py:attribute:: LLAMA3_1_8B
- :value: 'LLAMA3_1_8B'
+ .. py:attribute:: OPENAI_GPT4_1
+ :value: 'OPENAI_GPT4_1'
- .. py:attribute:: LLAMA3_3_70B
- :value: 'LLAMA3_3_70B'
+ .. py:attribute:: OPENAI_GPT4_1_MINI
+ :value: 'OPENAI_GPT4_1_MINI'
- .. py:attribute:: LLAMA3_LARGE_CHAT
- :value: 'LLAMA3_LARGE_CHAT'
+ .. py:attribute:: OPENAI_GPT4_1_NANO
+ :value: 'OPENAI_GPT4_1_NANO'
- .. py:attribute:: CLAUDE_V3_OPUS
- :value: 'CLAUDE_V3_OPUS'
+ .. py:attribute:: OPENAI_GPT5
+ :value: 'OPENAI_GPT5'
+
+
+
+ .. py:attribute:: OPENAI_GPT5_MINI
+ :value: 'OPENAI_GPT5_MINI'
+
+
+ .. py:attribute:: OPENAI_GPT5_MINI_HIGH
+ :value: 'OPENAI_GPT5_MINI_HIGH'
- .. py:attribute:: CLAUDE_V3_SONNET
- :value: 'CLAUDE_V3_SONNET'
+
+ .. py:attribute:: OPENAI_GPT5_MINI_LOW
+ :value: 'OPENAI_GPT5_MINI_LOW'
+
+
+
+ .. py:attribute:: OPENAI_GPT5_NANO
+ :value: 'OPENAI_GPT5_NANO'
+
+
+
+ .. py:attribute:: OPENAI_GPT5_NANO_HIGH
+ :value: 'OPENAI_GPT5_NANO_HIGH'
+
+
+
+ .. py:attribute:: OPENAI_GPT5_1
+ :value: 'OPENAI_GPT5_1'
+
+
+
+ .. py:attribute:: OPENAI_GPT5_2
+ :value: 'OPENAI_GPT5_2'
@@ -2104,8 +2236,8 @@ Module Contents
- .. py:attribute:: CLAUDE_V3_5_SONNET
- :value: 'CLAUDE_V3_5_SONNET'
+ .. py:attribute:: CLAUDE_V3_OPUS
+ :value: 'CLAUDE_V3_OPUS'
@@ -2114,23 +2246,118 @@ Module Contents
+ .. py:attribute:: CLAUDE_V3_5_SONNET
+ :value: 'CLAUDE_V3_5_SONNET'
+
+
+
+ .. py:attribute:: CLAUDE_V3_7_SONNET
+ :value: 'CLAUDE_V3_7_SONNET'
+
+
+
+ .. py:attribute:: CLAUDE_V4_SONNET
+ :value: 'CLAUDE_V4_SONNET'
+
+
+
+ .. py:attribute:: CLAUDE_V4_OPUS
+ :value: 'CLAUDE_V4_OPUS'
+
+
+
+ .. py:attribute:: CLAUDE_V4_5_SONNET
+ :value: 'CLAUDE_V4_5_SONNET'
+
+
+
.. py:attribute:: GEMINI_1_5_PRO
:value: 'GEMINI_1_5_PRO'
+ .. py:attribute:: GEMINI_1_5_FLASH
+ :value: 'GEMINI_1_5_FLASH'
+
+
+
+ .. py:attribute:: GEMINI_2_PRO
+ :value: 'GEMINI_2_PRO'
+
+
+
.. py:attribute:: GEMINI_2_FLASH
:value: 'GEMINI_2_FLASH'
- .. py:attribute:: GEMINI_2_FLASH_THINKING
- :value: 'GEMINI_2_FLASH_THINKING'
+ .. py:attribute:: GEMINI_2_5_PRO
+ :value: 'GEMINI_2_5_PRO'
- .. py:attribute:: GEMINI_2_PRO
- :value: 'GEMINI_2_PRO'
+ .. py:attribute:: GEMINI_3_PRO
+ :value: 'GEMINI_3_PRO'
+
+
+
+ .. py:attribute:: GEMINI_2_5_FLASH
+ :value: 'GEMINI_2_5_FLASH'
+
+
+
+ .. py:attribute:: GEMINI_3_FLASH
+ :value: 'GEMINI_3_FLASH'
+
+
+
+ .. py:attribute:: XAI_GROK
+ :value: 'XAI_GROK'
+
+
+
+ .. py:attribute:: XAI_GROK_3
+ :value: 'XAI_GROK_3'
+
+
+
+ .. py:attribute:: XAI_GROK_3_MINI
+ :value: 'XAI_GROK_3_MINI'
+
+
+
+ .. py:attribute:: XAI_GROK_4
+ :value: 'XAI_GROK_4'
+
+
+
+ .. py:attribute:: LLAMA4_MAVERICK
+ :value: 'LLAMA4_MAVERICK'
+
+
+
+ .. py:attribute:: LLAMA3_1_405B
+ :value: 'LLAMA3_1_405B'
+
+
+
+ .. py:attribute:: LLAMA3_1_70B
+ :value: 'LLAMA3_1_70B'
+
+
+
+ .. py:attribute:: LLAMA3_1_8B
+ :value: 'LLAMA3_1_8B'
+
+
+
+ .. py:attribute:: LLAMA3_3_70B
+ :value: 'LLAMA3_3_70B'
+
+
+
+ .. py:attribute:: LLAMA3_LARGE_CHAT
+ :value: 'LLAMA3_LARGE_CHAT'
@@ -2164,18 +2391,23 @@ Module Contents
- .. py:attribute:: GEMINI_1_5_FLASH
- :value: 'GEMINI_1_5_FLASH'
+ .. py:attribute:: QWEN3_32B
+ :value: 'QWEN3_32B'
- .. py:attribute:: XAI_GROK
- :value: 'XAI_GROK'
+ .. py:attribute:: QWEN3_235B_A22B
+ :value: 'QWEN3_235B_A22B'
+
+
+ .. py:attribute:: QWEN3_CODER
+ :value: 'QWEN3_CODER'
- .. py:attribute:: DEEPSEEK_V3
- :value: 'DEEPSEEK_V3'
+
+ .. py:attribute:: DEEPSEEK_V3_1
+ :value: 'DEEPSEEK_V3_1'
@@ -2379,6 +2611,11 @@ Module Contents
+ .. py:attribute:: CONNECTOR_TOOL
+ :value: 'CONNECTOR_TOOL'
+
+
+
.. py:class:: EvalArtifactType
Bases: :py:obj:`ApiEnum`
@@ -2644,6 +2881,11 @@ Module Contents
+ .. py:attribute:: COMPREHENSIVE_MARKDOWN
+ :value: 'COMPREHENSIVE_MARKDOWN'
+
+
+
.. py:method:: is_ocr_forced(document_type)
:classmethod:
@@ -2999,3 +3241,96 @@ Module Contents
+ .. py:attribute:: CODE_LLM_AGENT
+ :value: 'CODE_LLM_AGENT'
+
+
+
+ .. py:attribute:: CHAT_LLM_TASK
+ :value: 'CHAT_LLM_TASK'
+
+
+
+ .. py:attribute:: COMPUTER_AGENT
+ :value: 'COMPUTER_AGENT'
+
+
+
+ .. py:attribute:: SEARCH_LLM
+ :value: 'SEARCH_LLM'
+
+
+
+ .. py:attribute:: APP_LLM
+ :value: 'APP_LLM'
+
+
+
+ .. py:attribute:: TEST_AGENT
+ :value: 'TEST_AGENT'
+
+
+
+ .. py:attribute:: SUPER_AGENT
+ :value: 'SUPER_AGENT'
+
+
+
+ .. py:attribute:: CODE_LLM_NON_INTERACTIVE
+ :value: 'CODE_LLM_NON_INTERACTIVE'
+
+
+
+ .. py:attribute:: BROWSER_EXTENSION
+ :value: 'BROWSER_EXTENSION'
+
+
+
+.. py:class:: AgentClientType
+
+ Bases: :py:obj:`ApiEnum`
+
+
+ Generic enumeration.
+
+ Derive from this class to define new enumerations.
+
+
+ .. py:attribute:: CHAT_UI
+ :value: 'CHAT_UI'
+
+
+
+ .. py:attribute:: MESSAGING_APP
+ :value: 'MESSAGING_APP'
+
+
+
+ .. py:attribute:: API
+ :value: 'API'
+
+
+
+.. py:class:: OrganizationSecretType
+
+ Bases: :py:obj:`ApiEnum`
+
+
+ Enum for organization secret types
+
+
+ .. py:attribute:: ORG_SECRET
+ :value: 'ORG_SECRET'
+
+
+
+ .. py:attribute:: ORG_API_CREDENTIALS
+ :value: 'ORG_API_CREDENTIALS'
+
+
+
+ .. py:attribute:: USER_API_CREDENTIALS
+ :value: 'USER_API_CREDENTIALS'
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/api_class/index.rst.txt b/docs/_sources/autoapi/abacusai/api_class/index.rst.txt
index 658efc409..341999d64 100644
--- a/docs/_sources/autoapi/abacusai/api_class/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/api_class/index.rst.txt
@@ -36,6 +36,7 @@ Attributes
.. autoapisummary::
+ abacusai.api_class.MIN_AGENT_SLEEP_TIME
abacusai.api_class.DocumentRetrieverConfig
abacusai.api_class.Segment
@@ -54,6 +55,9 @@ Classes
abacusai.api_class.WorkflowNodeOutputSchema
abacusai.api_class.TriggerConfig
abacusai.api_class.WorkflowGraphNode
+ abacusai.api_class.DecisionNode
+ abacusai.api_class.LLMAgentNode
+ abacusai.api_class.InputNode
abacusai.api_class.WorkflowGraphEdge
abacusai.api_class.WorkflowGraph
abacusai.api_class.AgentConversationMessage
@@ -107,7 +111,10 @@ Classes
abacusai.api_class.ZendeskDatasetConfig
abacusai.api_class.AbacusUsageMetricsDatasetConfig
abacusai.api_class.TeamsScraperDatasetConfig
+ abacusai.api_class.OutlookDatasetConfig
+ abacusai.api_class.AzureStorageDatasetConfig
abacusai.api_class.FreshserviceDatasetConfig
+ abacusai.api_class.SftpDatasetConfig
abacusai.api_class._ApplicationConnectorDatasetConfigFactory
abacusai.api_class.ApiClass
abacusai.api_class._ApiClassFactory
@@ -190,6 +197,8 @@ Classes
abacusai.api_class.ResponseSectionType
abacusai.api_class.CodeLanguage
abacusai.api_class.DeploymentConversationType
+ abacusai.api_class.AgentClientType
+ abacusai.api_class.OrganizationSecretType
abacusai.api_class.ApiClass
abacusai.api_class._ApiClassFactory
abacusai.api_class.DocumentProcessingConfig
@@ -217,6 +226,7 @@ Classes
abacusai.api_class.ForecastingTrainingConfig
abacusai.api_class.NamedEntityExtractionTrainingConfig
abacusai.api_class.NaturalLanguageSearchTrainingConfig
+ abacusai.api_class.SystemConnectorTool
abacusai.api_class.ChatLLMTrainingConfig
abacusai.api_class.SentenceBoundaryDetectionTrainingConfig
abacusai.api_class.SentimentDetectionTrainingConfig
@@ -371,7 +381,11 @@ Package Contents
.. py:function:: validate_constructor_arg_types(friendly_class_name=None)
-.. py:function:: validate_input_dict_param(dict_object, friendly_class_name, must_contain=[])
+.. py:data:: MIN_AGENT_SLEEP_TIME
+ :value: 300
+
+
+.. py:function:: validate_input_dict_param(dict_object, friendly_class_name, must_contain=[], schema={})
.. py:class:: FieldDescriptor
@@ -442,6 +456,8 @@ Package Contents
:type is_required: bool
:param description: The description of this input.
:type description: str
+ :param long_description: The detailed description of this input.
+ :type long_description: str
:param constant_value: The constant value of this input if variable type is CONSTANT. Only applicable for template nodes.
:type constant_value: str
@@ -478,6 +494,12 @@ Package Contents
+ .. py:attribute:: long_description
+ :type: str
+ :value: None
+
+
+
.. py:attribute:: constant_value
:type: str
:value: None
@@ -489,11 +511,6 @@ Package Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:method:: from_dict(mapping)
:classmethod:
@@ -543,11 +560,6 @@ Package Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:method:: from_dict(schema)
:classmethod:
@@ -578,6 +590,17 @@ Package Contents
+ .. py:method:: from_tool_variable_mappings(tool_variable_mappings)
+ :classmethod:
+
+
+ Creates a WorkflowNodeInputSchema for the given tool variable mappings.
+
+ :param tool_variable_mappings: The tool variable mappings for the node.
+ :type tool_variable_mappings: List[dict]
+
+
+
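+
+   A hedged sketch: the mapping keys below ('name', 'is_required', 'description')
+   are illustrative, since the exact dict shape is not spelled out here.
+
+   .. code-block:: python
+
+      from abacusai.api_class import WorkflowNodeInputSchema
+
+      schema = WorkflowNodeInputSchema.from_tool_variable_mappings([
+          {'name': 'query', 'is_required': True, 'description': 'Search query'},
+      ])
+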
.. py:class:: WorkflowNodeOutputMapping
Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
@@ -591,6 +614,8 @@ Package Contents
:type variable_type: Union[WorkflowNodeOutputType, str]
:param description: The description of this output.
:type description: str
+ :param long_description: The detailed description of this output.
+ :type long_description: str
.. py:attribute:: name
@@ -607,15 +632,16 @@ Package Contents
- .. py:method:: __post_init__()
+ .. py:attribute:: long_description
+ :type: str
+ :value: None
- .. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
+ .. py:method:: __post_init__()
+
+ .. py:method:: to_dict()
.. py:method:: from_dict(mapping)
@@ -640,11 +666,6 @@ Package Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:method:: from_dict(schema)
:classmethod:
@@ -670,18 +691,13 @@ Package Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:method:: from_dict(configs)
:classmethod:
-.. py:class:: WorkflowGraphNode(name, function = None, input_mappings = None, output_mappings = None, function_name = None, source_code = None, input_schema = None, output_schema = None, template_metadata = None, trigger_config = None)
+.. py:class:: WorkflowGraphNode(name, function = None, input_mappings = None, output_mappings = None, function_name = None, source_code = None, input_schema = None, output_schema = None, template_metadata = None, trigger_config = None, description = None)
Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
@@ -692,14 +708,16 @@ Package Contents
:type name: str
:param input_mappings: List of input mappings for the node. Each arg/kwarg of the node function should have a corresponding input mapping.
:type input_mappings: List[WorkflowNodeInputMapping]
- :param output_mappings: List of output mappings for the node. Each field in the returned dict/AgentResponse must have a corresponding output mapping.
- :type output_mappings: List[WorkflowNodeOutputMapping]
+ :param output_mappings: List of outputs for the node. Each field in the returned dict/AgentResponse must have a corresponding output in the list.
+ :type output_mappings: List[str]
:param function: The callable node function reference.
:type function: callable
- :param input_schema: The react json schema for the user input variables.
+ :param input_schema: The react json schema for the user input variables. This should be empty for CHAT interface.
:type input_schema: WorkflowNodeInputSchema
- :param output_schema: The react json schema for the output to be shown on UI.
- :type output_schema: WorkflowNodeOutputSchema
+ :param output_schema: The list of outputs to be shown on UI. Each output corresponds to a field in the output mappings of the node.
+ :type output_schema: List[str]
+ :param description: A description of the workflow node.
+ :type description: str
Additional Attributes:
function_name (str): The name of the function.
@@ -707,6 +725,17 @@ Package Contents
trigger_config (TriggerConfig): The configuration for a trigger workflow node.
+ .. py:method:: is_special_node_type()
+ :classmethod:
+
+
+ Returns True if this node type skips source code validation.
+
+ Special node types (like InputNode and LLMAgentNode) are executed
+ directly without source code validation.
+
+
+
.. py:attribute:: template_metadata
:value: None
@@ -717,7 +746,17 @@ Package Contents
- .. py:method:: _raw_init(name, input_mappings = None, output_mappings = None, function = None, function_name = None, source_code = None, input_schema = None, output_schema = None, template_metadata = None, trigger_config = None)
+ .. py:attribute:: node_type
+ :value: 'workflow_node'
+
+
+
+ .. py:attribute:: description
+ :value: None
+
+
+
+ .. py:method:: _raw_init(name, input_mappings = None, output_mappings = None, function = None, function_name = None, source_code = None, input_schema = None, output_schema = None, template_metadata = None, trigger_config = None, description = None)
:classmethod:
@@ -727,12 +766,47 @@ Package Contents
- .. py:method:: to_dict()
+ .. py:method:: from_tool(tool_name, name, configs = None, input_mappings = None, input_schema = None, output_schema = None)
+ :classmethod:
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
+ Creates and returns a WorkflowGraphNode based on an available user created tool.
+ Note: DO NOT specify the output mapping for the tool; it will be inferred automatically. Doing so will raise an error.
+
+ :param tool_name: The name of the tool. There should already be a tool created in the platform with tool_name.
+ :param name: The name to assign to the WorkflowGraphNode instance.
+ :param configs: The configuration state of the tool to use (if necessary). If not specified, will use the tool's default configuration.
+ :type configs: optional
+ :param input_mappings: The WorkflowNodeInputMappings for this node.
+ :type input_mappings: optional
+ :param input_schema: The WorkflowNodeInputSchema for this node.
+ :type input_schema: optional
+ :param output_schema: The WorkflowNodeOutputSchema for this node.
+ :type output_schema: optional
+
+
+
+ .. py:method:: from_system_tool(tool_name, name, configs = None, input_mappings = None, input_schema = None, output_schema = None)
+ :classmethod:
+
+
+ Creates and returns a WorkflowGraphNode based on the name of an available system tool.
+ Note: DO NOT specify the output mapping for the tool; it will be inferred automatically. Doing so will raise an error.
+
+ :param tool_name: The name of the tool. There should already be a tool created in the platform with tool_name.
+ :param name: The name to assign to the WorkflowGraphNode instance.
+ :param configs: The configuration state of the tool to use (if necessary). If not specified, will use the tool's default configuration.
+ :type configs: optional
+ :param input_mappings: The WorkflowNodeInputMappings for this node.
+ :type input_mappings: optional
+ :param input_schema: The WorkflowNodeInputSchema for this node.
+ :type input_schema: optional
+ :param output_schema: The WorkflowNodeOutputSchema for this node.
+ :type output_schema: optional
+
+
+
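+
+   A usage sketch for the tool-based constructors above; the tool name and node
+   name are placeholders for a tool that already exists on the platform, and the
+   output mapping is deliberately omitted because it is inferred.
+
+   .. code-block:: python
+
+      from abacusai.api_class import WorkflowGraphNode
+
+      search_node = WorkflowGraphNode.from_system_tool(
+          tool_name='web_search',   # placeholder system tool name
+          name='search_step',
+      )
+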
+ .. py:method:: to_dict()
.. py:method:: is_template_node()
@@ -764,6 +838,262 @@ Package Contents
.. py:property:: outputs
+.. py:class:: DecisionNode(name, condition, input_mappings, description = None)
+
+ Bases: :py:obj:`WorkflowGraphNode`
+
+
+ Represents a decision node in an Agent workflow graph. It is connected between two workflow nodes and is used to determine if subsequent nodes should be executed.
+
+
+ .. py:attribute:: node_type
+ :value: 'decision_node'
+
+
+
+ .. py:attribute:: name
+
+
+ .. py:attribute:: source_code
+
+
+ .. py:attribute:: description
+ :value: None
+
+
+
+ .. py:attribute:: output_mappings
+
+
+ .. py:attribute:: template_metadata
+ :value: None
+
+
+
+ .. py:attribute:: trigger_config
+ :value: None
+
+
+
+ .. py:attribute:: input_schema
+ :value: None
+
+
+
+ .. py:attribute:: output_schema
+ :value: None
+
+
+
+ .. py:attribute:: function_name
+ :value: None
+
+
+
+ .. py:method:: to_dict()
+
+
+ .. py:method:: from_dict(node)
+ :classmethod:
+
+
+
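+
+   A minimal sketch; the condition is assumed to be an expression over the mapped
+   inputs, and the empty input_mappings is a placeholder for the mapping that
+   would normally wire in the upstream value being tested.
+
+   .. code-block:: python
+
+      from abacusai.api_class import DecisionNode
+
+      gate = DecisionNode(
+          name='quality_gate',
+          condition='confidence > 0.8',   # assumed expression syntax
+          input_mappings=[],              # placeholder; map 'confidence' from the upstream node
+      )
+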
+.. py:class:: LLMAgentNode(name, chatbot_deployment_id = None, chatbot_parameters = None, input_schema = None, output_schema = None)
+
+ Bases: :py:obj:`WorkflowGraphNode`
+
+
+   Represents an LLM agent node in an Agent workflow graph. The LLM Agent Node can be initialized using either chatbot_deployment_id or chatbot_parameters.
+
+ :param name: A unique name for the LLM agent node.
+ :type name: str
+ :param chatbot_deployment_id: The deployment ID of the chatbot to use for this node. If not provided, a new chatbot will be created.
+ :type chatbot_deployment_id: str
+ :param chatbot_parameters: Parameters for configuring the chatbot, such as data_feature_group_ids and document_retrievers.
+ :type chatbot_parameters: dict
+ :param uid: A unique identifier for the LLM agent node. If not provided, a unique ID will be auto-generated.
+ :type uid: str
+
+
+ .. py:attribute:: name
+ :type: str
+
+
+ .. py:attribute:: chatbot_deployment_id
+ :type: str
+ :value: None
+
+
+
+ .. py:attribute:: chatbot_parameters
+ :type: dict
+
+
+ .. py:attribute:: uid
+ :type: str
+ :value: None
+
+
+
+ .. py:method:: is_special_node_type()
+ :classmethod:
+
+
+ LLM agent nodes skip source code validation.
+
+
+
+ .. py:attribute:: node_type
+ :value: 'llm_agent_node'
+
+
+
+ .. py:attribute:: source_code
+ :value: None
+
+
+
+ .. py:method:: get_source_code()
+
+
+ .. py:method:: get_function_name()
+
+
+ .. py:method:: to_dict()
+
+
+ .. py:method:: from_dict(node)
+ :classmethod:
+
+
+
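+
+   Two construction sketches based on the parameters above; the deployment ID and
+   chatbot_parameters contents are placeholders.
+
+   .. code-block:: python
+
+      from abacusai.api_class import LLMAgentNode
+
+      # Reuse an existing chatbot deployment.
+      agent_node = LLMAgentNode(
+          name='support_agent',
+          chatbot_deployment_id='0123456789',   # placeholder deployment ID
+      )
+
+      # Or let the platform create a chatbot from parameters.
+      kb_agent_node = LLMAgentNode(
+          name='kb_agent',
+          chatbot_parameters={'document_retrievers': ['my_retriever']},  # illustrative
+      )
+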
+.. py:class:: InputNode(name, input_mappings = None, output_mappings = None, input_schema = None, output_schema = None, description = None, function_name = None, source_code = None, uid = None)
+
+ Bases: :py:obj:`WorkflowGraphNode`
+
+
+ Represents an input node in an agent workflow graph.
+
+ Accepts multiple user inputs (text and file) and processes them into a single combined text output.
+ File inputs are processed using OCR/document extraction when applicable.
+
+ :param name: The name of the input node.
+ :type name: str
+ :param input_mappings: The input mappings for the input node. If not provided, defaults to a text_input and file_input.
+ :type input_mappings: Union[Dict[str, WorkflowNodeInputMapping], List[WorkflowNodeInputMapping]]
+ :param output_mappings: The output mappings for the input node. If not provided, defaults to a single 'answer' output.
+ :type output_mappings: Union[List[str], Dict[str, str], List[WorkflowNodeOutputMapping]]
+ :param input_schema: The input schema for the input node. If not provided, will be generated from input_mappings.
+ :type input_schema: WorkflowNodeInputSchema
+ :param output_schema: The output schema for the input node. If not provided, will be generated from output_mappings.
+ :type output_schema: WorkflowNodeOutputSchema
+ :param description: The description of the input node.
+ :type description: str
+ :param uid: A unique identifier for the input node. Auto-generated if not provided.
+ :type uid: str
+
+
+ .. py:attribute:: name
+ :type: str
+
+
+ .. py:attribute:: input_mappings
+ :type: Union[Dict[str, WorkflowNodeInputMapping], List[WorkflowNodeInputMapping]]
+ :value: None
+
+
+
+ .. py:attribute:: output_mappings
+ :type: Union[List[str], Dict[str, str], List[WorkflowNodeOutputMapping]]
+ :value: None
+
+
+
+ .. py:attribute:: input_schema
+ :type: WorkflowNodeInputSchema
+ :value: None
+
+
+
+ .. py:attribute:: output_schema
+ :type: WorkflowNodeOutputSchema
+ :value: None
+
+
+
+ .. py:attribute:: description
+ :type: str
+ :value: None
+
+
+
+ .. py:attribute:: uid
+ :type: str
+ :value: None
+
+
+
+ .. py:method:: is_special_node_type()
+ :classmethod:
+
+
+ Input nodes skip source code validation.
+
+
+
+ .. py:method:: _mark_file_inputs_in_schema(input_schema)
+ :staticmethod:
+
+
+ Mark file inputs in the schema based on format and ui:widget indicators.
+
+
+
+ .. py:attribute:: node_type
+ :value: 'input_node'
+
+
+
+ .. py:attribute:: source_code
+ :value: None
+
+
+
+ .. py:attribute:: template_metadata
+ :value: None
+
+
+
+ .. py:attribute:: trigger_config
+ :value: None
+
+
+
+ .. py:method:: to_dict()
+
+ Return snake_case keys to match return filter expectations.
+ (The base ApiClass.to_dict() camelCases keys, which causes the
+ WorkflowGraphNodeDetails return filter to miss fields.)
+
+
+
+ .. py:method:: from_dict(node)
+ :classmethod:
+
+
+
+ .. py:method:: get_function_name()
+
+
+ .. py:method:: get_source_code()
+
+ Return the stored source code, if any.
+
+
+
+ .. py:method:: get_input_schema()
+
+
.. py:class:: WorkflowGraphEdge(source, target, details = None)
Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
@@ -797,6 +1127,11 @@ Package Contents
.. py:method:: to_nx_edge()
+ .. py:method:: from_dict(input_dict)
+ :classmethod:
+
+
+
.. py:class:: WorkflowGraph
Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
@@ -804,20 +1139,16 @@ Package Contents
Represents an Agent workflow graph.
- The edges define the node invocation order.
-
:param nodes: A list of nodes in the workflow graph.
- :type nodes: List[WorkflowGraphNode]
- :param edges: A list of edges in the workflow graph, where each edge is a tuple of source, target, and details.
- :type edges: List[WorkflowGraphEdge]
+ :type nodes: List[Union[WorkflowGraphNode, DecisionNode, LLMAgentNode]]
:param primary_start_node: The primary node to start the workflow from.
- :type primary_start_node: Union[str, WorkflowGraphNode]
+ :type primary_start_node: Union[str, WorkflowGraphNode, LLMAgentNode]
:param common_source_code: Common source code that can be used across all nodes.
:type common_source_code: str
.. py:attribute:: nodes
- :type: List[WorkflowGraphNode]
+ :type: List[Union[WorkflowGraphNode, DecisionNode, LLMAgentNode]]
:value: []
@@ -829,7 +1160,7 @@ Package Contents
.. py:attribute:: primary_start_node
- :type: Union[str, WorkflowGraphNode]
+ :type: Union[str, WorkflowGraphNode, LLMAgentNode]
:value: None
@@ -851,11 +1182,6 @@ Package Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:method:: from_dict(graph)
:classmethod:
@@ -897,11 +1223,6 @@ Package Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:class:: WorkflowNodeTemplateConfig
@@ -944,11 +1265,6 @@ Package Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:method:: from_dict(mapping)
:classmethod:
@@ -968,6 +1284,8 @@ Package Contents
:type is_required: bool
:param description: The description of this input.
:type description: str
+ :param long_description: The detailed description of this input.
+ :type long_description: str
.. py:attribute:: name
@@ -986,13 +1304,14 @@ Package Contents
- .. py:method:: to_dict()
+ .. py:attribute:: long_description
+ :type: str
+ :value: ''
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
+ .. py:method:: to_dict()
+
.. py:method:: from_dict(mapping)
:classmethod:
@@ -1012,6 +1331,8 @@ Package Contents
:type variable_type: WorkflowNodeOutputType
:param description: The description of this output.
:type description: str
+ :param long_description: The detailed description of this output.
+ :type long_description: str
.. py:attribute:: name
@@ -1028,12 +1349,13 @@ Package Contents
- .. py:method:: to_dict()
+ .. py:attribute:: long_description
+ :type: str
+ :value: ''
+
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
+ .. py:method:: to_dict()
.. py:method:: from_dict(mapping)
@@ -1515,6 +1837,8 @@ Package Contents
   :param for_eval: If True, the test fold which was created during training and used for metrics calculation will be used as input data. These predictions are, hence, used for model evaluation.
:type for_eval: bool
+ :param files_input_location: The input location for the files.
+ :type files_input_location: str
:param files_output_location_prefix: The output location prefix for the files.
:type files_output_location_prefix: str
:param channel_id_to_label_map: JSON string for the map from channel ids to their labels.
@@ -1527,6 +1851,12 @@ Package Contents
+ .. py:attribute:: files_input_location
+ :type: str
+ :value: None
+
+
+
.. py:attribute:: files_output_location_prefix
:type: str
:value: None
@@ -1678,10 +2008,6 @@ Package Contents
Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`
- Helper class that provides a standard way to create an ABC using
- inheritance.
-
-
.. py:attribute:: config_abstract_class
@@ -1909,10 +2235,6 @@ Package Contents
Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`
- Helper class that provides a standard way to create an ABC using
- inheritance.
-
-
.. py:attribute:: config_abstract_class
@@ -2022,6 +2344,11 @@ Package Contents
+ .. py:attribute:: COMPREHENSIVE_MARKDOWN
+ :value: 'COMPREHENSIVE_MARKDOWN'
+
+
+
.. py:method:: is_ocr_forced(document_type)
:classmethod:
@@ -2457,6 +2784,8 @@ Package Contents
:type pull_attachments: bool
:param extract_bounding_boxes: Whether to extract bounding boxes from the documents
:type extract_bounding_boxes: bool
+ :param location_type: The type of location to be fetched. Maps values in `location` to content type, example: 'spaceKey/folderTitle/*' -> 'folder'
+ :type location_type: str
.. py:attribute:: location
@@ -2483,6 +2812,12 @@ Package Contents
+ .. py:attribute:: location_type
+ :type: str
+ :value: None
+
+
+
.. py:method:: __post_init__()
@@ -2572,6 +2907,8 @@ Package Contents
:type extract_bounding_boxes: bool
:param merge_file_schemas: Signifies if the merge file schema policy is enabled. Not applicable if is_documentset is True
:type merge_file_schemas: bool
+   :param location_type: The type of path to fetch. Use 'shared' for shared files. If not provided, files will be fetched from the root folder.
+ :type location_type: str
.. py:attribute:: location
@@ -2598,6 +2935,12 @@ Package Contents
+ .. py:attribute:: location_type
+ :type: str
+ :value: None
+
+
+
.. py:method:: __post_init__()
@@ -2704,6 +3047,8 @@ Package Contents
:type extract_bounding_boxes: bool
:param merge_file_schemas: Signifies if the merge file schema policy is enabled. Not applicable if is_documentset is True
:type merge_file_schemas: bool
+ :param add_file_metadata: Signifies if the file metadata should be added to the dataset
+ :type add_file_metadata: bool
.. py:attribute:: location
@@ -2730,6 +3075,12 @@ Package Contents
+ .. py:attribute:: add_file_metadata
+ :type: bool
+ :value: False
+
+
+
.. py:method:: __post_init__()
@@ -2753,85 +3104,176 @@ Package Contents
.. py:method:: __post_init__()
-.. py:class:: AbacusUsageMetricsDatasetConfig
+.. py:class:: AbacusUsageMetricsDatasetConfig
+
+ Bases: :py:obj:`ApplicationConnectorDatasetConfig`
+
+
+ Dataset config for Abacus Usage Metrics Application Connector
+
+ :param include_entire_conversation_history: Whether to show the entire history for this deployment conversation
+ :type include_entire_conversation_history: bool
+ :param include_all_feedback: Whether to include all feedback for this deployment conversation
+ :type include_all_feedback: bool
+   :param resolve_matching_documents: Whether to get matching document references for the response instead of the prompt.
+      They are recalculated if highlights are unavailable in summary_info
+ :type resolve_matching_documents: bool
+
+
+ .. py:attribute:: include_entire_conversation_history
+ :type: bool
+ :value: False
+
+
+
+ .. py:attribute:: include_all_feedback
+ :type: bool
+ :value: False
+
+
+
+ .. py:attribute:: resolve_matching_documents
+ :type: bool
+ :value: False
+
+
+
+ .. py:method:: __post_init__()
+
+
+.. py:class:: TeamsScraperDatasetConfig
+
+ Bases: :py:obj:`ApplicationConnectorDatasetConfig`
+
+
+ Dataset config for Teams Scraper Application Connector
+
+ :param pull_chat_messages: Whether to pull teams chat messages
+ :type pull_chat_messages: bool
+ :param pull_channel_posts: Whether to pull posts for each channel
+ :type pull_channel_posts: bool
+ :param pull_transcripts: Whether to pull transcripts for calendar meetings
+ :type pull_transcripts: bool
+ :param max_days_to_lookback: The maximum number of days to look back for data
+ :type max_days_to_lookback: int
+
+
+ .. py:attribute:: pull_chat_messages
+ :type: bool
+ :value: False
+
+
+
+ .. py:attribute:: pull_channel_posts
+ :type: bool
+ :value: False
+
+
+
+ .. py:attribute:: pull_transcripts
+ :type: bool
+ :value: False
+
+
+
+ .. py:attribute:: max_days_to_lookback
+ :type: int
+ :value: 365
+
+
+
+ .. py:method:: __post_init__()
+
+
+.. py:class:: OutlookDatasetConfig
+
+ Bases: :py:obj:`ApplicationConnectorDatasetConfig`
+
+
+ Dataset config for Outlook Application Connector
+
+
+ .. py:method:: __post_init__()
+
+
+.. py:class:: AzureStorageDatasetConfig
Bases: :py:obj:`ApplicationConnectorDatasetConfig`
- Dataset config for Abacus Usage Metrics Application Connector
+ Dataset config for Azure Storage Application Connector
- :param include_entire_conversation_history: Whether to show the entire history for this deployment conversation
- :type include_entire_conversation_history: bool
- :param include_all_feedback: Whether to include all feedback for this deployment conversation
- :type include_all_feedback: bool
- :param resolve_matching_documents: Whether to get matching document references for response instead of prompt.
- Needs to recalculate them if highlights are unavailable in summary_info
- :type resolve_matching_documents: bool
+ :param location: The location of the files to fetch
+ :type location: str
+ :param merge_file_schemas: Signifies if the merge file schema policy is enabled. Not applicable if is_documentset is True
+ :type merge_file_schemas: bool
- .. py:attribute:: include_entire_conversation_history
- :type: bool
- :value: False
+ .. py:attribute:: location
+ :type: str
+ :value: None
- .. py:attribute:: include_all_feedback
+ .. py:attribute:: merge_file_schemas
:type: bool
:value: False
- .. py:attribute:: resolve_matching_documents
- :type: bool
- :value: False
+ .. py:method:: __post_init__()
+.. py:class:: FreshserviceDatasetConfig
- .. py:method:: __post_init__()
+ Bases: :py:obj:`ApplicationConnectorDatasetConfig`
-.. py:class:: TeamsScraperDatasetConfig
+ Dataset config for Freshservice Application Connector
- Bases: :py:obj:`ApplicationConnectorDatasetConfig`
+ .. py:method:: __post_init__()
- Dataset config for Teams Scraper Application Connector
- :param pull_chat_messages: Whether to pull teams chat messages
- :type pull_chat_messages: bool
- :param pull_channel_posts: Whether to pull posts for each channel
- :type pull_channel_posts: bool
- :param pull_transcripts: Whether to pull transcripts for calendar meetings
- :type pull_transcripts: bool
+.. py:class:: SftpDatasetConfig
+ Bases: :py:obj:`ApplicationConnectorDatasetConfig`
- .. py:attribute:: pull_chat_messages
- :type: bool
- :value: False
+ Dataset config for SFTP Application Connector
+ :param location: The regex location of the files to fetch
+ :type location: str
+ :param csv_delimiter: If the file format is CSV, use a specific csv delimiter
+ :type csv_delimiter: str
+   :param extract_bounding_boxes: Signifies whether to extract bounding boxes out of the documents. Only valid if is_documentset is True
+ :type extract_bounding_boxes: bool
+ :param merge_file_schemas: Signifies if the merge file schema policy is enabled. Not applicable if is_documentset is True
+ :type merge_file_schemas: bool
- .. py:attribute:: pull_channel_posts
- :type: bool
- :value: False
+ .. py:attribute:: location
+ :type: str
+ :value: None
- .. py:attribute:: pull_transcripts
- :type: bool
- :value: False
+ .. py:attribute:: csv_delimiter
+ :type: str
+ :value: None
- .. py:method:: __post_init__()
+ .. py:attribute:: extract_bounding_boxes
+ :type: bool
+ :value: False
-.. py:class:: FreshserviceDatasetConfig
- Bases: :py:obj:`ApplicationConnectorDatasetConfig`
+ .. py:attribute:: merge_file_schemas
+ :type: bool
+ :value: False
- Dataset config for Freshservice Application Connector
.. py:method:: __post_init__()
@@ -2842,10 +3284,6 @@ Package Contents
Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`
- Helper class that provides a standard way to create an ABC using
- inheritance.
-
-
.. py:attribute:: config_abstract_class
@@ -3325,10 +3763,6 @@ Package Contents
Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`
- Helper class that provides a standard way to create an ABC using
- inheritance.
-
-
.. py:attribute:: config_abstract_class
@@ -4440,6 +4874,16 @@ Package Contents
+ .. py:attribute:: TS
+ :value: 'TS'
+
+
+
+ .. py:attribute:: TSX
+ :value: 'TSX'
+
+
+
.. py:class:: ExperimentationMode
Bases: :py:obj:`ApiEnum`
@@ -5140,6 +5584,11 @@ Package Contents
+ .. py:attribute:: GOOGLECALENDAR
+ :value: 'GOOGLECALENDAR'
+
+
+
.. py:attribute:: GIT
:value: 'GIT'
@@ -5215,11 +5664,6 @@ Package Contents
- .. py:attribute:: GOOGLECALENDAR
- :value: 'GOOGLECALENDAR'
-
-
-
.. py:attribute:: GOOGLESHEETS
:value: 'GOOGLESHEETS'
@@ -5250,6 +5694,71 @@ Package Contents
+ .. py:attribute:: SFTPAPPLICATION
+ :value: 'SFTPAPPLICATION'
+
+
+
+ .. py:attribute:: OAUTH
+ :value: 'OAUTH'
+
+
+
+ .. py:attribute:: SALESFORCE
+ :value: 'SALESFORCE'
+
+
+
+ .. py:attribute:: TWITTER
+ :value: 'TWITTER'
+
+
+
+ .. py:attribute:: MCP
+ :value: 'MCP'
+
+
+
+ .. py:attribute:: ODBC
+ :value: 'ODBC'
+
+
+
+ .. py:attribute:: DBC
+ :value: 'DBC'
+
+
+
+ .. py:attribute:: GENERIC_OAUTH
+ :value: 'GENERIC_OAUTH'
+
+
+
+ .. py:attribute:: OUTLOOK
+ :value: 'OUTLOOK'
+
+
+
+ .. py:attribute:: BIGQUERY
+ :value: 'BIGQUERY'
+
+
+
+ .. py:attribute:: AZURESTORAGE
+ :value: 'AZURESTORAGE'
+
+
+
+ .. py:method:: user_connectors()
+ :classmethod:
+
+
+
+ .. py:method:: database_connectors()
+ :classmethod:
+
+
+
.. py:class:: StreamingConnectorType
Bases: :py:obj:`ApiEnum`
@@ -5340,6 +5849,16 @@ Package Contents
+ .. py:attribute:: ATTACHMENT
+ :value: 'ATTACHMENT'
+
+
+
+ .. py:method:: to_json_type(type)
+ :staticmethod:
+
+
+
.. py:class:: PythonFunctionOutputArgumentType
Bases: :py:obj:`ApiEnum`
@@ -5415,6 +5934,11 @@ Package Contents
+ .. py:attribute:: ATTACHMENT
+ :value: 'ATTACHMENT'
+
+
+
.. py:class:: VectorStoreTextEncoder
Bases: :py:obj:`ApiEnum`
@@ -5462,126 +5986,261 @@ Package Contents
.. py:class:: LLMName
- Bases: :py:obj:`ApiEnum`
+ Bases: :py:obj:`ApiEnum`
+
+
+ Generic enumeration.
+
+ Derive from this class to define new enumerations.
+
+
+ .. py:attribute:: OPENAI_GPT3_5
+ :value: 'OPENAI_GPT3_5'
+
+
+
+ .. py:attribute:: OPENAI_GPT3_5_TEXT
+ :value: 'OPENAI_GPT3_5_TEXT'
+
+
+
+ .. py:attribute:: OPENAI_GPT4
+ :value: 'OPENAI_GPT4'
+
+
+
+ .. py:attribute:: OPENAI_GPT4_32K
+ :value: 'OPENAI_GPT4_32K'
+
+
+
+ .. py:attribute:: OPENAI_GPT4_128K
+ :value: 'OPENAI_GPT4_128K'
+
+
+
+ .. py:attribute:: OPENAI_GPT4_128K_LATEST
+ :value: 'OPENAI_GPT4_128K_LATEST'
+
+
+
+ .. py:attribute:: OPENAI_GPT4O
+ :value: 'OPENAI_GPT4O'
+
+
+
+ .. py:attribute:: OPENAI_GPT4O_MINI
+ :value: 'OPENAI_GPT4O_MINI'
+
+
+
+ .. py:attribute:: OPENAI_O1_MINI
+ :value: 'OPENAI_O1_MINI'
+
+
+
+ .. py:attribute:: OPENAI_O3
+ :value: 'OPENAI_O3'
+
+
+
+ .. py:attribute:: OPENAI_O3_HIGH
+ :value: 'OPENAI_O3_HIGH'
+
+
+
+ .. py:attribute:: OPENAI_O4_MINI
+ :value: 'OPENAI_O4_MINI'
+
+
+
+ .. py:attribute:: OPENAI_O4_MINI_HIGH
+ :value: 'OPENAI_O4_MINI_HIGH'
+
+
+
+ .. py:attribute:: OPENAI_GPT4_1
+ :value: 'OPENAI_GPT4_1'
+
+
+
+ .. py:attribute:: OPENAI_GPT4_1_MINI
+ :value: 'OPENAI_GPT4_1_MINI'
+
+
+
+ .. py:attribute:: OPENAI_GPT4_1_NANO
+ :value: 'OPENAI_GPT4_1_NANO'
+
+
+
+ .. py:attribute:: OPENAI_GPT5
+ :value: 'OPENAI_GPT5'
+
+
+
+ .. py:attribute:: OPENAI_GPT5_MINI
+ :value: 'OPENAI_GPT5_MINI'
+
+
+
+ .. py:attribute:: OPENAI_GPT5_MINI_HIGH
+ :value: 'OPENAI_GPT5_MINI_HIGH'
+
+
+
+ .. py:attribute:: OPENAI_GPT5_MINI_LOW
+ :value: 'OPENAI_GPT5_MINI_LOW'
+
+
+
+ .. py:attribute:: OPENAI_GPT5_NANO
+ :value: 'OPENAI_GPT5_NANO'
+
+
+
+ .. py:attribute:: OPENAI_GPT5_NANO_HIGH
+ :value: 'OPENAI_GPT5_NANO_HIGH'
+
+
+
+ .. py:attribute:: OPENAI_GPT5_1
+ :value: 'OPENAI_GPT5_1'
+
+
+
+ .. py:attribute:: OPENAI_GPT5_2
+ :value: 'OPENAI_GPT5_2'
+
+
+
+ .. py:attribute:: CLAUDE_V3_HAIKU
+ :value: 'CLAUDE_V3_HAIKU'
+
+
+
+ .. py:attribute:: CLAUDE_V3_OPUS
+ :value: 'CLAUDE_V3_OPUS'
+
- Generic enumeration.
+ .. py:attribute:: CLAUDE_V3_5_HAIKU
+ :value: 'CLAUDE_V3_5_HAIKU'
- Derive from this class to define new enumerations.
- .. py:attribute:: OPENAI_GPT4
- :value: 'OPENAI_GPT4'
+ .. py:attribute:: CLAUDE_V3_5_SONNET
+ :value: 'CLAUDE_V3_5_SONNET'
- .. py:attribute:: OPENAI_GPT4_32K
- :value: 'OPENAI_GPT4_32K'
+ .. py:attribute:: CLAUDE_V3_7_SONNET
+ :value: 'CLAUDE_V3_7_SONNET'
- .. py:attribute:: OPENAI_GPT4_128K
- :value: 'OPENAI_GPT4_128K'
+ .. py:attribute:: CLAUDE_V4_SONNET
+ :value: 'CLAUDE_V4_SONNET'
- .. py:attribute:: OPENAI_GPT4_128K_LATEST
- :value: 'OPENAI_GPT4_128K_LATEST'
+ .. py:attribute:: CLAUDE_V4_OPUS
+ :value: 'CLAUDE_V4_OPUS'
- .. py:attribute:: OPENAI_GPT4O
- :value: 'OPENAI_GPT4O'
+ .. py:attribute:: CLAUDE_V4_5_SONNET
+ :value: 'CLAUDE_V4_5_SONNET'
- .. py:attribute:: OPENAI_GPT4O_MINI
- :value: 'OPENAI_GPT4O_MINI'
+ .. py:attribute:: GEMINI_1_5_PRO
+ :value: 'GEMINI_1_5_PRO'
- .. py:attribute:: OPENAI_O1_MINI
- :value: 'OPENAI_O1_MINI'
+ .. py:attribute:: GEMINI_1_5_FLASH
+ :value: 'GEMINI_1_5_FLASH'
- .. py:attribute:: OPENAI_GPT3_5
- :value: 'OPENAI_GPT3_5'
+ .. py:attribute:: GEMINI_2_PRO
+ :value: 'GEMINI_2_PRO'
- .. py:attribute:: OPENAI_GPT3_5_TEXT
- :value: 'OPENAI_GPT3_5_TEXT'
+ .. py:attribute:: GEMINI_2_FLASH
+ :value: 'GEMINI_2_FLASH'
- .. py:attribute:: LLAMA3_1_405B
- :value: 'LLAMA3_1_405B'
+ .. py:attribute:: GEMINI_2_5_PRO
+ :value: 'GEMINI_2_5_PRO'
- .. py:attribute:: LLAMA3_1_70B
- :value: 'LLAMA3_1_70B'
+ .. py:attribute:: GEMINI_3_PRO
+ :value: 'GEMINI_3_PRO'
- .. py:attribute:: LLAMA3_1_8B
- :value: 'LLAMA3_1_8B'
+ .. py:attribute:: GEMINI_2_5_FLASH
+ :value: 'GEMINI_2_5_FLASH'
- .. py:attribute:: LLAMA3_3_70B
- :value: 'LLAMA3_3_70B'
+ .. py:attribute:: GEMINI_3_FLASH
+ :value: 'GEMINI_3_FLASH'
- .. py:attribute:: LLAMA3_LARGE_CHAT
- :value: 'LLAMA3_LARGE_CHAT'
+ .. py:attribute:: XAI_GROK
+ :value: 'XAI_GROK'
- .. py:attribute:: CLAUDE_V3_OPUS
- :value: 'CLAUDE_V3_OPUS'
+ .. py:attribute:: XAI_GROK_3
+ :value: 'XAI_GROK_3'
- .. py:attribute:: CLAUDE_V3_SONNET
- :value: 'CLAUDE_V3_SONNET'
+ .. py:attribute:: XAI_GROK_3_MINI
+ :value: 'XAI_GROK_3_MINI'
- .. py:attribute:: CLAUDE_V3_HAIKU
- :value: 'CLAUDE_V3_HAIKU'
+ .. py:attribute:: XAI_GROK_4
+ :value: 'XAI_GROK_4'
- .. py:attribute:: CLAUDE_V3_5_SONNET
- :value: 'CLAUDE_V3_5_SONNET'
+ .. py:attribute:: LLAMA4_MAVERICK
+ :value: 'LLAMA4_MAVERICK'
- .. py:attribute:: CLAUDE_V3_5_HAIKU
- :value: 'CLAUDE_V3_5_HAIKU'
+ .. py:attribute:: LLAMA3_1_405B
+ :value: 'LLAMA3_1_405B'
- .. py:attribute:: GEMINI_1_5_PRO
- :value: 'GEMINI_1_5_PRO'
+ .. py:attribute:: LLAMA3_1_70B
+ :value: 'LLAMA3_1_70B'
- .. py:attribute:: GEMINI_2_FLASH
- :value: 'GEMINI_2_FLASH'
+ .. py:attribute:: LLAMA3_1_8B
+ :value: 'LLAMA3_1_8B'
- .. py:attribute:: GEMINI_2_FLASH_THINKING
- :value: 'GEMINI_2_FLASH_THINKING'
+ .. py:attribute:: LLAMA3_3_70B
+ :value: 'LLAMA3_3_70B'
- .. py:attribute:: GEMINI_2_PRO
- :value: 'GEMINI_2_PRO'
+ .. py:attribute:: LLAMA3_LARGE_CHAT
+ :value: 'LLAMA3_LARGE_CHAT'
@@ -5615,18 +6274,23 @@ Package Contents
- .. py:attribute:: GEMINI_1_5_FLASH
- :value: 'GEMINI_1_5_FLASH'
+ .. py:attribute:: QWEN3_32B
+ :value: 'QWEN3_32B'
- .. py:attribute:: XAI_GROK
- :value: 'XAI_GROK'
+ .. py:attribute:: QWEN3_235B_A22B
+ :value: 'QWEN3_235B_A22B'
+
+
+
+ .. py:attribute:: QWEN3_CODER
+ :value: 'QWEN3_CODER'
- .. py:attribute:: DEEPSEEK_V3
- :value: 'DEEPSEEK_V3'
+ .. py:attribute:: DEEPSEEK_V3_1
+ :value: 'DEEPSEEK_V3_1'
@@ -5830,6 +6494,11 @@ Package Contents
+ .. py:attribute:: CONNECTOR_TOOL
+ :value: 'CONNECTOR_TOOL'
+
+
+
.. py:class:: EvalArtifactType
Bases: :py:obj:`ApiEnum`
@@ -6095,6 +6764,11 @@ Package Contents
+ .. py:attribute:: COMPREHENSIVE_MARKDOWN
+ :value: 'COMPREHENSIVE_MARKDOWN'
+
+
+
.. py:method:: is_ocr_forced(document_type)
:classmethod:
@@ -6450,6 +7124,99 @@ Package Contents
+ .. py:attribute:: CODE_LLM_AGENT
+ :value: 'CODE_LLM_AGENT'
+
+
+
+ .. py:attribute:: CHAT_LLM_TASK
+ :value: 'CHAT_LLM_TASK'
+
+
+
+ .. py:attribute:: COMPUTER_AGENT
+ :value: 'COMPUTER_AGENT'
+
+
+
+ .. py:attribute:: SEARCH_LLM
+ :value: 'SEARCH_LLM'
+
+
+
+ .. py:attribute:: APP_LLM
+ :value: 'APP_LLM'
+
+
+
+ .. py:attribute:: TEST_AGENT
+ :value: 'TEST_AGENT'
+
+
+
+ .. py:attribute:: SUPER_AGENT
+ :value: 'SUPER_AGENT'
+
+
+
+ .. py:attribute:: CODE_LLM_NON_INTERACTIVE
+ :value: 'CODE_LLM_NON_INTERACTIVE'
+
+
+
+ .. py:attribute:: BROWSER_EXTENSION
+ :value: 'BROWSER_EXTENSION'
+
+
+
+.. py:class:: AgentClientType
+
+ Bases: :py:obj:`ApiEnum`
+
+
+ Generic enumeration.
+
+ Derive from this class to define new enumerations.
+
+
+ .. py:attribute:: CHAT_UI
+ :value: 'CHAT_UI'
+
+
+
+ .. py:attribute:: MESSAGING_APP
+ :value: 'MESSAGING_APP'
+
+
+
+ .. py:attribute:: API
+ :value: 'API'
+
+
+
+.. py:class:: OrganizationSecretType
+
+ Bases: :py:obj:`ApiEnum`
+
+
+ Enum for organization secret types
+
+
+ .. py:attribute:: ORG_SECRET
+ :value: 'ORG_SECRET'
+
+
+
+ .. py:attribute:: ORG_API_CREDENTIALS
+ :value: 'ORG_API_CREDENTIALS'
+
+
+
+ .. py:attribute:: USER_API_CREDENTIALS
+ :value: 'USER_API_CREDENTIALS'
+
+
+
.. py:class:: ApiClass
Bases: :py:obj:`abc.ABC`
@@ -8716,6 +9483,29 @@ Package Contents
.. py:method:: __post_init__()
+.. py:class:: SystemConnectorTool
+
+ Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
+
+
+ System connector tool used to integrate chatbots with external services.
+
+ :param value: The name of the tool.
+ :type value: str
+ :param configs: Optional. A dictionary of key-value pairs that are used to configure the tool.
+ :type configs: dict
+
+
+ .. py:attribute:: value
+ :type: str
+
+
+ .. py:attribute:: configs
+ :type: dict
+ :value: None
+
+
+
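+ A minimal construction sketch (hedged): the tool name and config key below are
+ placeholders for illustration, and the import path assumes ``SystemConnectorTool`` is
+ exported from ``abacusai.api_class`` like the other config classes documented here.
+
+ .. code-block:: python
+
+    from abacusai.api_class import SystemConnectorTool
+
+    # 'google_calendar' and 'calendar_id' are hypothetical values used only to
+    # illustrate the 'value' and 'configs' fields of this dataclass.
+    calendar_tool = SystemConnectorTool(
+        value='google_calendar',
+        configs={'calendar_id': 'primary'},
+    )
+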
.. py:class:: ChatLLMTrainingConfig
Bases: :py:obj:`TrainingConfig`
@@ -8762,6 +9552,8 @@ Package Contents
:type hide_sql_and_code: bool
:param disable_data_summarization: After executing a query, summarize the response and reply back with only the table and the query that was run.
:type disable_data_summarization: bool
+ :param disable_data_fetch_for_training: Train using only table and column schema metadata without fetching sample data. This speeds up training but may result in less context for the model.
+ :type disable_data_fetch_for_training: bool
:param data_columns_to_ignore: Columns to ignore while encoding information about structured data tables in context for the LLM. A list of strings of format "<table_name>.<column_name>".
:type data_columns_to_ignore: List[str]
:param search_score_cutoff: Minimum search score to consider a document as a valid search result.
@@ -8770,6 +9562,8 @@ Package Contents
:type include_bm25_retrieval: bool
:param database_connector_id: Database connector ID to use for connecting external database that gives access to structured data to the LLM.
:type database_connector_id: str
+ :param database_connector_ids: List of database connector IDs to use for connecting external databases that give access to structured data to the LLM.
+ :type database_connector_ids: List[str]
:param database_connector_tables: List of tables to use from the database connector for the ChatLLM.
:type database_connector_tables: List[str]
:param enable_code_execution: Enable python code execution in the ChatLLM. This equips the LLM with a python kernel in which all its code is executed.
@@ -8778,7 +9572,7 @@ Package Contents
:type enable_response_caching: bool
:param unknown_answer_phrase: Fallback response when the LLM can't find an answer.
:type unknown_answer_phrase: str
- :param enable_tool_bar: Enable the tool bar in Enterprise ChatLLM to provide additional functionalities like tool_use, web_search, image_gen, etc.
+ :param enable_tool_bar: Enable the tool bar in Enterprise ChatLLM to provide additional functionalities like tool_use, web_search, image_gen, etc. This requires enable_web_search to be enabled.
:type enable_tool_bar: bool
:param enable_inline_source_citations: Enable inline citations of the sources in the response.
:type enable_inline_source_citations: bool
@@ -8787,6 +9581,16 @@ Package Contents
:type json_response_instructions: str
:param json_response_schema: Specifies the JSON schema that the model should adhere to if `response_format` is set to "JSON". This should be a json-formatted string where each field of the expected schema is mapped to a dictionary containing the fields 'type', 'required' and 'description'. For example - '{"sample_field": {"type": "integer", "required": true, "description": "Sample Field"}}'
:type json_response_schema: str
+ :param mask_pii: Mask PII in the prompts and uploaded documents before sending them to the LLM. Only available for Enterprise users and will cause validation errors if set to True for ChatLLM Teams users.
+ :type mask_pii: bool
+ :param builtin_tools: List of builtin system connector tools to use in the ChatLLM. Using builtin tools does not require enabling the tool bar (the enable_tool_bar flag).
+ :type builtin_tools: List[SystemConnectorTool]
+ :param config_connectors: List of names of config connectors to use in the ChatLLM. This should not be used with document_retrievers.
+ :type config_connectors: List[str]
+ :param mcp_servers: List of names of MCP servers to use in the ChatLLM. This should not be used with document_retrievers.
+ :type mcp_servers: List[str]
+ :param agentic_loop_mode: Enables an agentic loop that issues a series of tool calls when needed to respond. If set to False, the agentic loop is not used. If unset or set to Auto, the agentic loop is used automatically based on conditions such as the presence of tools in the model.
+ :type agentic_loop_mode: bool
.. py:attribute:: document_retrievers
@@ -8909,6 +9713,12 @@ Package Contents
+ .. py:attribute:: disable_data_fetch_for_training
+ :type: bool
+ :value: None
+
+
+
.. py:attribute:: data_columns_to_ignore
:type: List[str]
:value: None
@@ -8933,6 +9743,12 @@ Package Contents
+ .. py:attribute:: database_connector_ids
+ :type: List[str]
+ :value: None
+
+
+
.. py:attribute:: database_connector_tables
:type: List[str]
:value: None
@@ -8999,6 +9815,36 @@ Package Contents
+ .. py:attribute:: mask_pii
+ :type: bool
+ :value: None
+
+
+
+ .. py:attribute:: builtin_tools
+ :type: List[SystemConnectorTool]
+ :value: None
+
+
+
+ .. py:attribute:: config_connectors
+ :type: List[str]
+ :value: None
+
+
+
+ .. py:attribute:: mcp_servers
+ :type: List[str]
+ :value: None
+
+
+
+ .. py:attribute:: agentic_loop_mode
+ :type: bool
+ :value: None
+
+
+
.. py:method:: __post_init__()
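+ A sketch of a ChatLLM training config exercising the new fields (connector IDs and the
+ builtin tool name are placeholders; only parameters documented above are used):
+
+ .. code-block:: python
+
+    from abacusai.api_class import ChatLLMTrainingConfig, SystemConnectorTool
+
+    config = ChatLLMTrainingConfig(
+        database_connector_ids=['db_connector_1', 'db_connector_2'],  # placeholder IDs
+        disable_data_fetch_for_training=True,   # train on schema metadata only
+        mask_pii=True,                          # Enterprise-only, see note above
+        builtin_tools=[SystemConnectorTool(value='web_search')],  # hypothetical tool name
+        agentic_loop_mode=True,
+    )
+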
@@ -9587,10 +10433,6 @@ Package Contents
Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`
- Helper class that provides a standard way to create an ABC using
- inheritance.
-
-
.. py:attribute:: config_abstract_class
@@ -10358,10 +11200,6 @@ Package Contents
Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`
- Helper class that provides a standard way to create an ABC using
- inheritance.
-
-
.. py:attribute:: config_abstract_class
@@ -10430,10 +11268,6 @@ Package Contents
Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`
- Helper class that provides a standard way to create an ABC using
- inheritance.
-
-
.. py:attribute:: config_abstract_class
@@ -10478,11 +11312,6 @@ Package Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:class:: ApiClass
@@ -10744,10 +11573,6 @@ Package Contents
Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`
- Helper class that provides a standard way to create an ABC using
- inheritance.
-
-
.. py:attribute:: config_abstract_class
@@ -10841,6 +11666,8 @@ Package Contents
:type pipeline_variable: str
:param description: The description of the argument
:type description: str
+ :param long_description: The detailed description of the argument
+ :type long_description: str
:param item_type: Type of items when variable_type is LIST
:type item_type: str
@@ -10881,6 +11708,12 @@ Package Contents
+ .. py:attribute:: long_description
+ :type: str
+ :value: None
+
+
+
.. py:attribute:: item_type
:type: str
:value: None
@@ -10898,6 +11731,10 @@ Package Contents
:type variable_type: PythonFunctionOutputArgumentType
:param name: The name of the python function variable
:type name: str
+ :param description: The description of the output
+ :type description: str
+ :param long_description: The detailed description of the output
+ :type long_description: str
.. py:attribute:: variable_type
@@ -10912,6 +11749,18 @@ Package Contents
+ .. py:attribute:: description
+ :type: str
+ :value: None
+
+
+
+ .. py:attribute:: long_description
+ :type: str
+ :value: None
+
+
+
.. py:class:: ApiClass
Bases: :py:obj:`abc.ABC`
@@ -11051,11 +11900,6 @@ Package Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:class:: DatabaseConnectorExportConfig
@@ -11119,21 +11963,12 @@ Package Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:class:: _FeatureGroupExportConfigFactory
Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`
- Helper class that provides a standard way to create an ABC using
- inheritance.
-
-
.. py:attribute:: config_abstract_class
@@ -11234,11 +12069,6 @@ Package Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:data:: Segment
@@ -11290,7 +12120,7 @@ Package Contents
:type: int
-.. py:class:: TextResponseSection(text, section_key = None)
+.. py:class:: TextResponseSection(segment = None, section_key = None, text = None)
Bases: :py:obj:`ResponseSection`
@@ -11386,11 +12216,6 @@ Package Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:class:: ListResponseSection(items, section_key = None)
diff --git a/docs/_sources/autoapi/abacusai/api_class/model/index.rst.txt b/docs/_sources/autoapi/abacusai/api_class/model/index.rst.txt
index 59e6fa3d5..ca1b77c4c 100644
--- a/docs/_sources/autoapi/abacusai/api_class/model/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/api_class/model/index.rst.txt
@@ -15,6 +15,7 @@ Classes
abacusai.api_class.model.ForecastingTrainingConfig
abacusai.api_class.model.NamedEntityExtractionTrainingConfig
abacusai.api_class.model.NaturalLanguageSearchTrainingConfig
+ abacusai.api_class.model.SystemConnectorTool
abacusai.api_class.model.ChatLLMTrainingConfig
abacusai.api_class.model.SentenceBoundaryDetectionTrainingConfig
abacusai.api_class.model.SentimentDetectionTrainingConfig
@@ -1424,6 +1425,29 @@ Module Contents
.. py:method:: __post_init__()
+.. py:class:: SystemConnectorTool
+
+ Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
+
+
+ System connector tool used to integrate chatbots with external services.
+
+ :param value: The name of the tool.
+ :type value: str
+ :param configs: Optional. A dictionary of key-value pairs that are used to configure the tool.
+ :type configs: dict
+
+
+ .. py:attribute:: value
+ :type: str
+
+
+ .. py:attribute:: configs
+ :type: dict
+ :value: None
+
+
+
.. py:class:: ChatLLMTrainingConfig
Bases: :py:obj:`TrainingConfig`
@@ -1470,6 +1494,8 @@ Module Contents
:type hide_sql_and_code: bool
:param disable_data_summarization: After executing a query, summarize the response and reply back with only the table and the query that was run.
:type disable_data_summarization: bool
+ :param disable_data_fetch_for_training: Train using only table and column schema metadata without fetching sample data. This speeds up training but may result in less context for the model.
+ :type disable_data_fetch_for_training: bool
:param data_columns_to_ignore: Columns to ignore while encoding information about structured data tables in context for the LLM. A list of strings of format "<table_name>.<column_name>".
:type data_columns_to_ignore: List[str]
:param search_score_cutoff: Minimum search score to consider a document as a valid search result.
@@ -1478,6 +1504,8 @@ Module Contents
:type include_bm25_retrieval: bool
:param database_connector_id: Database connector ID to use for connecting external database that gives access to structured data to the LLM.
:type database_connector_id: str
+ :param database_connector_ids: List of database connector IDs to use for connecting external databases that give access to structured data to the LLM.
+ :type database_connector_ids: List[str]
:param database_connector_tables: List of tables to use from the database connector for the ChatLLM.
:type database_connector_tables: List[str]
:param enable_code_execution: Enable python code execution in the ChatLLM. This equips the LLM with a python kernel in which all its code is executed.
@@ -1486,7 +1514,7 @@ Module Contents
:type enable_response_caching: bool
:param unknown_answer_phrase: Fallback response when the LLM can't find an answer.
:type unknown_answer_phrase: str
- :param enable_tool_bar: Enable the tool bar in Enterprise ChatLLM to provide additional functionalities like tool_use, web_search, image_gen, etc.
+ :param enable_tool_bar: Enable the tool bar in Enterprise ChatLLM to provide additional functionalities like tool_use, web_search, image_gen, etc. This requires enable_web_search to be enabled.
:type enable_tool_bar: bool
:param enable_inline_source_citations: Enable inline citations of the sources in the response.
:type enable_inline_source_citations: bool
@@ -1495,6 +1523,16 @@ Module Contents
:type json_response_instructions: str
:param json_response_schema: Specifies the JSON schema that the model should adhere to if `response_format` is set to "JSON". This should be a json-formatted string where each field of the expected schema is mapped to a dictionary containing the fields 'type', 'required' and 'description'. For example - '{"sample_field": {"type": "integer", "required": true, "description": "Sample Field"}}'
:type json_response_schema: str
+ :param mask_pii: Mask PII in the prompts and uploaded documents before sending them to the LLM. Only available for Enterprise users and will cause validation errors if set to True for ChatLLM Teams users.
+ :type mask_pii: bool
+ :param builtin_tools: List of builtin system connector tools to use in the ChatLLM. Using builtin tools does not require enabling the tool bar (the enable_tool_bar flag).
+ :type builtin_tools: List[SystemConnectorTool]
+ :param config_connectors: List of names of config connectors to use in the ChatLLM. This should not be used with document_retrievers.
+ :type config_connectors: List[str]
+ :param mcp_servers: List of names of MCP servers to use in the ChatLLM. This should not be used with document_retrievers.
+ :type mcp_servers: List[str]
+ :param agentic_loop_mode: Enables an agentic loop that issues a series of tool calls when needed to respond. If set to False, the agentic loop is not used. If unset or set to Auto, the agentic loop is used automatically based on conditions such as the presence of tools in the model.
+ :type agentic_loop_mode: bool
.. py:attribute:: document_retrievers
@@ -1617,6 +1655,12 @@ Module Contents
+ .. py:attribute:: disable_data_fetch_for_training
+ :type: bool
+ :value: None
+
+
+
.. py:attribute:: data_columns_to_ignore
:type: List[str]
:value: None
@@ -1641,6 +1685,12 @@ Module Contents
+ .. py:attribute:: database_connector_ids
+ :type: List[str]
+ :value: None
+
+
+
.. py:attribute:: database_connector_tables
:type: List[str]
:value: None
@@ -1707,6 +1757,36 @@ Module Contents
+ .. py:attribute:: mask_pii
+ :type: bool
+ :value: None
+
+
+
+ .. py:attribute:: builtin_tools
+ :type: List[SystemConnectorTool]
+ :value: None
+
+
+
+ .. py:attribute:: config_connectors
+ :type: List[str]
+ :value: None
+
+
+
+ .. py:attribute:: mcp_servers
+ :type: List[str]
+ :value: None
+
+
+
+ .. py:attribute:: agentic_loop_mode
+ :type: bool
+ :value: None
+
+
+
.. py:method:: __post_init__()
@@ -2295,10 +2375,6 @@ Module Contents
Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`
- Helper class that provides a standard way to create an ABC using
- inheritance.
-
-
.. py:attribute:: config_abstract_class
diff --git a/docs/_sources/autoapi/abacusai/api_class/monitor_alert/index.rst.txt b/docs/_sources/autoapi/abacusai/api_class/monitor_alert/index.rst.txt
index 89c20b1ae..33332fcf5 100644
--- a/docs/_sources/autoapi/abacusai/api_class/monitor_alert/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/api_class/monitor_alert/index.rst.txt
@@ -270,10 +270,6 @@ Module Contents
Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`
- Helper class that provides a standard way to create an ABC using
- inheritance.
-
-
.. py:attribute:: config_abstract_class
@@ -342,10 +338,6 @@ Module Contents
Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`
- Helper class that provides a standard way to create an ABC using
- inheritance.
-
-
.. py:attribute:: config_abstract_class
@@ -390,9 +382,4 @@ Module Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
diff --git a/docs/_sources/autoapi/abacusai/api_class/project/index.rst.txt b/docs/_sources/autoapi/abacusai/api_class/project/index.rst.txt
index 28eebb26c..2548738f2 100644
--- a/docs/_sources/autoapi/abacusai/api_class/project/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/api_class/project/index.rst.txt
@@ -191,10 +191,6 @@ Module Contents
Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`
- Helper class that provides a standard way to create an ABC using
- inheritance.
-
-
.. py:attribute:: config_abstract_class
diff --git a/docs/_sources/autoapi/abacusai/api_class/python_functions/index.rst.txt b/docs/_sources/autoapi/abacusai/api_class/python_functions/index.rst.txt
index ffb796383..796e97954 100644
--- a/docs/_sources/autoapi/abacusai/api_class/python_functions/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/api_class/python_functions/index.rst.txt
@@ -35,6 +35,8 @@ Module Contents
:type pipeline_variable: str
:param description: The description of the argument
:type description: str
+ :param long_description: The detailed description of the argument
+ :type long_description: str
:param item_type: Type of items when variable_type is LIST
:type item_type: str
@@ -75,6 +77,12 @@ Module Contents
+ .. py:attribute:: long_description
+ :type: str
+ :value: None
+
+
+
.. py:attribute:: item_type
:type: str
:value: None
@@ -92,6 +100,10 @@ Module Contents
:type variable_type: PythonFunctionOutputArgumentType
:param name: The name of the python function variable
:type name: str
+ :param description: The description of the output
+ :type description: str
+ :param long_description: The detailed description of the output
+ :type long_description: str
.. py:attribute:: variable_type
@@ -106,3 +118,15 @@ Module Contents
+ .. py:attribute:: description
+ :type: str
+ :value: None
+
+
+
+ .. py:attribute:: long_description
+ :type: str
+ :value: None
+
+
+
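+ A sketch of arguments using the new ``description``/``long_description`` fields. The enum
+ members shown (``STRING``, ``FEATURE_GROUP``) are assumptions about the argument-type
+ enums; the names and descriptions are illustrative only.
+
+ .. code-block:: python
+
+    from abacusai.api_class import (
+        PythonFunctionArgument,
+        PythonFunctionArgumentType,
+        PythonFunctionOutputArgument,
+        PythonFunctionOutputArgumentType,
+    )
+
+    arg = PythonFunctionArgument(
+        variable_type=PythonFunctionArgumentType.STRING,   # assumed enum member
+        name='table_name',
+        description='Name of the input table',
+        long_description='Fully qualified name of the feature group table the function reads from.',
+    )
+
+    out = PythonFunctionOutputArgument(
+        variable_type=PythonFunctionOutputArgumentType.FEATURE_GROUP,  # assumed enum member
+        name='result_fg',
+        description='Transformed feature group',
+        long_description='Feature group produced after applying the transformation to the input table.',
+    )
+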
diff --git a/docs/_sources/autoapi/abacusai/api_class/refresh/index.rst.txt b/docs/_sources/autoapi/abacusai/api_class/refresh/index.rst.txt
index a29a90d86..5229880aa 100644
--- a/docs/_sources/autoapi/abacusai/api_class/refresh/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/api_class/refresh/index.rst.txt
@@ -67,11 +67,6 @@ Module Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:class:: DatabaseConnectorExportConfig
@@ -135,21 +130,12 @@ Module Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:class:: _FeatureGroupExportConfigFactory
Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`
- Helper class that provides a standard way to create an ABC using
- inheritance.
-
-
.. py:attribute:: config_abstract_class
diff --git a/docs/_sources/autoapi/abacusai/api_class/segments/index.rst.txt b/docs/_sources/autoapi/abacusai/api_class/segments/index.rst.txt
index f0b75709c..6f9ffc252 100644
--- a/docs/_sources/autoapi/abacusai/api_class/segments/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/api_class/segments/index.rst.txt
@@ -59,11 +59,6 @@ Module Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:data:: Segment
@@ -115,7 +110,7 @@ Module Contents
:type: int
-.. py:class:: TextResponseSection(text, section_key = None)
+.. py:class:: TextResponseSection(segment = None, section_key = None, text = None)
Bases: :py:obj:`ResponseSection`
@@ -211,11 +206,6 @@ Module Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:class:: ListResponseSection(items, section_key = None)
diff --git a/docs/_sources/autoapi/abacusai/api_endpoint/index.rst.txt b/docs/_sources/autoapi/abacusai/api_endpoint/index.rst.txt
index bb18e33db..8eaad6a0c 100644
--- a/docs/_sources/autoapi/abacusai/api_endpoint/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/api_endpoint/index.rst.txt
@@ -15,7 +15,7 @@ Classes
Module Contents
---------------
-.. py:class:: ApiEndpoint(client, apiEndpoint=None, predictEndpoint=None, proxyEndpoint=None, llmEndpoint=None, externalChatEndpoint=None, dashboardEndpoint=None)
+.. py:class:: ApiEndpoint(client, apiEndpoint=None, predictEndpoint=None, proxyEndpoint=None, llmEndpoint=None, externalChatEndpoint=None, dashboardEndpoint=None, hostingDomain=None, publicHostingDomain=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -36,6 +36,10 @@ Module Contents
:type externalChatEndpoint: str
:param dashboardEndpoint: The URI that the external chat will use to go back to the dashboard
:type dashboardEndpoint: str
+ :param hostingDomain: The domain for hosted app deployments
+ :type hostingDomain: str
+ :param publicHostingDomain: The domain for publicly hosted app deployments
+ :type publicHostingDomain: str
.. py:attribute:: api_endpoint
@@ -68,6 +72,16 @@ Module Contents
+ .. py:attribute:: hosting_domain
+ :value: None
+
+
+
+ .. py:attribute:: public_hosting_domain
+ :value: None
+
+
+
.. py:attribute:: deprecated_keys
diff --git a/docs/_sources/autoapi/abacusai/api_key/index.rst.txt b/docs/_sources/autoapi/abacusai/api_key/index.rst.txt
index c8dea7755..80714b5a2 100644
--- a/docs/_sources/autoapi/abacusai/api_key/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/api_key/index.rst.txt
@@ -32,7 +32,7 @@ Module Contents
:type apiKeySuffix: str
:param tag: A user-friendly tag for the API key.
:type tag: str
- :param type: The type of the API key, either 'default', 'code-llm', or 'computer-use'.
+ :param type: The type of the API key, either 'default', 'code-llm', 'terminal', or 'computer-use'.
:type type: str
:param createdAt: The timestamp when the API key was created.
:type createdAt: str
diff --git a/docs/_sources/autoapi/abacusai/app_user_group/index.rst.txt b/docs/_sources/autoapi/abacusai/app_user_group/index.rst.txt
index e4cb55aaf..ecdfe78af 100644
--- a/docs/_sources/autoapi/abacusai/app_user_group/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/app_user_group/index.rst.txt
@@ -15,7 +15,7 @@ Classes
Module Contents
---------------
-.. py:class:: AppUserGroup(client, name=None, userGroupId=None, externalApplicationIds=None, invitedUserEmails=None, publicUserGroup=None, hasExternalApplicationReporting=None, isExternalServiceGroup=None, externalServiceGroupId=None, users={})
+.. py:class:: AppUserGroup(client, name=None, userGroupId=None, externalApplicationIds=None, invitedUserEmails=None, publicUserGroup=None, hasExternalApplicationReporting=None, isExternalServiceGroup=None, externalServiceGroupId=None, creationSource=None, users={})
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -40,6 +40,8 @@ Module Contents
:type isExternalServiceGroup: bool
:param externalServiceGroupId: The identifier that corresponds to the app user group's external service group representation
:type externalServiceGroupId: str
+ :param creationSource: The source of the app user group
+ :type creationSource: str
:param users: The users in the user group.
:type users: User
@@ -84,6 +86,11 @@ Module Contents
+ .. py:attribute:: creation_source
+ :value: None
+
+
+
.. py:attribute:: users
diff --git a/docs/_sources/autoapi/abacusai/application_connector/index.rst.txt b/docs/_sources/autoapi/abacusai/application_connector/index.rst.txt
index 4c94d6499..dc948b26b 100644
--- a/docs/_sources/autoapi/abacusai/application_connector/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/application_connector/index.rst.txt
@@ -15,7 +15,7 @@ Classes
Module Contents
---------------
-.. py:class:: ApplicationConnector(client, applicationConnectorId=None, service=None, name=None, createdAt=None, status=None, auth=None)
+.. py:class:: ApplicationConnector(client, applicationConnectorId=None, service=None, serviceName=None, name=None, createdAt=None, status=None, auth=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -28,6 +28,8 @@ Module Contents
:type applicationConnectorId: str
:param service: The service this connection connects to
:type service: str
+ :param serviceName: For OAuth services, the specific provider name (e.g., 'spotify')
+ :type serviceName: str
:param name: A user-friendly name for the service
:type name: str
:param createdAt: When the API key was created
@@ -48,6 +50,11 @@ Module Contents
+ .. py:attribute:: service_name
+ :value: None
+
+
+
.. py:attribute:: name
:value: None
diff --git a/docs/_sources/autoapi/abacusai/audio_gen_settings/index.rst.txt b/docs/_sources/autoapi/abacusai/audio_gen_settings/index.rst.txt
index 3a9fc53d3..ab5779b4c 100644
--- a/docs/_sources/autoapi/abacusai/audio_gen_settings/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/audio_gen_settings/index.rst.txt
@@ -15,7 +15,7 @@ Classes
Module Contents
---------------
-.. py:class:: AudioGenSettings(client, model=None)
+.. py:class:: AudioGenSettings(client, model=None, settings=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -24,8 +24,10 @@ Module Contents
:param client: An authenticated API Client instance
:type client: ApiClient
- :param model: Dropdown for models available for audio generation.
+ :param model: Names of models available for audio generation.
:type model: dict
+ :param settings: Settings for each model.
+ :type settings: dict
.. py:attribute:: model
@@ -33,6 +35,11 @@ Module Contents
+ .. py:attribute:: settings
+ :value: None
+
+
+
.. py:attribute:: deprecated_keys
diff --git a/docs/_sources/autoapi/abacusai/audio_url_result/index.rst.txt b/docs/_sources/autoapi/abacusai/audio_url_result/index.rst.txt
new file mode 100644
index 000000000..45f034b3e
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/audio_url_result/index.rst.txt
@@ -0,0 +1,57 @@
+abacusai.audio_url_result
+=========================
+
+.. py:module:: abacusai.audio_url_result
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.audio_url_result.AudioUrlResult
+
+
+Module Contents
+---------------
+
+.. py:class:: AudioUrlResult(client, audioUrl=None, creditsUsed=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Text-to-speech (TTS) result
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param audioUrl: The audio url.
+ :type audioUrl: str
+ :param creditsUsed: The credits used.
+ :type creditsUsed: float
+
+
+ .. py:attribute:: audio_url
+ :value: None
+
+
+
+ .. py:attribute:: credits_used
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/audit_log/index.rst.txt b/docs/_sources/autoapi/abacusai/audit_log/index.rst.txt
new file mode 100644
index 000000000..9a7925f05
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/audit_log/index.rst.txt
@@ -0,0 +1,92 @@
+abacusai.audit_log
+==================
+
+.. py:module:: abacusai.audit_log
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.audit_log.AuditLog
+
+
+Module Contents
+---------------
+
+.. py:class:: AuditLog(client, createdAt=None, userId=None, objectId=None, action=None, source=None, refreshPolicyId=None, pipelineId=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ An audit log entry
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param createdAt: The timestamp when the audit log entry was created
+ :type createdAt: str
+ :param userId: The hashed ID of the user who performed the action
+ :type userId: str
+ :param objectId: The hashed ID of the object that was affected by the action
+ :type objectId: str
+ :param action: The action performed (create, modify, start, stop, delete, share, hide, credential_change, login)
+ :type action: str
+ :param source: The source of the action (api, ui, pipeline, cli, system)
+ :type source: str
+ :param refreshPolicyId: The hashed ID of the refresh policy if applicable
+ :type refreshPolicyId: str
+ :param pipelineId: The hashed ID of the pipeline if applicable
+ :type pipelineId: str
+
+
+ .. py:attribute:: created_at
+ :value: None
+
+
+
+ .. py:attribute:: user_id
+ :value: None
+
+
+
+ .. py:attribute:: object_id
+ :value: None
+
+
+
+ .. py:attribute:: action
+ :value: None
+
+
+
+ .. py:attribute:: source
+ :value: None
+
+
+
+ .. py:attribute:: refresh_policy_id
+ :value: None
+
+
+
+ .. py:attribute:: pipeline_id
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/chat_message/index.rst.txt b/docs/_sources/autoapi/abacusai/chat_message/index.rst.txt
index 1750c3fc8..51923a730 100644
--- a/docs/_sources/autoapi/abacusai/chat_message/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/chat_message/index.rst.txt
@@ -15,7 +15,7 @@ Classes
Module Contents
---------------
-.. py:class:: ChatMessage(client, role=None, text=None, timestamp=None, isUseful=None, feedback=None, docIds=None, hotkeyTitle=None, tasks=None, keywordArguments=None)
+.. py:class:: ChatMessage(client, role=None, text=None, timestamp=None, isUseful=None, feedback=None, docIds=None, hotkeyTitle=None, tasks=None, keywordArguments=None, computePointsUsed=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -42,6 +42,8 @@ Module Contents
:type tasks: list[str]
:param keywordArguments: A dict of kwargs used to generate the response.
:type keywordArguments: dict
+ :param computePointsUsed: The number of compute points used for the message.
+ :type computePointsUsed: int
.. py:attribute:: role
@@ -89,6 +91,11 @@ Module Contents
+ .. py:attribute:: compute_points_used
+ :value: None
+
+
+
.. py:attribute:: deprecated_keys
diff --git a/docs/_sources/autoapi/abacusai/chatllm_computer_status/index.rst.txt b/docs/_sources/autoapi/abacusai/chatllm_computer_status/index.rst.txt
new file mode 100644
index 000000000..fb5e52442
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/chatllm_computer_status/index.rst.txt
@@ -0,0 +1,71 @@
+abacusai.chatllm_computer_status
+================================
+
+.. py:module:: abacusai.chatllm_computer_status
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.chatllm_computer_status.ChatllmComputerStatus
+
+
+Module Contents
+---------------
+
+.. py:class:: ChatllmComputerStatus(client, computerId=None, vncEndpoint=None, computerStarted=None, restartRequired=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ ChatLLM Computer Status
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param computerId: The ID of the computer; it can be a deployment_conversation_id or a computer_id (TODO: add separate field for deployment_conversation_id)
+ :type computerId: str
+ :param vncEndpoint: The VNC endpoint of the computer
+ :type vncEndpoint: str
+ :param computerStarted: Whether the computer has started
+ :type computerStarted: bool
+ :param restartRequired: Whether the computer needs to be restarted
+ :type restartRequired: bool
+
+
+ .. py:attribute:: computer_id
+ :value: None
+
+
+
+ .. py:attribute:: vnc_endpoint
+ :value: None
+
+
+
+ .. py:attribute:: computer_started
+ :value: None
+
+
+
+ .. py:attribute:: restart_required
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/chatllm_memory/index.rst.txt b/docs/_sources/autoapi/abacusai/chatllm_memory/index.rst.txt
new file mode 100644
index 000000000..9edbf2c66
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/chatllm_memory/index.rst.txt
@@ -0,0 +1,64 @@
+abacusai.chatllm_memory
+=======================
+
+.. py:module:: abacusai.chatllm_memory
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.chatllm_memory.ChatllmMemory
+
+
+Module Contents
+---------------
+
+.. py:class:: ChatllmMemory(client, chatllmMemoryId=None, memory=None, sourceDeploymentConversationId=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ An LLM created memory in ChatLLM
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param chatllmMemoryId: The ID of the chatllm memory.
+ :type chatllmMemoryId: str
+ :param memory: The text of the ChatLLM memory.
+ :type memory: str
+ :param sourceDeploymentConversationId: The deployment conversation where this memory was created.
+ :type sourceDeploymentConversationId: str
+
+
+ .. py:attribute:: chatllm_memory_id
+ :value: None
+
+
+
+ .. py:attribute:: memory
+ :value: None
+
+
+
+ .. py:attribute:: source_deployment_conversation_id
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/chatllm_project/index.rst.txt b/docs/_sources/autoapi/abacusai/chatllm_project/index.rst.txt
new file mode 100644
index 000000000..4d111e02a
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/chatllm_project/index.rst.txt
@@ -0,0 +1,85 @@
+abacusai.chatllm_project
+========================
+
+.. py:module:: abacusai.chatllm_project
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.chatllm_project.ChatllmProject
+
+
+Module Contents
+---------------
+
+.. py:class:: ChatllmProject(client, chatllmProjectId=None, name=None, description=None, customInstructions=None, createdAt=None, updatedAt=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ ChatLLM Project
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param chatllmProjectId: The ID of the chatllm project.
+ :type chatllmProjectId: id
+ :param name: The name of the chatllm project.
+ :type name: str
+ :param description: The description of the chatllm project.
+ :type description: str
+ :param customInstructions: The custom instructions of the chatllm project.
+ :type customInstructions: str
+ :param createdAt: The creation time of the chatllm project.
+ :type createdAt: str
+ :param updatedAt: The update time of the chatllm project.
+ :type updatedAt: str
+
+
+ .. py:attribute:: chatllm_project_id
+ :value: None
+
+
+
+ .. py:attribute:: name
+ :value: None
+
+
+
+ .. py:attribute:: description
+ :value: None
+
+
+
+ .. py:attribute:: custom_instructions
+ :value: None
+
+
+
+ .. py:attribute:: created_at
+ :value: None
+
+
+
+ .. py:attribute:: updated_at
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/chatllm_project_permissions/index.rst.txt b/docs/_sources/autoapi/abacusai/chatllm_project_permissions/index.rst.txt
new file mode 100644
index 000000000..1e0c2c3fb
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/chatllm_project_permissions/index.rst.txt
@@ -0,0 +1,71 @@
+abacusai.chatllm_project_permissions
+====================================
+
+.. py:module:: abacusai.chatllm_project_permissions
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.chatllm_project_permissions.ChatllmProjectPermissions
+
+
+Module Contents
+---------------
+
+.. py:class:: ChatllmProjectPermissions(client, chatllmProjectId=None, accessLevel=None, userPermissions=None, userGroupPermissions=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ ChatLLM Project Permissions
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param chatllmProjectId: The ID of the chatllm project.
+ :type chatllmProjectId: id
+ :param accessLevel: The access level of the chatllm project.
+ :type accessLevel: str
+ :param userPermissions: List of tuples containing (user_id, permission).
+ :type userPermissions: list
+ :param userGroupPermissions: List of tuples containing (user_group_id, permission).
+ :type userGroupPermissions: list
+
+
+ .. py:attribute:: chatllm_project_id
+ :value: None
+
+
+
+ .. py:attribute:: access_level
+ :value: None
+
+
+
+ .. py:attribute:: user_permissions
+ :value: None
+
+
+
+ .. py:attribute:: user_group_permissions
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/chatllm_task/index.rst.txt b/docs/_sources/autoapi/abacusai/chatllm_task/index.rst.txt
index 54c8f6bb7..f95282486 100644
--- a/docs/_sources/autoapi/abacusai/chatllm_task/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/chatllm_task/index.rst.txt
@@ -15,7 +15,7 @@ Classes
Module Contents
---------------
-.. py:class:: ChatllmTask(client, chatllmTaskId=None, name=None, instructions=None, lifecycle=None, scheduleInfo=None, externalApplicationId=None, deploymentConversationId=None, enableEmailAlerts=None, email=None)
+.. py:class:: ChatllmTask(client, chatllmTaskId=None, daemonTaskId=None, taskType=None, name=None, instructions=None, description=None, lifecycle=None, scheduleInfo=None, externalApplicationId=None, deploymentConversationId=None, sourceDeploymentConversationId=None, enableEmailAlerts=None, email=None, numUnreadTaskInstances=None, computePointsUsed=None, displayMarkdown=None, requiresNewConversation=None, executionMode=None, taskDefinition=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -26,10 +26,16 @@ Module Contents
:type client: ApiClient
:param chatllmTaskId: The id of the chatllm task.
:type chatllmTaskId: str
+ :param daemonTaskId: The id of the daemon task.
+ :type daemonTaskId: str
+ :param taskType: The type of task ('chatllm' or 'daemon').
+ :type taskType: str
:param name: The name of the chatllm task.
:type name: str
:param instructions: The instructions of the chatllm task.
:type instructions: str
+ :param description: The description of the chatllm task.
+ :type description: str
:param lifecycle: The lifecycle of the chatllm task.
:type lifecycle: str
:param scheduleInfo: The schedule info of the chatllm task.
@@ -38,10 +44,24 @@ Module Contents
:type externalApplicationId: str
:param deploymentConversationId: The deployment conversation id associated with the chatllm task.
:type deploymentConversationId: str
+ :param sourceDeploymentConversationId: The source deployment conversation id associated with the chatllm task.
+ :type sourceDeploymentConversationId: str
:param enableEmailAlerts: Whether email alerts are enabled for the chatllm task.
:type enableEmailAlerts: bool
:param email: The email to send alerts to.
:type email: str
+ :param numUnreadTaskInstances: The number of unread task instances for the chatllm task.
+ :type numUnreadTaskInstances: int
+ :param computePointsUsed: The compute points used for the chatllm task.
+ :type computePointsUsed: int
+ :param displayMarkdown: The display markdown for the chatllm task.
+ :type displayMarkdown: str
+ :param requiresNewConversation: Whether a new conversation is required for the chatllm task.
+ :type requiresNewConversation: bool
+ :param executionMode: The execution mode of the chatllm task.
+ :type executionMode: str
+ :param taskDefinition: The task definition (for web_service_trigger tasks).
+ :type taskDefinition: dict
.. py:attribute:: chatllm_task_id
@@ -49,6 +69,16 @@ Module Contents
+ .. py:attribute:: daemon_task_id
+ :value: None
+
+
+
+ .. py:attribute:: task_type
+ :value: None
+
+
+
.. py:attribute:: name
:value: None
@@ -59,6 +89,11 @@ Module Contents
+ .. py:attribute:: description
+ :value: None
+
+
+
.. py:attribute:: lifecycle
:value: None
@@ -79,6 +114,11 @@ Module Contents
+ .. py:attribute:: source_deployment_conversation_id
+ :value: None
+
+
+
.. py:attribute:: enable_email_alerts
:value: None
@@ -89,6 +129,36 @@ Module Contents
+ .. py:attribute:: num_unread_task_instances
+ :value: None
+
+
+
+ .. py:attribute:: compute_points_used
+ :value: None
+
+
+
+ .. py:attribute:: display_markdown
+ :value: None
+
+
+
+ .. py:attribute:: requires_new_conversation
+ :value: None
+
+
+
+ .. py:attribute:: execution_mode
+ :value: None
+
+
+
+ .. py:attribute:: task_definition
+ :value: None
+
+
+
.. py:attribute:: deprecated_keys
diff --git a/docs/_sources/autoapi/abacusai/client/index.rst.txt b/docs/_sources/autoapi/abacusai/client/index.rst.txt
index 83a92e1ad..21772be5c 100644
--- a/docs/_sources/autoapi/abacusai/client/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/client/index.rst.txt
@@ -49,6 +49,7 @@ Classes
.. autoapisummary::
abacusai.client.AgentResponse
+ abacusai.client.ToolResponse
abacusai.client.ClientOptions
abacusai.client._ApiExceptionFactory
abacusai.client.BaseApiClient
@@ -136,6 +137,14 @@ Module Contents
.. py:method:: __getattr__(item)
+.. py:class:: ToolResponse(*args, **kwargs)
+
+ Bases: :py:obj:`AgentResponse`
+
+
+ Response object for tool to support non-text response sections
+
+
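+ A minimal sketch of a tool returning a ``ToolResponse``. This assumes the same
+ keyword-argument style as ``AgentResponse``; the tool body and section contents are
+ purely illustrative.
+
+ .. code-block:: python
+
+    from abacusai import ToolResponse
+    from abacusai.api_class import TextResponseSection
+
+    def lookup_tool(query: str):
+        # Hypothetical tool body: return plain text plus a structured response section
+        return ToolResponse(
+            text=f'Results for {query}',
+            details=TextResponseSection(text='3 matching rows found.'),
+        )
+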
.. py:class:: ClientOptions(exception_on_404 = True, server = DEFAULT_SERVER)
Options for configuring the ApiClient
@@ -584,7 +593,7 @@ Module Contents
.. py:attribute:: client_version
- :value: '1.4.31'
+ :value: '1.4.76'
@@ -655,7 +664,7 @@ Module Contents
.. py:method:: _call_api(action, method, query_params=None, body=None, files=None, parse_type=None, streamable_response=False, server_override=None, timeout=None, retry_500 = False, data=None)
- .. py:method:: _proxy_request(name, method = 'POST', query_params = None, body = None, data = None, files=None, parse_type=None, is_sync = False, streamable_response = False)
+ .. py:method:: _proxy_request(name, method = 'POST', query_params = None, body = None, data = None, files=None, parse_type=None, is_sync = False, streamable_response = False, timeout=None)
.. py:method:: _build_class(return_class, values)
@@ -693,10 +702,13 @@ Module Contents
:type skip_version_check: bool
- .. py:method:: list_api_keys()
+ .. py:method:: list_api_keys(type = 'default')
Lists all of the user's API keys
+ :param type: The type of API keys to list.
+ :type type: str
+
:returns: List of API Keys for the current user's organization.
:rtype: list[ApiKey]
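+ With the new ``type`` filter, listing only one kind of key might look like this
+ (the credential string is a placeholder; the type values are those listed in the
+ ``ApiKey`` docs above):
+
+ .. code-block:: python
+
+    from abacusai import ApiClient
+
+    client = ApiClient('YOUR_API_KEY')  # placeholder credential
+    code_llm_keys = client.list_api_keys(type='code-llm')
+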
@@ -1373,6 +1385,32 @@ Module Contents
+ .. py:method:: query_database_connector_datallm(database_connector_id, query)
+
+ Runs a read-only query in the specified database connector. This API is specifically designed for DataLLM and enforces read-only access with user-specific access control.
+
+
+ :param database_connector_id: A unique string identifier for the database connector.
+ :type database_connector_id: str
+ :param query: The query to be run in the database connector (must be read-only).
+ :type query: str
+
+
+
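+ A usage sketch (the connector ID and SQL are placeholders; ``client`` is an
+ authenticated ``ApiClient``):
+
+ .. code-block:: python
+
+    # The query must be read-only; writes are not permitted by this endpoint.
+    rows = client.query_database_connector_datallm(
+        database_connector_id='db_connector_id',
+        query='SELECT customer_id, region FROM customers LIMIT 10',
+    )
+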
+ .. py:method:: get_database_connector_auth(database_connector_id)
+
+ Get the authentication details for a given database connector.
+
+ :param database_connector_id: The unique ID associated with the database connector.
+ :type database_connector_id: str
+
+ :returns: The database connector with the authentication details.
+ :rtype: DatabaseConnector
+
+
+
.. py:method:: list_application_connectors()
Retrieves a list of all application connectors along with their associated attributes.
@@ -1391,7 +1429,7 @@ Module Contents
- .. py:method:: get_connector_auth(service = None, application_connector_id = None, scopes = None)
+ .. py:method:: get_connector_auth(service = None, application_connector_id = None, scopes = None, generic_oauth_service = None)
Get the authentication details for a given connector. For user level connectors, the service is required. For org level connectors, the application_connector_id is required.
@@ -1401,8 +1439,40 @@ Module Contents
:type application_connector_id: str
:param scopes: The scopes to request for the connector.
:type scopes: List
+ :param generic_oauth_service: For GENERIC_OAUTH service, specify the OAuth provider (e.g., 'spotify').
+ :type generic_oauth_service: str
:returns: The application connector with the authentication details.
+ :rtype: UnifiedConnector
+
+
+
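+ A sketch for a generic OAuth provider (the ``GENERIC_OAUTH`` enum member and the
+ provider name are assumptions based on the description above; ``client`` is an
+ authenticated ``ApiClient``):
+
+ .. code-block:: python
+
+    from abacusai.api_class import ApplicationConnectorType
+
+    auth = client.get_connector_auth(
+        service=ApplicationConnectorType.GENERIC_OAUTH,  # assumed enum member
+        generic_oauth_service='spotify',                 # provider name, as in the example above
+    )
+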
+ .. py:method:: get_user_connector_auth(service, scopes = None, generic_oauth_service = None, config_connector = None)
+
+ Get the authentication details for a given user level connector.
+
+ :param service: The service name.
+ :type service: ApplicationConnectorType
+ :param scopes: The scopes to request for the connector.
+ :type scopes: List
+ :param generic_oauth_service: For GENERIC_OAUTH service, specify the OAuth provider (e.g., 'spotify').
+ :type generic_oauth_service: str
+ :param config_connector: The config connector id.
+ :type config_connector: str
+
+ :returns: The application connector with the authentication details.
+ :rtype: UnifiedConnector
+
+
+
+ .. py:method:: get_user_mcp_connector_auth(mcp_server_name = None)
+
+ Get the auth for a MCP connector
+
+ :param mcp_server_name: The name of the MCP server
+ :type mcp_server_name: str
+
+ :returns: The MCP connector
:rtype: ApplicationConnector
@@ -1720,12 +1790,14 @@ Module Contents
- .. py:method:: list_model_monitors(project_id)
+ .. py:method:: list_model_monitors(project_id, limit = None)
Retrieves the list of model monitors in the specified project.
:param project_id: Unique string identifier associated with the project.
:type project_id: str
+ :param limit: Maximum number of model monitors to return. An internal limit is applied if not set.
+ :type limit: int
:returns: A list of model monitors.
:rtype: list[ModelMonitor]
@@ -2344,12 +2416,14 @@ Module Contents
- .. py:method:: list_batch_predictions(project_id)
+ .. py:method:: list_batch_predictions(project_id, limit = None)
Retrieves a list of batch predictions in the project.
:param project_id: Unique string identifier of the project.
:type project_id: str
+ :param limit: Maximum number of batch predictions to return. An internal limit is applied if not set.
+ :type limit: int
:returns: List of batch prediction jobs.
:rtype: list[BatchPrediction]
@@ -2713,11 +2787,16 @@ Module Contents
- .. py:method:: list_organization_secrets()
+ .. py:method:: list_organization_secrets(decrypt_value = False, secret_type = OrganizationSecretType.ORG_SECRET)
Lists all secrets for an organization.
- :returns: list of secrets belonging to the organization.
+ :param decrypt_value: Whether to decrypt the secret values.
+ :type decrypt_value: bool
+ :param secret_type: Filter secrets by type. Use OrganizationSecretType enum values.
+ :type secret_type: OrganizationSecretType
+
+ :returns: List of secrets.
:rtype: list[OrganizationSecret]
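+ A sketch using the new filters (``client`` is an authenticated ``ApiClient``):
+
+ .. code-block:: python
+
+    from abacusai import OrganizationSecretType
+
+    # List only organization-level API credentials, without decrypting the values
+    secrets = client.list_organization_secrets(
+        decrypt_value=False,
+        secret_type=OrganizationSecretType.ORG_API_CREDENTIALS,
+    )
+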
@@ -2810,7 +2889,7 @@ Module Contents
- .. py:method:: get_deployment_conversation(deployment_conversation_id = None, external_session_id = None, deployment_id = None, filter_intermediate_conversation_events = True, get_unused_document_uploads = False)
+ .. py:method:: get_deployment_conversation(deployment_conversation_id = None, external_session_id = None, deployment_id = None, filter_intermediate_conversation_events = True, get_unused_document_uploads = False, start = None, limit = None, include_all_versions = False)
Gets a deployment conversation.
@@ -2824,13 +2903,30 @@ Module Contents
:type filter_intermediate_conversation_events: bool
:param get_unused_document_uploads: If true, unused document uploads will be returned. Default is false.
:type get_unused_document_uploads: bool
+ :param start: The start index of the conversation.
+ :type start: int
+ :param limit: The maximum number of conversation messages to return.
+ :type limit: int
+ :param include_all_versions: If True, includes all versions of the last bot message for version switching functionality.
+ :type include_all_versions: bool
:returns: The deployment conversation.
:rtype: DeploymentConversation
- .. py:method:: list_deployment_conversations(deployment_id = None, external_application_id = None, conversation_type = None, fetch_last_llm_info = False)
+ .. py:method:: get_deployment_conversation_file(deployment_conversation_id, file_path)
+
+ Gets a deployment conversation file.
+
+ :param deployment_conversation_id: Unique ID of the conversation.
+ :type deployment_conversation_id: str
+ :param file_path: The path of the file to get.
+ :type file_path: str
+
+
+
+ .. py:method:: list_deployment_conversations(deployment_id = None, external_application_id = None, conversation_type = None, fetch_last_llm_info = False, limit = None, search = None)
Lists all conversations for the given deployment and current user.
@@ -2842,6 +2938,10 @@ Module Contents
:type conversation_type: DeploymentConversationType
:param fetch_last_llm_info: If true, the LLM info for the most recent conversation will be fetched. Only applicable for system-created bots.
:type fetch_last_llm_info: bool
+ :param limit: The number of conversations to return. Defaults to 600.
+ :type limit: int
+ :param search: The search query to filter conversations by title.
+ :type search: str
:returns: The deployment conversations.
:rtype: list[DeploymentConversation]
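+ A sketch using the new ``limit`` and ``search`` parameters (the deployment ID and
+ search term are placeholders; ``client`` is an authenticated ``ApiClient``):
+
+ .. code-block:: python
+
+    conversations = client.list_deployment_conversations(
+        deployment_id='deployment_id',
+        limit=50,
+        search='invoice',  # filters conversations by title
+    )
+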
@@ -2862,6 +2962,18 @@ Module Contents
+ .. py:method:: list_user_group_object_permissions()
+
+ List all user groups permissions associated with the objects in the organization.
+
+ If an object has no associated user groups, it is public among developers and admins.
+
+
+ :returns: List of user group object permissions in the organization.
+ :rtype: list[UserGroupObjectPermission]
+
+
+
.. py:method:: get_app_user_group(user_group_id)
Gets an App User Group.
@@ -2874,6 +2986,18 @@ Module Contents
+ .. py:method:: get_app_user_group_from_name(name = None)
+
+ Gets an App User Group by name.
+
+ :param name: The name of the app user group to retrieve.
+ :type name: str
+
+ :returns: The app user group with the specified name.
+ :rtype: AppUserGroup
+
+
+
.. py:method:: describe_external_application(external_application_id)
Describes an External Application.
@@ -3067,6 +3191,14 @@ Module Contents
+ .. py:method:: list_route_llm_models()
+
+ Lists all the models and their rates for the RouteLLM API.
+
+ :returns: A list of dicts, each containing the model name, description, ID, and API price info.
+
+
+
.. py:function:: get_source_code_info(train_function, predict_function = None, predict_many_function = None, initialize_function = None, common_functions = None)
.. py:function:: get_module_code_from_notebook(file_path)
@@ -3107,6 +3239,47 @@ Module Contents
+ .. py:method:: get_assignments_online_with_new_inputs(deployment_token, deployment_id, assignments_df = None, constraints_df = None, constraint_equations_df = None, feature_mapping_dict = None, solve_time_limit_seconds = None, optimality_gap_limit = None)
+
+ Get alternative positive assignments for a given query. Optimal assignments are ignored and the alternative assignments are returned instead.
+
+ :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be safely embedded in an application or website.
+ :type deployment_token: str
+ :param deployment_id: The unique identifier of a deployment created under the project.
+ :type deployment_id: ID
+ :param assignments_df: A dataframe with all the variables involved in the optimization problem
+ :type assignments_df: pd.DataFrame
+ :param constraints_df: A dataframe of individual constraints, and variables in them
+ :type constraints_df: pd.DataFrame
+ :param constraint_equations_df: A dataframe describing the operator, constant, penalty, etc. of each constraint.
+ This provides the data needed to make sense of the constraints_df.
+ :type constraint_equations_df: pd.DataFrame
+ :param solve_time_limit_seconds: Maximum time in seconds to spend solving the query.
+ :type solve_time_limit_seconds: float
+ :param optimality_gap_limit: Optimality gap we want to come within, after which we accept the solution as valid (0 means we only want an optimal solution). It is computed as abs(best_solution_found - best_bound) / abs(best_solution_found).
+ :type optimality_gap_limit: float
+
+ :returns: The assignments for a given query.
+ :rtype: Dict
+
+
+
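+ A rough sketch of the call shape (the dataframe columns below are purely illustrative;
+ real frames must match the schema the optimization model was trained with, and the
+ token/ID strings are placeholders):
+
+ .. code-block:: python
+
+    import pandas as pd
+
+    assignments_df = pd.DataFrame({'worker': ['w1', 'w2'], 'task': ['t1', 't2']})
+    constraints_df = pd.DataFrame({'constraint_id': ['c1'], 'variable': ['w1_t1']})
+
+    result = client.get_assignments_online_with_new_inputs(
+        deployment_token='deployment_token',  # placeholder
+        deployment_id='deployment_id',        # placeholder
+        assignments_df=assignments_df,
+        constraints_df=constraints_df,
+        solve_time_limit_seconds=30.0,
+    )
+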
+ .. py:method:: get_optimization_input_dataframes_with_new_inputs(deployment_token, deployment_id, query_data)
+
+ Get assignments for a given query, with new inputs
+
+ :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be safely embedded in an application or website.
+ :type deployment_token: str
+ :param deployment_id: The unique identifier of a deployment created under the project.
+ :type deployment_id: str
+ :param query_data: A dictionary of key-value pairs corresponding to updated feature groups (FGs) in the FG tree, used to compute new top-level FGs for an online solve. Values are dataframes and keys are their names; the names should match the ones used during training.
+ :type query_data: dict
+
+ :returns: The output dataframes for a given query, in serialized form; effectively a dict mapping keys to serialized dataframes.
+ :rtype: Dict
+
+
+
.. py:method:: create_dataset_version_from_pandas(table_name_or_id, df, clean_column_names = False)
[Deprecated]
@@ -3370,7 +3543,7 @@ Module Contents
- .. py:method:: create_python_function_from_function(name, function, function_variable_mappings = None, package_requirements = None, function_type = PythonFunctionType.FEATURE_GROUP.value)
+ .. py:method:: create_python_function_from_function(name, function, function_variable_mappings = None, package_requirements = None, function_type = PythonFunctionType.FEATURE_GROUP.value, description = None)
Creates a custom Python function
@@ -3384,6 +3557,8 @@ Module Contents
:type package_requirements: List
:param function_type: Type of Python function to create. Default is FEATURE_GROUP, but can also be PLOTLY_FIG.
:type function_type: PythonFunctionType
+ :param description: Description of the Python function.
+ :type description: str
@@ -3677,7 +3852,7 @@ Module Contents
.. py:method:: execute_workflow_node(node, inputs)
- Execute the workflow node given input arguments.
+ Execute the workflow node given input arguments. This is to be used for testing purposes only.
:param node: The workflow node to be executed.
:type node: WorkflowGraphNode
@@ -3754,6 +3929,15 @@ Module Contents
+ .. py:method:: _get_agent_client_type()
+
+ Returns the client type for the current request context.
+
+ :returns: The client type for the current request context.
+ :rtype: AgentClientType
+
+
+
.. py:method:: get_agent_context_chat_history()
Gets a history of chat messages from the current request context. Applicable within a AIAgent
@@ -3871,7 +4055,7 @@ Module Contents
.. py:method:: get_agent_context_user_info()
- Gets information about the user interacting with the agent.
+ Gets information about the user interacting with the agent and the user action, if applicable.
Applicable within a AIAgent execute function.
:returns: Containing email and name of the end user.
@@ -3879,14 +4063,16 @@ Module Contents
- .. py:method:: get_agent_runtime_config(key)
+ .. py:method:: get_runtime_config(key)
- Gets the deployment level runtime config for the agent
+ Retrieve the value of a specified configuration key from the deployment's runtime settings.
+ These settings can be configured in the deployment details page in the UI.
+ Currently supported for AI Agents, Custom Python Model and Prediction Operators.
- :param key: Key for which the config value is to be fetched
+ :param key: The configuration key whose value is to be fetched.
:type key: str
- :returns: Config value for the input key
+ :returns: The value associated with the specified configuration key, or None if the key does not exist.
:rtype: str
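+ For example, a deployed agent might read a deployment-level setting like this (the key name below is a hypothetical example, not a built-in key):
+
+ .. code-block:: python
+
+    from abacusai import ApiClient
+
+    client = ApiClient()  # inside a deployment, credentials/context are assumed to be available
+
+    endpoint = client.get_runtime_config('UPSTREAM_ENDPOINT')  # hypothetical key
+    if endpoint is None:
+        endpoint = 'https://example.com/default'  # fall back when the key is not configured
+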
@@ -3970,9 +4156,9 @@ Module Contents
- .. py:method:: _get_agent_async_app_caller()
+ .. py:method:: _get_agent_caller()
- Gets the caller for the current request context of async app. Applicable within a AIAgent execute function.
+ Gets the caller for the current request context. Applicable within a AIAgent execute function.
:returns: The caller for the current request being processed by the Agent.
:rtype: str
@@ -3981,9 +4167,18 @@ Module Contents
.. py:method:: _is_proxy_app_caller()
- Gets the caller for the current request context of async app. Applicable within a AIAgent execute function.
+ Checks if the caller is the cluster-proxy app.
+
+ :returns: True if the caller is the cluster-proxy app.
+ :rtype: bool
+
+
+
+ .. py:method:: _is_async_app_caller()
- :returns: True if the caller is proxy app.
+ Checks if the caller is the async app.
+
+ :returns: True if the caller is the async app.
:rtype: bool
@@ -4192,7 +4387,7 @@ Module Contents
- .. py:method:: get_streaming_chat_response(deployment_token, deployment_id, messages, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, ignore_documents = False, include_search_results = False)
+ .. py:method:: get_streaming_chat_response(deployment_token, deployment_id, messages, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, ignore_documents = False, include_search_results = False, user_info = None)
Return an asynchronous generator which continues the conversation based on the input messages and search results.
@@ -4220,10 +4415,12 @@ Module Contents
:type ignore_documents: bool
:param include_search_results: If True, will also return search results, if relevant.
:type include_search_results: bool
+ :param user_info: Information about the user to act on behalf of, for user-restricted data sources.
+ :type user_info: dict
- .. py:method:: get_streaming_conversation_response(deployment_token, deployment_id, message, deployment_conversation_id = None, external_session_id = None, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, ignore_documents = False, include_search_results = False)
+ .. py:method:: get_streaming_conversation_response(deployment_token, deployment_id, message, deployment_conversation_id = None, external_session_id = None, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, ignore_documents = False, include_search_results = False, user_info = None)
Return an asynchronous generator which continues the conversation based on the input messages and search results.
@@ -4255,6 +4452,8 @@ Module Contents
:type ignore_documents: bool
:param include_search_results: If True, will also return search results, if relevant.
:type include_search_results: bool
+ :param user_info: Information about the user to act on behalf of, for user-restricted data sources.
+ :type user_info: dict
@@ -4363,6 +4562,30 @@ Module Contents
+ .. py:method:: execute_agent_with_binary_data(deployment_token, deployment_id, arguments = None, keyword_arguments = None, deployment_conversation_id = None, external_session_id = None, blobs = None)
+
+ Executes a deployed AI agent function with binary data as inputs.
+
+ :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
+ :type deployment_token: str
+ :param deployment_id: A unique string identifier for the deployment created under the project.
+ :type deployment_id: str
+ :param arguments: Positional arguments to the agent execute function.
+ :type arguments: list
+ :param keyword_arguments: A dictionary where each 'key' represents the parameter name and its corresponding 'value' represents the value of that parameter for the agent execute function.
+ :type keyword_arguments: dict
+ :param deployment_conversation_id: A unique string identifier for the deployment conversation used for the conversation.
+ :type deployment_conversation_id: str
+ :param external_session_id: A unique string identifier for the session used for the conversation. If both deployment_conversation_id and external_session_id are not provided, a new session will be created.
+ :type external_session_id: str
+ :param blobs: A dictionary of binary data to use as inputs to the agent execute function.
+ :type blobs: dict
+
+ :returns: The result of the agent execution
+ :rtype: AgentDataExecutionResult
+
+
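+ A minimal sketch of passing a binary input (placeholder token, ID, file name, and keyword argument; whether the agent expects a file object or raw bytes depends on the agent's execute function):
+
+ .. code-block:: python
+
+    from abacusai import ApiClient
+
+    client = ApiClient('YOUR_API_KEY')  # placeholder API key
+
+    with open('report.pdf', 'rb') as f:  # any local binary file
+        result = client.execute_agent_with_binary_data(
+            deployment_token='DEPLOYMENT_TOKEN',  # placeholder
+            deployment_id='DEPLOYMENT_ID',        # placeholder
+            keyword_arguments={'question': 'Summarize the attached report.'},  # hypothetical parameter
+            blobs={'report.pdf': f},              # dictionary of binary inputs
+        )
+    # `result` is an AgentDataExecutionResult.
+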
+
.. py:method:: add_user_to_organization(email)
Invite a user to your organization. This method will send the specified email address an invitation link to join your organization.
@@ -4485,6 +4708,17 @@ Module Contents
+ .. py:method:: set_user_role_to_platform(workspace, email)
+
+ Set the user's role to have the Abacus.AI platform access.
+
+ :param workspace: The workspace for the user that's getting platform access.
+ :type workspace: str
+ :param email: The email for the user that's getting platform access.
+ :type email: str
+
+
+
.. py:method:: create_deployment_webhook(deployment_id, endpoint, webhook_event_type, payload_template = None)
Create a webhook attached to a given deployment ID.
@@ -4533,7 +4767,7 @@ Module Contents
:param name: The project's name.
:type name: str
- :param use_case: The use case that the project solves. Refer to our [guide on use cases](https://api.abacus.ai/app/help/useCases) for further details of each use case. The following enums are currently available for you to choose from: LANGUAGE_DETECTION, NLP_SENTIMENT, NLP_SEARCH, NLP_CHAT, CHAT_LLM, NLP_SENTENCE_BOUNDARY_DETECTION, NLP_CLASSIFICATION, NLP_SUMMARIZATION, NLP_DOCUMENT_VISUALIZATION, AI_AGENT, EMBEDDINGS_ONLY, MODEL_WITH_EMBEDDINGS, TORCH_MODEL, TORCH_MODEL_WITH_EMBEDDINGS, PYTHON_MODEL, NOTEBOOK_PYTHON_MODEL, DOCKER_MODEL, DOCKER_MODEL_WITH_EMBEDDINGS, CUSTOMER_CHURN, ENERGY, EVENT_ANOMALY_DETECTION, FINANCIAL_METRICS, CUMULATIVE_FORECASTING, FRAUD_ACCOUNT, FRAUD_TRANSACTIONS, CLOUD_SPEND, TIMESERIES_ANOMALY, OPERATIONS_MAINTENANCE, PERS_PROMOTIONS, PREDICTING, FEATURE_STORE, RETAIL, SALES_FORECASTING, SALES_SCORING, FEED_RECOMMEND, USER_RANKINGS, NAMED_ENTITY_RECOGNITION, USER_RECOMMENDATIONS, USER_RELATED, VISION, VISION_REGRESSION, VISION_OBJECT_DETECTION, FEATURE_DRIFT, SCHEDULING, GENERIC_FORECASTING, PRETRAINED_IMAGE_TEXT_DESCRIPTION, PRETRAINED_SPEECH_RECOGNITION, PRETRAINED_STYLE_TRANSFER, PRETRAINED_TEXT_TO_IMAGE_GENERATION, PRETRAINED_OCR_DOCUMENT_TO_TEXT, THEME_ANALYSIS, CLUSTERING, CLUSTERING_TIMESERIES, FINETUNED_LLM, PRETRAINED_INSTRUCT_PIX2PIX, PRETRAINED_TEXT_CLASSIFICATION.
+ :param use_case: The use case that the project solves. Refer to our [guide on use cases](https://api.abacus.ai/app/help/developer-platform/useCases) for further details of each use case. The following enums are currently available for you to choose from: LANGUAGE_DETECTION, NLP_SENTIMENT, NLP_SEARCH, NLP_CHAT, CHAT_LLM, NLP_SENTENCE_BOUNDARY_DETECTION, NLP_CLASSIFICATION, NLP_SUMMARIZATION, NLP_DOCUMENT_VISUALIZATION, AI_AGENT, EMBEDDINGS_ONLY, MODEL_WITH_EMBEDDINGS, TORCH_MODEL, TORCH_MODEL_WITH_EMBEDDINGS, PYTHON_MODEL, NOTEBOOK_PYTHON_MODEL, DOCKER_MODEL, DOCKER_MODEL_WITH_EMBEDDINGS, CUSTOMER_CHURN, ENERGY, EVENT_ANOMALY_DETECTION, FINANCIAL_METRICS, CUMULATIVE_FORECASTING, FRAUD_ACCOUNT, FRAUD_TRANSACTIONS, CLOUD_SPEND, TIMESERIES_ANOMALY, OPERATIONS_MAINTENANCE, PERS_PROMOTIONS, PREDICTING, FEATURE_STORE, RETAIL, SALES_FORECASTING, SALES_SCORING, FEED_RECOMMEND, USER_RANKINGS, NAMED_ENTITY_RECOGNITION, USER_RECOMMENDATIONS, USER_RELATED, VISION, VISION_REGRESSION, VISION_OBJECT_DETECTION, FEATURE_DRIFT, SCHEDULING, GENERIC_FORECASTING, PRETRAINED_IMAGE_TEXT_DESCRIPTION, PRETRAINED_SPEECH_RECOGNITION, PRETRAINED_STYLE_TRANSFER, PRETRAINED_TEXT_TO_IMAGE_GENERATION, PRETRAINED_OCR_DOCUMENT_TO_TEXT, THEME_ANALYSIS, CLUSTERING, CLUSTERING_TIMESERIES, FINETUNED_LLM, PRETRAINED_INSTRUCT_PIX2PIX, PRETRAINED_TEXT_CLASSIFICATION.
:type use_case: str
:returns: This object represents the newly created project.
@@ -4541,6 +4775,27 @@ Module Contents
+ .. py:method:: list_projects_dashboard(updated_filter = None, limit = 50, since_project_id = None, search = None, starred = None, tag = None, tags = None)
+
+ Retrieves a list of projects for dashboard display with filtering and pagination support.
+
+ :param updated_filter: Unix timestamp to filter projects updated after this time.
+ :type updated_filter: int
+ :param limit: The maximum number of projects to return (default: 50).
+ :type limit: int
+ :param since_project_id: The ID of the project after which the list starts for pagination.
+ :type since_project_id: str
+ :param search: Search term to filter projects by name, creator email, or use case.
+ :type search: str
+ :param starred: Filter by starred status (True for starred, False for non-starred).
+ :type starred: bool
+ :param tag: Filter by a single tag (deprecated, use tags instead).
+ :type tag: str
+ :param tags: List of tags to filter projects by.
+ :type tags: list
+
+
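+ A minimal sketch of filtering the dashboard listing (illustrative filter values only):
+
+ .. code-block:: python
+
+    from abacusai import ApiClient
+
+    client = ApiClient('YOUR_API_KEY')  # placeholder API key
+
+    # Most recent 20 starred projects tagged 'forecasting' (hypothetical tag).
+    projects = client.list_projects_dashboard(
+        limit=20,
+        starred=True,
+        tags=['forecasting'],
+    )
+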
+
.. py:method:: rename_project(project_id, name)
This method renames a project after it is created.
@@ -4592,6 +4847,23 @@ Module Contents
+ .. py:method:: get_raw_data_from_realtime_dataset(dataset_id, check_permissions = False, start_time = None, end_time = None, column_filter = None)
+
+ Returns raw data from a realtime dataset. Only Microsoft Teams datasets are supported currently due to data size constraints in realtime datasets.
+
+ :param dataset_id: The unique ID associated with the dataset.
+ :type dataset_id: str
+ :param check_permissions: If True, checks user permissions using session email.
+ :type check_permissions: bool
+ :param start_time: Start time filter (inclusive) for created_date_time_t in ISO 8601 format (e.g. 2025-05-13T08:25:11Z or 2025-05-13T08:25:11+00:00).
+ :type start_time: str
+ :param end_time: End time filter (inclusive) for created_date_time_t in ISO 8601 format (e.g. 2025-05-13T08:25:11Z or 2025-05-13T08:25:11+00:00).
+ :type end_time: str
+ :param column_filter: Dictionary mapping column names to filter values. Only rows matching all column filters will be returned.
+ :type column_filter: dict
+
+
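+ A minimal sketch of a time-bounded, filtered read (placeholder dataset ID; the column name and value are hypothetical):
+
+ .. code-block:: python
+
+    from abacusai import ApiClient
+
+    client = ApiClient('YOUR_API_KEY')  # placeholder API key
+
+    rows = client.get_raw_data_from_realtime_dataset(
+        dataset_id='DATASET_ID',                    # placeholder
+        start_time='2025-05-13T00:00:00Z',          # inclusive ISO 8601 bounds
+        end_time='2025-05-14T00:00:00Z',
+        column_filter={'channel_name': 'general'},  # hypothetical column/value
+    )
+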
+
.. py:method:: add_feature_group_to_project(feature_group_id, project_id, feature_group_type = 'CUSTOM_TABLE')
Adds a feature group to a project.
@@ -4662,6 +4934,19 @@ Module Contents
+ .. py:method:: add_project_scope_for_user(project_id, email, scope)
+
+ Add a user to a project.
+
+ :param project_id: The project's id.
+ :type project_id: str
+ :param email: The user's email.
+ :type email: str
+ :param scope: The list of project scopes.
+ :type scope: list
+
+
+
.. py:method:: add_annotation(annotation, feature_group_id, feature_name, doc_id = None, feature_group_row_identifier = None, annotation_source = 'ui', status = None, comments = None, project_id = None, save_metadata = False, pages = None)
Add an annotation entry to the database.
@@ -4876,16 +5161,17 @@ Module Contents
Creates a new feature group defined as the union of other feature group versions.
- Args:
- source_feature_group_id (str): Unique string identifier corresponding to the dataset feature group that will have its versions merged into this feature group.
- table_name (str): Unique string identifier to be given to this merge feature group. Can be up to 120 characters long and can only contain alphanumeric characters and underscores.
- merge_config (MergeConfig): JSON object defining the merging method and its parameters.
- description (str): Human-readable description of this feature group.
+ :param source_feature_group_id: Unique string identifier corresponding to the dataset feature group that will have its versions merged into this feature group.
+ :type source_feature_group_id: str
+ :param table_name: Unique string identifier to be given to this merge feature group. Can be up to 120 characters long and can only contain alphanumeric characters and underscores.
+ :type table_name: str
+ :param merge_config: JSON object defining the merging method and its parameters.
+ :type merge_config: MergeConfig
+ :param description: Human-readable description of this feature group.
+ :type description: str
- Returns:
- FeatureGroup: The created feature group.
- Description:
- Creates a new feature group defined as the union of other feature group versions.
+ :returns: The created feature group.
+ :rtype: FeatureGroup
@@ -5652,7 +5938,7 @@ Module Contents
:type database_connector_id: str
:param object_name: Name of the database object to write to.
:type object_name: str
- :param write_mode: Enum string indicating whether to use INSERT or UPSERT.
+ :param write_mode: Enum string indicating whether to use INSERT, UPSERT, or REPLACE.
:type write_mode: str
:param database_feature_mapping: Key/value pair JSON object of "database connector column" -> "feature name" pairs.
:type database_feature_mapping: dict
@@ -6019,7 +6305,7 @@ Module Contents
- .. py:method:: create_dataset_from_application_connector(table_name, application_connector_id, dataset_config = None, refresh_schedule = None, version_limit = 30)
+ .. py:method:: create_dataset_from_application_connector(table_name, application_connector_id, dataset_config = None, refresh_schedule = None, version_limit = 30, incremental = False)
Creates a dataset from an Application Connector.
@@ -6033,6 +6319,8 @@ Module Contents
:type refresh_schedule: str
:param version_limit: The number of recent versions to preserve for the dataset (minimum 30).
:type version_limit: int
+ :param incremental: Signifies whether the dataset is an incremental dataset.
+ :type incremental: bool
:returns: The created dataset.
:rtype: Dataset
@@ -8079,7 +8367,7 @@ Module Contents
- .. py:method:: get_chat_response(deployment_token, deployment_id, messages, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None)
+ .. py:method:: get_chat_response(deployment_token, deployment_id, messages, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, user_info = None)
Return a chat response which continues the conversation based on the input messages and search results.
@@ -8135,7 +8423,7 @@ Module Contents
- .. py:method:: get_conversation_response(deployment_id, message, deployment_token, deployment_conversation_id = None, external_session_id = None, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, doc_infos = None)
+ .. py:method:: get_conversation_response(deployment_id, message, deployment_token, deployment_conversation_id = None, external_session_id = None, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, doc_infos = None, user_info = None, execute_usercode_tool = False)
Return a conversation response which continues the conversation based on the input message and deployment conversation id (if exists).
@@ -8165,6 +8453,8 @@ Module Contents
:type chat_config: dict
:param doc_infos: An optional list of documents use for the conversation. A keyword 'doc_id' is expected to be present in each document for retrieving contents from docstore.
:type doc_infos: list
+ :param execute_usercode_tool: If True, will return the tool output in the response.
+ :type execute_usercode_tool: bool
@@ -8201,6 +8491,17 @@ Module Contents
+ .. py:method:: get_deep_agent_response(message, deployment_conversation_id = None)
+
+ Return a DeepAgent response, with generated files if any, based on the input message.
+
+ :param message: The user's message/task for DeepAgent to complete.
+ :type message: str
+ :param deployment_conversation_id: The unique identifier of a deployment conversation to continue. If not specified, a new one will be created.
+ :type deployment_conversation_id: str
+
+
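+ A minimal sketch of starting a DeepAgent task (the task text is illustrative):
+
+ .. code-block:: python
+
+    from abacusai import ApiClient
+
+    client = ApiClient('YOUR_API_KEY')  # placeholder API key
+
+    response = client.get_deep_agent_response(
+        message='Build a one-page summary of last quarter sales.',  # example task
+    )
+    # Pass deployment_conversation_id on follow-up calls to continue the same conversation.
+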
+
.. py:method:: get_search_results(deployment_token, deployment_id, query_data, num = 15)
Return the most relevant search results to the search query from the uploaded documents.
@@ -8319,6 +8620,36 @@ Module Contents
+ .. py:method:: get_optimization_inputs_from_serialized(deployment_token, deployment_id, query_data = None)
+
+ Get the optimization inputs for a given query, computed from new serialized inputs.
+
+ :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be safely embedded in an application or website.
+ :type deployment_token: str
+ :param deployment_id: The unique identifier of a deployment created under the project.
+ :type deployment_id: str
+ :param query_data: A dictionary of key/value pairs corresponding to updated FGs in the FG tree that should be updated to compute new top-level FGs for online solve. Keys are FG names (the same names used during training) and values are serialized dataframes.
+ :type query_data: dict
+
+
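+ A minimal calling sketch (placeholder credentials and FG name; the serialization format of each dataframe must match what the deployment expects — the JSON form below is purely illustrative):
+
+ .. code-block:: python
+
+    from abacusai import ApiClient
+    import pandas as pd
+
+    client = ApiClient('YOUR_API_KEY')  # placeholder API key
+
+    demand_df = pd.DataFrame({'sku': ['a'], 'demand': [10]})
+    query_data = {'demand_fg': demand_df.to_json()}  # illustrative serialization; key is a training FG name
+
+    inputs = client.get_optimization_inputs_from_serialized(
+        deployment_token='DEPLOYMENT_TOKEN',  # placeholder
+        deployment_id='DEPLOYMENT_ID',        # placeholder
+        query_data=query_data,
+    )
+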
+
+ .. py:method:: get_assignments_online_with_new_serialized_inputs(deployment_token, deployment_id, query_data = None, solve_time_limit_seconds = None, optimality_gap_limit = None)
+
+ Get assignments for a given query, with new serialized inputs.
+
+ :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be safely embedded in an application or website.
+ :type deployment_token: str
+ :param deployment_id: The unique identifier of a deployment created under the project.
+ :type deployment_id: str
+ :param query_data: A dictionary containing the serialized assignment, constraint, and constraint_equations_df dataframes (under these specific keys).
+ :type query_data: dict
+ :param solve_time_limit_seconds: Maximum time in seconds to spend solving the query.
+ :type solve_time_limit_seconds: float
+ :param optimality_gap_limit: The optimality gap to come within, after which the solution is accepted as valid (0 means only an optimal solution is accepted). Computed as abs(best_solution_found - best_bound) / abs(best_solution_found).
+ :type optimality_gap_limit: float
+
+
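+ A minimal calling sketch (placeholder credentials; the serialized values stand in for dataframes serialized in whatever format the deployment expects):
+
+ .. code-block:: python
+
+    from abacusai import ApiClient
+
+    client = ApiClient('YOUR_API_KEY')  # placeholder API key
+
+    assignments = client.get_assignments_online_with_new_serialized_inputs(
+        deployment_token='DEPLOYMENT_TOKEN',                       # placeholder
+        deployment_id='DEPLOYMENT_ID',                             # placeholder
+        query_data={                                               # serialized dataframes under these exact keys
+            'assignment': '<serialized assignment dataframe>',
+            'constraint': '<serialized constraint dataframe>',
+            'constraint_equations_df': '<serialized equations dataframe>',
+        },
+        solve_time_limit_seconds=30.0,
+        optimality_gap_limit=0.01,  # accept a solution within 1% of the best bound
+    )
+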
+
.. py:method:: check_constraints(deployment_token, deployment_id, query_data)
Check for any constraints violated by the overrides.
@@ -8476,19 +8807,6 @@ Module Contents
- .. py:method:: generate_image(deployment_token, deployment_id, query_data)
-
- Generate an image from text prompt.
-
- :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model within an application or website.
- :type deployment_token: str
- :param deployment_id: A unique identifier to a deployment created under the project.
- :type deployment_id: str
- :param query_data: Specifies the text prompt. For example, {'prompt': 'a cat'}
- :type query_data: dict
-
-
-
.. py:method:: execute_agent(deployment_token, deployment_id, arguments = None, keyword_arguments = None)
Executes a deployed AI agent function using the arguments as keyword arguments to the agent execute function.
@@ -8606,30 +8924,6 @@ Module Contents
- .. py:method:: execute_agent_with_binary_data(deployment_token, deployment_id, arguments = None, keyword_arguments = None, deployment_conversation_id = None, external_session_id = None, blobs = None)
-
- Executes a deployed AI agent function with binary data as inputs.
-
- :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
- :type deployment_token: str
- :param deployment_id: A unique string identifier for the deployment created under the project.
- :type deployment_id: str
- :param arguments: Positional arguments to the agent execute function.
- :type arguments: list
- :param keyword_arguments: A dictionary where each 'key' represents the parameter name and its corresponding 'value' represents the value of that parameter for the agent execute function.
- :type keyword_arguments: dict
- :param deployment_conversation_id: A unique string identifier for the deployment conversation used for the conversation.
- :type deployment_conversation_id: str
- :param external_session_id: A unique string identifier for the session used for the conversation. If both deployment_conversation_id and external_session_id are not provided, a new session will be created.
- :type external_session_id: str
- :param blobs: A dictionary of binary data to use as inputs to the agent execute function.
- :type blobs: None
-
- :returns: The result of the agent execution
- :rtype: AgentDataExecutionResult
-
-
-
.. py:method:: start_autonomous_agent(deployment_token, deployment_id, arguments = None, keyword_arguments = None, save_conversations = True)
Starts a deployed Autonomous agent associated with the given deployment_conversation_id using the arguments and keyword arguments as inputs for execute function of trigger node.
@@ -9011,7 +9305,7 @@ Module Contents
- .. py:method:: create_python_function(name, source_code = None, function_name = None, function_variable_mappings = None, package_requirements = None, function_type = 'FEATURE_GROUP', description = None, examples = None)
+ .. py:method:: create_python_function(name, source_code = None, function_name = None, function_variable_mappings = None, package_requirements = None, function_type = 'FEATURE_GROUP', description = None, examples = None, user_level_connectors = None, org_level_connectors = None, output_variable_mappings = None)
Creates a custom Python function that is reusable.
@@ -9029,15 +9323,21 @@ Module Contents
:type function_type: str
:param description: Description of the Python function. This should include details about the function's purpose, expected inputs and outputs, and any important usage considerations or limitations.
:type description: str
- :param examples: Dictionary containing example use cases and anti-patterns. Should include 'positive_examples' showing recommended usage and 'negative_examples' showing cases to avoid.
+ :param examples: Dictionary containing example use cases and anti-patterns. Should include 'positive_examples' showing recommended usage and 'negative_examples' showing cases to avoid.
:type examples: dict
+ :param user_level_connectors: Dictionary containing user level connectors.
+ :type user_level_connectors: Dict
+ :param org_level_connectors: List containing organization level connectors.
+ :type org_level_connectors: List
+ :param output_variable_mappings: List of output variable mappings that defines the elements of the function's return value.
+ :type output_variable_mappings: List
:returns: The Python function that can be used (e.g. for feature group transform).
:rtype: PythonFunction
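+ A minimal sketch of registering a feature-group transform (the function name and source below are hypothetical examples):
+
+ .. code-block:: python
+
+    from abacusai import ApiClient
+
+    client = ApiClient('YOUR_API_KEY')  # placeholder API key
+
+    source = '''
+    def add_revenue_column(df):
+        df['revenue'] = df['price'] * df['quantity']
+        return df
+    '''
+
+    fn = client.create_python_function(
+        name='add_revenue_column',        # hypothetical name
+        source_code=source,
+        function_name='add_revenue_column',
+        function_type='FEATURE_GROUP',
+        description='Adds a revenue column computed as price * quantity.',
+    )
+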
- .. py:method:: update_python_function(name, source_code = None, function_name = None, function_variable_mappings = None, package_requirements = None, description = None, examples = None)
+ .. py:method:: update_python_function(name, source_code = None, function_name = None, function_variable_mappings = None, package_requirements = None, description = None, examples = None, user_level_connectors = None, org_level_connectors = None, output_variable_mappings = None)
Update custom python function with user inputs for the given python function.
@@ -9055,6 +9355,12 @@ Module Contents
:type description: str
:param examples: Dictionary containing example use cases and anti-patterns. Should include 'positive_examples' showing recommended usage and 'negative_examples' showing cases to avoid.
:type examples: dict
+ :param user_level_connectors: Dictionary containing user level connectors.
+ :type user_level_connectors: Dict
+ :param org_level_connectors: List of organization level connectors.
+ :type org_level_connectors: List
+ :param output_variable_mappings: List of output variable mappings that defines the elements of the function's return value.
+ :type output_variable_mappings: List
:returns: The Python function object.
:rtype: PythonFunction
@@ -9636,7 +9942,7 @@ Module Contents
- .. py:method:: create_organization_secret(secret_key, value)
+ .. py:method:: create_organization_secret(secret_key, value, secret_type = OrganizationSecretType.ORG_SECRET, metadata = None)
Creates a secret which can be accessed in functions and notebooks.
@@ -9644,6 +9950,10 @@ Module Contents
:type secret_key: str
:param value: The secret value.
:type value: str
+ :param secret_type: The type of secret. Use OrganizationSecretType enum values.
+ :type secret_type: OrganizationSecretType
+ :param metadata: Additional metadata for the secret.
+ :type metadata: dict
:returns: The created secret.
:rtype: OrganizationSecret
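+ A minimal sketch of creating a secret (the key and value are placeholders):
+
+ .. code-block:: python
+
+    from abacusai import ApiClient
+
+    client = ApiClient('YOUR_API_KEY')  # placeholder API key
+
+    secret = client.create_organization_secret(
+        secret_key='SLACK_WEBHOOK_URL',                # hypothetical key
+        value='https://hooks.slack.com/services/XXX',  # placeholder value
+    )
+    # secret_type defaults to OrganizationSecretType.ORG_SECRET; pass the enum to override,
+    # and metadata can carry an optional dict of additional details.
+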
@@ -9829,6 +10139,49 @@ Module Contents
+ .. py:method:: add_user_group_object_permission(object_id, user_group_id, object_type, permission = 'ALL')
+
+ Add user group object permission for any object type.
+
+ :param object_id: The ID of the object (dataset_id, project_id, external_connection_id, feature_group_id, etc.)
+ :type object_id: str
+ :param user_group_id: The ID of the user group
+ :type user_group_id: str
+ :param object_type: The type of object ('dataset', 'project', 'external_connection', 'feature_group')
+ :type object_type: str
+ :param permission: The permission to grant (default: 'ALL')
+ :type permission: str
+
+
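+ A minimal sketch of granting a user group access to a dataset (placeholder IDs):
+
+ .. code-block:: python
+
+    from abacusai import ApiClient
+
+    client = ApiClient('YOUR_API_KEY')  # placeholder API key
+
+    client.add_user_group_object_permission(
+        object_id='DATASET_ID',         # placeholder dataset ID
+        user_group_id='USER_GROUP_ID',  # placeholder user group ID
+        object_type='dataset',
+        permission='ALL',
+    )
+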
+
+ .. py:method:: update_user_group_object_permission(object_id, user_group_id, object_type, permission = 'ALL')
+
+ Update user group object permission for any object type.
+
+ :param object_id: The ID of the object (dataset_id, project_id, external_connection_id, feature_group_id, etc.)
+ :type object_id: str
+ :param user_group_id: The ID of the user group
+ :type user_group_id: str
+ :param object_type: The type of object ('dataset', 'project', 'external_connection', 'feature_group')
+ :type object_type: str
+ :param permission: The permission to grant (default: 'ALL')
+ :type permission: str
+
+
+
+ .. py:method:: remove_user_group_object_permission(object_id, user_group_id, object_type)
+
+ Remove user group object permission for any object type.
+
+ :param object_id: The ID of the object (dataset_id, project_id, external_connection_id, feature_group_id, etc.)
+ :type object_id: str
+ :param user_group_id: The ID of the user group
+ :type user_group_id: str
+ :param object_type: The type of object ('dataset', 'project', 'external_connection', 'feature_group')
+ :type object_type: str
+
+
+
.. py:method:: create_app_user_group(name)
Creates a new App User Group. This User Group is used to have permissions to access the external chatbots.
@@ -9929,6 +10282,24 @@ Module Contents
+ .. py:method:: add_developers_to_external_application(external_application_id)
+
+ Adds a permission for the platform App User Group to access an External Application.
+
+ :param external_application_id: The ID of the External Application.
+ :type external_application_id: str
+
+
+
+ .. py:method:: remove_developers_from_external_application(external_application_id)
+
+ Removes a permission for the platform App User Group to access an External Application.
+
+ :param external_application_id: The ID of the External Application.
+ :type external_application_id: str
+
+
+
.. py:method:: create_external_application(deployment_id, name = None, description = None, logo = None, theme = None)
Creates a new External Application from an existing ChatLLM Deployment.
@@ -10388,3 +10759,22 @@ Module Contents
+ .. py:method:: query_mcp_server(task, server_name = None, mcp_server_connection_id = None)
+
+ Query a Remote MCP server. For a given task, it runs the required tools of the MCP server with appropriate arguments and returns the output.
+
+
+ :param task: A comprehensive description of the task to perform using the MCP server tools.
+ :type task: str
+ :param server_name: The name of the MCP server.
+ :type server_name: str
+ :param mcp_server_connection_id: The connection ID of the MCP server to query.
+ :type mcp_server_connection_id: str
+
+ :returns: The execution logs as well as the final output of the task execution.
+ :rtype: McpServerQueryResult
+
+
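+ A minimal sketch of running a task against a connected MCP server (the server name and task text are hypothetical):
+
+ .. code-block:: python
+
+    from abacusai import ApiClient
+
+    client = ApiClient('YOUR_API_KEY')  # placeholder API key
+
+    result = client.query_mcp_server(
+        task='List the open pull requests in the acme/website repo and summarize each one.',
+        server_name='github',  # hypothetical MCP server name
+    )
+    # `result` is a McpServerQueryResult with execution logs and the final output.
+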
+
diff --git a/docs/_sources/autoapi/abacusai/code_autocomplete_edit_prediction_response/index.rst.txt b/docs/_sources/autoapi/abacusai/code_autocomplete_edit_prediction_response/index.rst.txt
new file mode 100644
index 000000000..6fb4f96ef
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/code_autocomplete_edit_prediction_response/index.rst.txt
@@ -0,0 +1,57 @@
+abacusai.code_autocomplete_edit_prediction_response
+===================================================
+
+.. py:module:: abacusai.code_autocomplete_edit_prediction_response
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.code_autocomplete_edit_prediction_response.CodeAutocompleteEditPredictionResponse
+
+
+Module Contents
+---------------
+
+.. py:class:: CodeAutocompleteEditPredictionResponse(client, autocompleteResponse=None, showAutocomplete=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ An autocomplete response from an LLM
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param autocompleteResponse: The autocomplete code.
+ :type autocompleteResponse: str
+ :param showAutocomplete: Whether to show autocomplete in the client
+ :type showAutocomplete: bool
+
+
+ .. py:attribute:: autocomplete_response
+ :value: None
+
+
+
+ .. py:attribute:: show_autocomplete
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/code_bot/index.rst.txt b/docs/_sources/autoapi/abacusai/code_bot/index.rst.txt
index 0808b7def..e19509f75 100644
--- a/docs/_sources/autoapi/abacusai/code_bot/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/code_bot/index.rst.txt
@@ -15,7 +15,7 @@ Classes
Module Contents
---------------
-.. py:class:: CodeBot(client, llmName=None, name=None, imageUploadSupported=None, codeAgentSupported=None, isPremium=None)
+.. py:class:: CodeBot(client, llmName=None, name=None, imageUploadSupported=None, codeAgentSupported=None, codeEditSupported=None, isPremium=None, llmBotIcon=None, provider=None, isUserApiKeyAllowed=None, isRateLimited=None, apiKeyUrl=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -32,8 +32,20 @@ Module Contents
:type imageUploadSupported: bool
:param codeAgentSupported: Whether the LLM supports code agent.
:type codeAgentSupported: bool
+ :param codeEditSupported: Whether the LLM supports code edit.
+ :type codeEditSupported: bool
:param isPremium: Whether the LLM is a premium LLM.
:type isPremium: bool
+ :param llmBotIcon: The icon of the LLM bot.
+ :type llmBotIcon: str
+ :param provider: The provider of the LLM.
+ :type provider: str
+ :param isUserApiKeyAllowed: Whether the LLM supports user API key.
+ :type isUserApiKeyAllowed: bool
+ :param isRateLimited: Whether the LLM is rate limited.
+ :type isRateLimited: bool
+ :param apiKeyUrl: The URL to get the API key.
+ :type apiKeyUrl: str
.. py:attribute:: llm_name
@@ -56,11 +68,41 @@ Module Contents
+ .. py:attribute:: code_edit_supported
+ :value: None
+
+
+
.. py:attribute:: is_premium
:value: None
+ .. py:attribute:: llm_bot_icon
+ :value: None
+
+
+
+ .. py:attribute:: provider
+ :value: None
+
+
+
+ .. py:attribute:: is_user_api_key_allowed
+ :value: None
+
+
+
+ .. py:attribute:: is_rate_limited
+ :value: None
+
+
+
+ .. py:attribute:: api_key_url
+ :value: None
+
+
+
.. py:attribute:: deprecated_keys
diff --git a/docs/_sources/autoapi/abacusai/code_embeddings/index.rst.txt b/docs/_sources/autoapi/abacusai/code_embeddings/index.rst.txt
new file mode 100644
index 000000000..4778aae1d
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/code_embeddings/index.rst.txt
@@ -0,0 +1,57 @@
+abacusai.code_embeddings
+========================
+
+.. py:module:: abacusai.code_embeddings
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.code_embeddings.CodeEmbeddings
+
+
+Module Contents
+---------------
+
+.. py:class:: CodeEmbeddings(client, embeddings=None, chunkingScheme=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Code embeddings
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param embeddings: A dictionary mapping the file name to its embeddings.
+ :type embeddings: dict
+ :param chunkingScheme: The scheme used for chunking the embeddings.
+ :type chunkingScheme: str
+
+
+ .. py:attribute:: embeddings
+ :value: None
+
+
+
+ .. py:attribute:: chunking_scheme
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/code_llm_changed_files/index.rst.txt b/docs/_sources/autoapi/abacusai/code_llm_changed_files/index.rst.txt
new file mode 100644
index 000000000..f6ea174ed
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/code_llm_changed_files/index.rst.txt
@@ -0,0 +1,64 @@
+abacusai.code_llm_changed_files
+===============================
+
+.. py:module:: abacusai.code_llm_changed_files
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.code_llm_changed_files.CodeLlmChangedFiles
+
+
+Module Contents
+---------------
+
+.. py:class:: CodeLlmChangedFiles(client, addedFiles=None, updatedFiles=None, deletedFiles=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Code changed files
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param addedFiles: A list of added file paths.
+ :type addedFiles: list
+ :param updatedFiles: A list of updated file paths.
+ :type updatedFiles: list
+ :param deletedFiles: A list of deleted file paths.
+ :type deletedFiles: list
+
+
+ .. py:attribute:: added_files
+ :value: None
+
+
+
+ .. py:attribute:: updated_files
+ :value: None
+
+
+
+ .. py:attribute:: deleted_files
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/code_suggestion_validation_response/index.rst.txt b/docs/_sources/autoapi/abacusai/code_suggestion_validation_response/index.rst.txt
new file mode 100644
index 000000000..bbcf41cdc
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/code_suggestion_validation_response/index.rst.txt
@@ -0,0 +1,50 @@
+abacusai.code_suggestion_validation_response
+============================================
+
+.. py:module:: abacusai.code_suggestion_validation_response
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.code_suggestion_validation_response.CodeSuggestionValidationResponse
+
+
+Module Contents
+---------------
+
+.. py:class:: CodeSuggestionValidationResponse(client, isValid=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ A response from an LLM to validate a code suggestion.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param isValid: Whether the code suggestion is valid.
+ :type isValid: bool
+
+
+ .. py:attribute:: is_valid
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/code_summary_response/index.rst.txt b/docs/_sources/autoapi/abacusai/code_summary_response/index.rst.txt
new file mode 100644
index 000000000..7327aa747
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/code_summary_response/index.rst.txt
@@ -0,0 +1,50 @@
+abacusai.code_summary_response
+==============================
+
+.. py:module:: abacusai.code_summary_response
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.code_summary_response.CodeSummaryResponse
+
+
+Module Contents
+---------------
+
+.. py:class:: CodeSummaryResponse(client, summary=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ A summary response from an LLM
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param summary: The summary of the code.
+ :type summary: str
+
+
+ .. py:attribute:: summary
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/codellm_embedding_constants/index.rst.txt b/docs/_sources/autoapi/abacusai/codellm_embedding_constants/index.rst.txt
new file mode 100644
index 000000000..116f7fd8f
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/codellm_embedding_constants/index.rst.txt
@@ -0,0 +1,78 @@
+abacusai.codellm_embedding_constants
+====================================
+
+.. py:module:: abacusai.codellm_embedding_constants
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.codellm_embedding_constants.CodellmEmbeddingConstants
+
+
+Module Contents
+---------------
+
+.. py:class:: CodellmEmbeddingConstants(client, maxSupportedWorkspaceFiles=None, maxSupportedWorkspaceChunks=None, maxConcurrentRequests=None, fileExtensionToChunkingScheme=None, idleTimeoutSeconds=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ A dictionary of constants to be used in the autocomplete.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param maxSupportedWorkspaceFiles: Max supported workspace files
+ :type maxSupportedWorkspaceFiles: int
+ :param maxSupportedWorkspaceChunks: Max supported workspace chunks
+ :type maxSupportedWorkspaceChunks: int
+ :param maxConcurrentRequests: Max concurrent requests
+ :type maxConcurrentRequests: int
+ :param fileExtensionToChunkingScheme: Map between the file extensions and their chunking schema
+ :type fileExtensionToChunkingScheme: dict
+ :param idleTimeoutSeconds: The idle timeout without any activity before the workspace is refreshed.
+ :type idleTimeoutSeconds: int
+
+
+ .. py:attribute:: max_supported_workspace_files
+ :value: None
+
+
+
+ .. py:attribute:: max_supported_workspace_chunks
+ :value: None
+
+
+
+ .. py:attribute:: max_concurrent_requests
+ :value: None
+
+
+
+ .. py:attribute:: file_extension_to_chunking_scheme
+ :value: None
+
+
+
+ .. py:attribute:: idle_timeout_seconds
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/compute_point_info/index.rst.txt b/docs/_sources/autoapi/abacusai/compute_point_info/index.rst.txt
index c4f78b8b9..30d5bade3 100644
--- a/docs/_sources/autoapi/abacusai/compute_point_info/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/compute_point_info/index.rst.txt
@@ -15,7 +15,7 @@ Classes
Module Contents
---------------
-.. py:class:: ComputePointInfo(client, updatedAt=None, last24HoursUsage=None, last7DaysUsage=None, currMonthAvailPoints=None, currMonthUsage=None, lastThrottlePopUp=None)
+.. py:class:: ComputePointInfo(client, updatedAt=None, last24HoursUsage=None, last7DaysUsage=None, currMonthAvailPoints=None, currMonthUsage=None, lastThrottlePopUp=None, alwaysDisplay=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -36,6 +36,8 @@ Module Contents
:type currMonthUsage: int
:param lastThrottlePopUp: The last time the organization was throttled
:type lastThrottlePopUp: str
+ :param alwaysDisplay: Whether to always display the compute point toggle
+ :type alwaysDisplay: bool
.. py:attribute:: updated_at
@@ -68,6 +70,11 @@ Module Contents
+ .. py:attribute:: always_display
+ :value: None
+
+
+
.. py:attribute:: deprecated_keys
diff --git a/docs/_sources/autoapi/abacusai/constants_autocomplete_response/index.rst.txt b/docs/_sources/autoapi/abacusai/constants_autocomplete_response/index.rst.txt
new file mode 100644
index 000000000..2bc1601b7
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/constants_autocomplete_response/index.rst.txt
@@ -0,0 +1,316 @@
+abacusai.constants_autocomplete_response
+========================================
+
+.. py:module:: abacusai.constants_autocomplete_response
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.constants_autocomplete_response.ConstantsAutocompleteResponse
+
+
+Module Contents
+---------------
+
+.. py:class:: ConstantsAutocompleteResponse(client, maxPendingRequests=None, acceptanceDelay=None, debounceDelay=None, recordUserAction=None, validateSuggestion=None, validationLinesThreshold=None, maxTrackedRecentChanges=None, diffThreshold=None, derivativeThreshold=None, defaultSurroundingLines=None, maxTrackedVisitChanges=None, selectionCooldownMs=None, viewingCooldownMs=None, maxLines=None, editCooldownMs=None, scrollDebounceMs=None, lspDeadline=None, diagnosticsThreshold=None, diagnosticEachThreshold=None, numVsCodeSuggestions=None, minReindexingInterval=None, minRefreshSummaryInterval=None, summaryBatchSize=None, jobReorderInterval=None, stopRapidChanges=None, delaySummaryBatches=None, delaySummaryBatchesRateLimit=None, maxSymbolsFuzzyMatch=None, fuzzySymbolMatchThreshold=None, symbolsCacheUpdateInterval=None, symbolsStorageUpdateInterval=None, editPredictionSimilarityThreshold=None, minSearchWordLength=None, maxOccurrencesPerWord=None, maxWordsContentMatches=None, editPredictionEnabled=None, snapshotIntervalMs=None, linesForSnapshot=None, embeddingConstants=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ A dictionary of constants to be used in the autocomplete.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param maxPendingRequests: The maximum number of pending requests.
+ :type maxPendingRequests: int
+ :param acceptanceDelay: The acceptance delay.
+ :type acceptanceDelay: int
+ :param debounceDelay: The debounce delay.
+ :type debounceDelay: int
+ :param recordUserAction: Whether to record user action.
+ :type recordUserAction: bool
+ :param validateSuggestion: Whether to validate the suggestion.
+ :type validateSuggestion: bool
+ :param validationLinesThreshold: The number of lines to validate the suggestion.
+ :type validationLinesThreshold: int
+ :param maxTrackedRecentChanges: The maximum number of recent file changes to track.
+ :type maxTrackedRecentChanges: int
+ :param diffThreshold: The diff operations threshold.
+ :type diffThreshold: int
+ :param derivativeThreshold: The derivative threshold for deletions
+ :type derivativeThreshold: int
+ :param defaultSurroundingLines: The default number of surrounding lines to include in the recently visited context.
+ :type defaultSurroundingLines: int
+ :param maxTrackedVisitChanges: The maximum number of recently visited ranges to track.
+ :type maxTrackedVisitChanges: int
+ :param selectionCooldownMs: The cooldown time in milliseconds for selection changes.
+ :type selectionCooldownMs: int
+ :param viewingCooldownMs: The cooldown time in milliseconds for viewing changes.
+ :type viewingCooldownMs: int
+ :param maxLines: The maximum number of lines to include in recently visited context.
+ :type maxLines: int
+ :param editCooldownMs: The cooldown time in milliseconds after last edit.
+ :type editCooldownMs: int
+ :param scrollDebounceMs: The debounce time in milliseconds for scroll events.
+ :type scrollDebounceMs: int
+ :param lspDeadline: The deadline in milliseconds for LSP context.
+ :type lspDeadline: int
+ :param diagnosticsThreshold: The max number of diagnostics to show.
+ :type diagnosticsThreshold: int
+ :param diagnosticEachThreshold: The max number of characters to show for each diagnostic type.
+ :type diagnosticEachThreshold: int
+ :param numVsCodeSuggestions: The number of VS Code suggestions to show.
+ :type numVsCodeSuggestions: int
+ :param minReindexingInterval: The minimum interval between reindexes in ms.
+ :type minReindexingInterval: int
+ :param minRefreshSummaryInterval: The minimum interval between refresh summary in ms.
+ :type minRefreshSummaryInterval: int
+ :param summaryBatchSize: The batch size for code summary in autocomplete.
+ :type summaryBatchSize: int
+ :param jobReorderInterval: The interval in ms to reorder jobs in the job queue for summary.
+ :type jobReorderInterval: int
+ :param stopRapidChanges: Whether to stop rapid changes in autocomplete.
+ :type stopRapidChanges: bool
+ :param delaySummaryBatches: The delay in ms between summary batches.
+ :type delaySummaryBatches: int
+ :param delaySummaryBatchesRateLimit: The delay in ms in case of rate limit for delay summary batches.
+ :type delaySummaryBatchesRateLimit: int
+ :param maxSymbolsFuzzyMatch: The max number of symbols to fuzzy match.
+ :type maxSymbolsFuzzyMatch: int
+ :param fuzzySymbolMatchThreshold: The threshold for fuzzy symbol match.
+ :type fuzzySymbolMatchThreshold: int
+ :param symbolsCacheUpdateInterval: The interval in ms to update the symbols cache.
+ :type symbolsCacheUpdateInterval: int
+ :param symbolsStorageUpdateInterval: The interval in ms to update the symbols storage.
+ :type symbolsStorageUpdateInterval: int
+ :param editPredictionSimilarityThreshold: The threshold for edit prediction similarity.
+ :type editPredictionSimilarityThreshold: int
+ :param minSearchWordLength: The minimum length of the word to be searched.
+ :type minSearchWordLength: int
+ :param maxOccurrencesPerWord: The maximum occurrences of a particular search word present in the file.
+ :type maxOccurrencesPerWord: int
+ :param maxWordsContentMatches: The maximum number of content matches from the client.
+ :type maxWordsContentMatches: int
+ :param editPredictionEnabled: Whether to enable edit prediction.
+ :type editPredictionEnabled: bool
+ :param snapshotIntervalMs: The interval in ms to snapshot the file for recent file changes.
+ :type snapshotIntervalMs: int
+ :param linesForSnapshot: Limit of max number of lines to snapshot for recent file changes.
+ :type linesForSnapshot: int
+ :param embeddingConstants: Embedding constants
+ :type embeddingConstants: CodellmEmbeddingConstants
+
+
+ .. py:attribute:: max_pending_requests
+ :value: None
+
+
+
+ .. py:attribute:: acceptance_delay
+ :value: None
+
+
+
+ .. py:attribute:: debounce_delay
+ :value: None
+
+
+
+ .. py:attribute:: record_user_action
+ :value: None
+
+
+
+ .. py:attribute:: validate_suggestion
+ :value: None
+
+
+
+ .. py:attribute:: validation_lines_threshold
+ :value: None
+
+
+
+ .. py:attribute:: max_tracked_recent_changes
+ :value: None
+
+
+
+ .. py:attribute:: diff_threshold
+ :value: None
+
+
+
+ .. py:attribute:: derivative_threshold
+ :value: None
+
+
+
+ .. py:attribute:: default_surrounding_lines
+ :value: None
+
+
+
+ .. py:attribute:: max_tracked_visit_changes
+ :value: None
+
+
+
+ .. py:attribute:: selection_cooldown_ms
+ :value: None
+
+
+
+ .. py:attribute:: viewing_cooldown_ms
+ :value: None
+
+
+
+ .. py:attribute:: max_lines
+ :value: None
+
+
+
+ .. py:attribute:: edit_cooldown_ms
+ :value: None
+
+
+
+ .. py:attribute:: scroll_debounce_ms
+ :value: None
+
+
+
+ .. py:attribute:: lsp_deadline
+ :value: None
+
+
+
+ .. py:attribute:: diagnostics_threshold
+ :value: None
+
+
+
+ .. py:attribute:: diagnostic_each_threshold
+ :value: None
+
+
+
+ .. py:attribute:: num_vs_code_suggestions
+ :value: None
+
+
+
+ .. py:attribute:: min_reindexing_interval
+ :value: None
+
+
+
+ .. py:attribute:: min_refresh_summary_interval
+ :value: None
+
+
+
+ .. py:attribute:: summary_batch_size
+ :value: None
+
+
+
+ .. py:attribute:: job_reorder_interval
+ :value: None
+
+
+
+ .. py:attribute:: stop_rapid_changes
+ :value: None
+
+
+
+ .. py:attribute:: delay_summary_batches
+ :value: None
+
+
+
+ .. py:attribute:: delay_summary_batches_rate_limit
+ :value: None
+
+
+
+ .. py:attribute:: max_symbols_fuzzy_match
+ :value: None
+
+
+
+ .. py:attribute:: fuzzy_symbol_match_threshold
+ :value: None
+
+
+
+ .. py:attribute:: symbols_cache_update_interval
+ :value: None
+
+
+
+ .. py:attribute:: symbols_storage_update_interval
+ :value: None
+
+
+
+ .. py:attribute:: edit_prediction_similarity_threshold
+ :value: None
+
+
+
+ .. py:attribute:: min_search_word_length
+ :value: None
+
+
+
+ .. py:attribute:: max_occurrences_per_word
+ :value: None
+
+
+
+ .. py:attribute:: max_words_content_matches
+ :value: None
+
+
+
+ .. py:attribute:: edit_prediction_enabled
+ :value: None
+
+
+
+ .. py:attribute:: snapshot_interval_ms
+ :value: None
+
+
+
+ .. py:attribute:: lines_for_snapshot
+ :value: None
+
+
+
+ .. py:attribute:: embedding_constants
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/custom_chat_instructions/index.rst.txt b/docs/_sources/autoapi/abacusai/custom_chat_instructions/index.rst.txt
index 657d8c906..fe8e15150 100644
--- a/docs/_sources/autoapi/abacusai/custom_chat_instructions/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/custom_chat_instructions/index.rst.txt
@@ -15,7 +15,7 @@ Classes
Module Contents
---------------
-.. py:class:: CustomChatInstructions(client, userInformationInstructions=None, responseInstructions=None, enableCodeExecution=None, enableImageGeneration=None, enableWebSearch=None, enablePlayground=None, experimentalFeatures=None)
+.. py:class:: CustomChatInstructions(client, userInformationInstructions=None, responseInstructions=None, enableCodeExecution=None, enableImageGeneration=None, enableWebSearch=None, enablePlayground=None, enableMemories=None, experimentalFeatures=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -36,6 +36,8 @@ Module Contents
:type enableWebSearch: bool
:param enablePlayground: Whether or not playground is enabled.
:type enablePlayground: bool
+ :param enableMemories: Whether or not memories are enabled.
+ :type enableMemories: bool
:param experimentalFeatures: Experimental features.
:type experimentalFeatures: dict
@@ -70,6 +72,11 @@ Module Contents
+ .. py:attribute:: enable_memories
+ :value: None
+
+
+
.. py:attribute:: experimental_features
:value: None
diff --git a/docs/_sources/autoapi/abacusai/custom_domain/index.rst.txt b/docs/_sources/autoapi/abacusai/custom_domain/index.rst.txt
new file mode 100644
index 000000000..bcf22b7d8
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/custom_domain/index.rst.txt
@@ -0,0 +1,71 @@
+abacusai.custom_domain
+======================
+
+.. py:module:: abacusai.custom_domain
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.custom_domain.CustomDomain
+
+
+Module Contents
+---------------
+
+.. py:class:: CustomDomain(client, status=None, message=None, expectedNameservers=None, currentNameservers=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Result of adding a custom domain to a hosted app
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param status: Whether the custom domain was added successfully
+ :type status: bool
+ :param message: The message from the custom domain
+ :type message: str
+ :param expectedNameservers: The expected nameservers for the custom domain
+ :type expectedNameservers: list
+ :param currentNameservers: The current nameservers for the custom domain
+ :type currentNameservers: list
+
+
+ .. py:attribute:: status
+ :value: None
+
+
+
+ .. py:attribute:: message
+ :value: None
+
+
+
+ .. py:attribute:: expected_nameservers
+ :value: None
+
+
+
+ .. py:attribute:: current_nameservers
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/daemon_task_conversation/index.rst.txt b/docs/_sources/autoapi/abacusai/daemon_task_conversation/index.rst.txt
new file mode 100644
index 000000000..0f708ebcf
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/daemon_task_conversation/index.rst.txt
@@ -0,0 +1,78 @@
+abacusai.daemon_task_conversation
+=================================
+
+.. py:module:: abacusai.daemon_task_conversation
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.daemon_task_conversation.DaemonTaskConversation
+
+
+Module Contents
+---------------
+
+.. py:class:: DaemonTaskConversation(client, deploymentConversationId=None, createdAt=None, updatedAt=None, deploymentConversationName=None, externalApplicationId=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Daemon Task Conversation
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param deploymentConversationId: The ID of the deployment conversation
+ :type deploymentConversationId: id
+ :param createdAt: The creation timestamp
+ :type createdAt: str
+ :param updatedAt: The last update timestamp
+ :type updatedAt: str
+ :param deploymentConversationName: The name of the conversation
+ :type deploymentConversationName: str
+ :param externalApplicationId: The external application ID
+ :type externalApplicationId: str
+
+
+ .. py:attribute:: deployment_conversation_id
+ :value: None
+
+
+
+ .. py:attribute:: created_at
+ :value: None
+
+
+
+ .. py:attribute:: updated_at
+ :value: None
+
+
+
+ .. py:attribute:: deployment_conversation_name
+ :value: None
+
+
+
+ .. py:attribute:: external_application_id
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/database_connector/index.rst.txt b/docs/_sources/autoapi/abacusai/database_connector/index.rst.txt
index f58cee495..3f6ae9b19 100644
--- a/docs/_sources/autoapi/abacusai/database_connector/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/database_connector/index.rst.txt
@@ -142,3 +142,27 @@ Module Contents
+ .. py:method:: query_datallm(query)
+
+ Runs a read-only query in the specified database connector. This API is specifically designed for DataLLM and enforces read-only access with user-specific access control.
+
+
+ :param query: The query to be run in the database connector (must be read-only).
+ :type query: str
+
+
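+ A minimal sketch of running a read-only query through a connector (how the DatabaseConnector object is obtained, and the table name, are assumptions for illustration):
+
+ .. code-block:: python
+
+    from abacusai import ApiClient
+
+    client = ApiClient('YOUR_API_KEY')  # placeholder API key
+
+    connector = client.list_database_connectors()[0]  # assumed retrieval; any DatabaseConnector instance works
+    result = connector.query_datallm('SELECT COUNT(*) FROM orders')  # read-only query; table name is hypothetical
+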
+
+ .. py:method:: get_auth()
+
+ Get the authentication details for a given database connector.
+
+ :param database_connector_id: The unique ID associated with the database connector.
+ :type database_connector_id: str
+
+ :returns: The database connector with the authentication details.
+ :rtype: DatabaseConnector
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/dataset/index.rst.txt b/docs/_sources/autoapi/abacusai/dataset/index.rst.txt
index a7d055668..2373b1881 100644
--- a/docs/_sources/autoapi/abacusai/dataset/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/dataset/index.rst.txt
@@ -204,6 +204,21 @@ Module Contents
+ .. py:method:: get_raw_data_from_realtime(check_permissions = False, start_time = None, end_time = None, column_filter = None)
+
+ Returns raw data from a realtime dataset. Only Microsoft Teams datasets are supported currently due to data size constraints in realtime datasets.
+
+ :param check_permissions: If True, checks user permissions using session email.
+ :type check_permissions: bool
+ :param start_time: Start time filter (inclusive) for created_date_time_t in ISO 8601 format (e.g. 2025-05-13T08:25:11Z or 2025-05-13T08:25:11+00:00).
+ :type start_time: str
+ :param end_time: End time filter (inclusive) for created_date_time_t in ISO 8601 format (e.g. 2025-05-13T08:25:11Z or 2025-05-13T08:25:11+00:00).
+ :type end_time: str
+ :param column_filter: Dictionary mapping column names to filter values. Only rows matching all column filters will be returned.
+ :type column_filter: dict
+
+
+
.. py:method:: create_version_from_file_connector(location = None, file_format = None, csv_delimiter = None, merge_file_schemas = None, parsing_config = None, sql_query = None)
Creates a new version of the specified dataset.
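
As a quick illustration of ``get_raw_data_from_realtime`` above, the sketch below uses a placeholder dataset ID for a realtime Microsoft Teams dataset and a hypothetical ``channel_name`` column; ``describe_dataset()`` is assumed to be the usual way to obtain the ``Dataset`` object.

.. code-block:: python

    from abacusai import ApiClient

    client = ApiClient('YOUR_API_KEY')
    dataset = client.describe_dataset('ds_teams_realtime')  # placeholder ID

    rows = dataset.get_raw_data_from_realtime(
        check_permissions=True,                     # enforce session-email permissions
        start_time='2025-05-13T00:00:00Z',          # inclusive, ISO 8601
        end_time='2025-05-13T23:59:59Z',            # inclusive, ISO 8601
        column_filter={'channel_name': 'general'},  # hypothetical column/value
    )
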
diff --git a/docs/_sources/autoapi/abacusai/default_llm/index.rst.txt b/docs/_sources/autoapi/abacusai/default_llm/index.rst.txt
new file mode 100644
index 000000000..313f1e573
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/default_llm/index.rst.txt
@@ -0,0 +1,57 @@
+abacusai.default_llm
+====================
+
+.. py:module:: abacusai.default_llm
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.default_llm.DefaultLlm
+
+
+Module Contents
+---------------
+
+.. py:class:: DefaultLlm(client, name=None, enum=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ A default LLM.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param name: The name of the LLM.
+ :type name: str
+ :param enum: The enum of the LLM.
+ :type enum: str
+
+
+ .. py:attribute:: name
+ :value: None
+
+
+
+ .. py:attribute:: enum
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/deployment/index.rst.txt b/docs/_sources/autoapi/abacusai/deployment/index.rst.txt
index c0688686a..ffa225e48 100644
--- a/docs/_sources/autoapi/abacusai/deployment/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/deployment/index.rst.txt
@@ -15,7 +15,7 @@ Classes
Module Contents
---------------
-.. py:class:: Deployment(client, deploymentId=None, name=None, status=None, description=None, deployedAt=None, createdAt=None, projectId=None, modelId=None, modelVersion=None, featureGroupId=None, featureGroupVersion=None, callsPerSecond=None, autoDeploy=None, skipMetricsCheck=None, algoName=None, regions=None, error=None, batchStreamingUpdates=None, algorithm=None, pendingModelVersion=None, modelDeploymentConfig=None, predictionOperatorId=None, predictionOperatorVersion=None, pendingPredictionOperatorVersion=None, onlineFeatureGroupId=None, outputOnlineFeatureGroupId=None, realtimeMonitorId=None, runtimeConfigs=None, refreshSchedules={}, featureGroupExportConfig={}, defaultPredictionArguments={})
+.. py:class:: Deployment(client, deploymentId=None, name=None, status=None, description=None, deployedAt=None, createdAt=None, projectId=None, modelId=None, modelVersion=None, featureGroupId=None, featureGroupVersion=None, callsPerSecond=None, autoDeploy=None, skipMetricsCheck=None, algoName=None, regions=None, error=None, batchStreamingUpdates=None, algorithm=None, pendingModelVersion=None, modelDeploymentConfig=None, predictionOperatorId=None, predictionOperatorVersion=None, pendingPredictionOperatorVersion=None, onlineFeatureGroupId=None, outputOnlineFeatureGroupId=None, realtimeMonitorId=None, runtimeConfigs=None, isSystemCreated=None, refreshSchedules={}, featureGroupExportConfig={}, defaultPredictionArguments={})
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -80,6 +80,8 @@ Module Contents
:type realtimeMonitorId: id
:param runtimeConfigs: The runtime configurations of a deployment which is used by some of the usecases during prediction.
:type runtimeConfigs: dict
+ :param isSystemCreated: Whether the deployment is system created.
+ :type isSystemCreated: bool
:param refreshSchedules: A list of refresh schedules that indicate when the deployment will be updated to the latest model version.
:type refreshSchedules: RefreshSchedule
:param featureGroupExportConfig: The export config (file connector or database connector information) for feature group deployment exports.
@@ -228,6 +230,11 @@ Module Contents
+ .. py:attribute:: is_system_created
+ :value: None
+
+
+
.. py:attribute:: refresh_schedules
@@ -507,7 +514,7 @@ Module Contents
- .. py:method:: get_conversation_response(message, deployment_token, deployment_conversation_id = None, external_session_id = None, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, doc_infos = None)
+ .. py:method:: get_conversation_response(message, deployment_token, deployment_conversation_id = None, external_session_id = None, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, doc_infos = None, user_info = None, execute_usercode_tool = False)
Return a conversation response which continues the conversation based on the input message and deployment conversation id (if exists).
@@ -535,6 +542,8 @@ Module Contents
:type chat_config: dict
:param doc_infos: An optional list of documents use for the conversation. A keyword 'doc_id' is expected to be present in each document for retrieving contents from docstore.
:type doc_infos: list
+ :param execute_usercode_tool: If True, will return the tool output in the response.
+ :type execute_usercode_tool: bool
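
A minimal sketch of the extended ``get_conversation_response`` signature; the deployment ID and deployment token are placeholders, and ``describe_deployment()`` is assumed to be how the ``Deployment`` object is fetched.

.. code-block:: python

    from abacusai import ApiClient

    client = ApiClient('YOUR_API_KEY')
    deployment = client.describe_deployment('dep_placeholder')

    response = deployment.get_conversation_response(
        message='Summarize the attached quarterly report',
        deployment_token='DEPLOYMENT_TOKEN',  # placeholder token
        temperature=0.0,
        execute_usercode_tool=True,           # include the tool output in the response
    )
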
@@ -697,7 +706,7 @@ Module Contents
- .. py:method:: list_conversations(external_application_id = None, conversation_type = None, fetch_last_llm_info = False)
+ .. py:method:: list_conversations(external_application_id = None, conversation_type = None, fetch_last_llm_info = False, limit = None, search = None)
Lists all conversations for the given deployment and current user.
@@ -707,6 +716,10 @@ Module Contents
:type conversation_type: DeploymentConversationType
:param fetch_last_llm_info: If true, the LLM info for the most recent conversation will be fetched. Only applicable for system-created bots.
:type fetch_last_llm_info: bool
+ :param limit: The number of conversations to return. Defaults to 600.
+ :type limit: int
+ :param search: The search query to filter conversations by title.
+ :type search: str
:returns: The deployment conversations.
:rtype: list[DeploymentConversation]
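
The new ``limit`` and ``search`` parameters of ``list_conversations`` can be exercised as in the sketch below; the deployment ID is a placeholder and ``describe_deployment()`` is assumed as above.

.. code-block:: python

    from abacusai import ApiClient

    client = ApiClient('YOUR_API_KEY')
    deployment = client.describe_deployment('dep_placeholder')

    conversations = deployment.list_conversations(
        fetch_last_llm_info=True,
        limit=50,                   # defaults to 600 when omitted
        search='quarterly report',  # filter conversations by title
    )
    for conversation in conversations:
        print(conversation.deployment_conversation_id, conversation.name)
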
diff --git a/docs/_sources/autoapi/abacusai/deployment_conversation/index.rst.txt b/docs/_sources/autoapi/abacusai/deployment_conversation/index.rst.txt
index 2f05519a7..bea9f7549 100644
--- a/docs/_sources/autoapi/abacusai/deployment_conversation/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/deployment_conversation/index.rst.txt
@@ -15,7 +15,7 @@ Classes
Module Contents
---------------
-.. py:class:: DeploymentConversation(client, deploymentConversationId=None, name=None, deploymentId=None, createdAt=None, lastEventCreatedAt=None, hasHistory=None, externalSessionId=None, regenerateAttempt=None, externalApplicationId=None, unusedDocumentUploadIds=None, humanizeInstructions=None, conversationWarning=None, conversationType=None, metadata=None, llmDisplayName=None, llmBotIcon=None, searchSuggestions=None, chatllmTaskId=None, history={})
+.. py:class:: DeploymentConversation(client, deploymentConversationId=None, name=None, deploymentId=None, ownerUserId=None, ownerOrgId=None, createdAt=None, lastEventCreatedAt=None, hasHistory=None, externalSessionId=None, regenerateAttempt=None, externalApplicationId=None, unusedDocumentUploadIds=None, humanizeInstructions=None, conversationWarning=None, conversationType=None, metadata=None, llmDisplayName=None, llmBotIcon=None, searchSuggestions=None, chatllmTaskId=None, conversationStatus=None, computerStatus=None, filesystemStatus=None, totalEvents=None, contestNames=None, daemonTaskId=None, parentDeploymentConversationId=None, introMessage=None, previewInfo=None, latestContext=None, shareType=None, history={}, hostedArtifacts={})
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -30,6 +30,10 @@ Module Contents
:type name: str
:param deploymentId: The deployment id associated with the deployment conversation.
:type deploymentId: str
+ :param ownerUserId: The user id of the owner of the deployment conversation.
+ :type ownerUserId: str
+ :param ownerOrgId: The organization id of the owner of the deployment conversation.
+ :type ownerOrgId: str
:param createdAt: The timestamp at which the deployment conversation was created.
:type createdAt: str
:param lastEventCreatedAt: The timestamp at which the most recent corresponding deployment conversation event was created at.
@@ -60,8 +64,32 @@ Module Contents
:type searchSuggestions: list
:param chatllmTaskId: The chatllm task id associated with the deployment conversation.
:type chatllmTaskId: str
+ :param conversationStatus: The status of the deployment conversation (used for deep agent conversations).
+ :type conversationStatus: str
+ :param computerStatus: The status of the computer associated with the deployment conversation (used for deep agent conversations).
+ :type computerStatus: str
+ :param filesystemStatus: The status of the filesystem associated with the deployment conversation (used for deep agent conversations).
+ :type filesystemStatus: str
+ :param totalEvents: The total number of events in the deployment conversation.
+ :type totalEvents: int
+ :param contestNames: Names of contests that this deployment is a part of.
+ :type contestNames: list[str]
+ :param daemonTaskId: The daemon task id associated with the deployment conversation.
+ :type daemonTaskId: str
+ :param parentDeploymentConversationId: The parent deployment conversation id associated with the deployment conversation.
+ :type parentDeploymentConversationId: str
+ :param introMessage: The intro message for the deployment conversation.
+ :type introMessage: str
+ :param previewInfo: App preview info.
+ :type previewInfo: dict
+   :param latestContext: Dict containing a list of important segments used for pagination in Deep Agent.
+ :type latestContext: dict
+ :param shareType: The sharing status of the deployment conversation (PUBLIC or PRIVATE).
+ :type shareType: str
:param history: The history of the deployment conversation.
:type history: DeploymentConversationEvent
+ :param hostedArtifacts: Artifacts that have been deployed by this conversation.
+ :type hostedArtifacts: HostedArtifact
.. py:attribute:: deployment_conversation_id
@@ -79,6 +107,16 @@ Module Contents
+ .. py:attribute:: owner_user_id
+ :value: None
+
+
+
+ .. py:attribute:: owner_org_id
+ :value: None
+
+
+
.. py:attribute:: created_at
:value: None
@@ -154,9 +192,67 @@ Module Contents
+ .. py:attribute:: conversation_status
+ :value: None
+
+
+
+ .. py:attribute:: computer_status
+ :value: None
+
+
+
+ .. py:attribute:: filesystem_status
+ :value: None
+
+
+
+ .. py:attribute:: total_events
+ :value: None
+
+
+
+ .. py:attribute:: contest_names
+ :value: None
+
+
+
+ .. py:attribute:: daemon_task_id
+ :value: None
+
+
+
+ .. py:attribute:: parent_deployment_conversation_id
+ :value: None
+
+
+
+ .. py:attribute:: intro_message
+ :value: None
+
+
+
+ .. py:attribute:: preview_info
+ :value: None
+
+
+
+ .. py:attribute:: latest_context
+ :value: None
+
+
+
+ .. py:attribute:: share_type
+ :value: None
+
+
+
.. py:attribute:: history
+ .. py:attribute:: hosted_artifacts
+
+
.. py:attribute:: deprecated_keys
@@ -172,7 +268,7 @@ Module Contents
- .. py:method:: get(external_session_id = None, deployment_id = None, filter_intermediate_conversation_events = True, get_unused_document_uploads = False)
+ .. py:method:: get(external_session_id = None, deployment_id = None, filter_intermediate_conversation_events = True, get_unused_document_uploads = False, start = None, limit = None, include_all_versions = False)
Gets a deployment conversation.
@@ -184,12 +280,27 @@ Module Contents
:type filter_intermediate_conversation_events: bool
:param get_unused_document_uploads: If true, unused document uploads will be returned. Default is false.
:type get_unused_document_uploads: bool
+      :param start: The starting index within the conversation history, used for pagination.
+      :type start: int
+      :param limit: The maximum number of conversation entries to return.
+      :type limit: int
+ :param include_all_versions: If True, includes all versions of the last bot message for version switching functionality.
+ :type include_all_versions: bool
:returns: The deployment conversation.
:rtype: DeploymentConversation
+ .. py:method:: get_file(file_path)
+
+ Gets a deployment conversation file.
+
+ :param file_path: The path of the file to get.
+ :type file_path: str
+
+
+
.. py:method:: delete(deployment_id = None)
Delete a Deployment Conversation.
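
To show the new pagination arguments of ``get`` and the ``get_file`` helper together, the sketch below reuses ``list_conversations`` from the deployment docs; the deployment ID and file path are placeholders.

.. code-block:: python

    from abacusai import ApiClient

    client = ApiClient('YOUR_API_KEY')
    deployment = client.describe_deployment('dep_placeholder')

    # Take the most recent conversation and page through its events.
    conversation = deployment.list_conversations(limit=1)[0]
    conversation = conversation.get(
        start=0,                    # first entry to return
        limit=20,                   # page size
        include_all_versions=True,  # include alternate versions of the last bot message
    )

    # Fetch a file produced during the conversation (hypothetical path).
    report = conversation.get_file('outputs/report.csv')
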
diff --git a/docs/_sources/autoapi/abacusai/deployment_conversation_event/index.rst.txt b/docs/_sources/autoapi/abacusai/deployment_conversation_event/index.rst.txt
index 9021b2544..27e41116f 100644
--- a/docs/_sources/autoapi/abacusai/deployment_conversation_event/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/deployment_conversation_event/index.rst.txt
@@ -15,7 +15,7 @@ Classes
Module Contents
---------------
-.. py:class:: DeploymentConversationEvent(client, role=None, text=None, timestamp=None, messageIndex=None, regenerateAttempt=None, modelVersion=None, searchResults=None, isUseful=None, feedback=None, feedbackType=None, docInfos=None, keywordArguments=None, inputParams=None, attachments=None, responseVersion=None, agentWorkflowNodeId=None, nextAgentWorkflowNodeId=None, chatType=None, agentResponse=None, error=None, segments=None, streamedData=None, streamedSectionData=None, highlights=None, llmDisplayName=None, llmBotIcon=None, formResponse=None, routedLlm=None, computePointsUsed=None, computerFiles=None, toolUseRequest=None, verificationSummary=None)
+.. py:class:: DeploymentConversationEvent(client, role=None, text=None, timestamp=None, messageIndex=None, regenerateAttempt=None, modelVersion=None, searchResults=None, isUseful=None, feedback=None, feedbackType=None, docInfos=None, keywordArguments=None, inputParams=None, attachments=None, responseVersion=None, agentWorkflowNodeId=None, nextAgentWorkflowNodeId=None, chatType=None, agentResponse=None, error=None, segments=None, streamedData=None, streamedSectionData=None, highlights=None, llmDisplayName=None, deepagentMode=None, llmBotIcon=None, formResponse=None, routedLlm=None, computePointsUsed=None, computerFiles=None, toolUseRequest=None, toolUseResult=None, verificationSummary=None, attachedUserFileNames=None, oldFileContent=None, allVersions=None, selectedVersionIndex=None, totalVersions=None, isLastMessage=None, allowTaskAutomation=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -74,6 +74,8 @@ Module Contents
:type highlights: dict
:param llmDisplayName: The display name of the LLM model used to generate the response. Only used for system-created bots.
:type llmDisplayName: str
+ :param deepagentMode: The Deep Agent run mode (e.g. normal or high) associated with the message.
+ :type deepagentMode: str
:param llmBotIcon: The icon location of the LLM model used to generate the response. Only used for system-created bots.
:type llmBotIcon: str
:param formResponse: Contains form data response from the user when a Form Segment is given out by the bot.
@@ -86,8 +88,24 @@ Module Contents
:type computerFiles: list
:param toolUseRequest: The tool use request for the message.
:type toolUseRequest: dict
+ :param toolUseResult: The tool use response for the message.
+ :type toolUseResult: dict
:param verificationSummary: The summary of the verification process for the message.
:type verificationSummary: str
+ :param attachedUserFileNames: The list of files attached by the user on the message.
+ :type attachedUserFileNames: list
+   :param oldFileContent: The content of the file associated with an edit tool request before the file was edited.
+ :type oldFileContent: str
+ :param allVersions: All versions of the last bot message for version switching functionality.
+ :type allVersions: list
+ :param selectedVersionIndex: The index of the currently selected version of the last bot message.
+ :type selectedVersionIndex: int
+ :param totalVersions: The total number of versions available for the last bot message.
+ :type totalVersions: int
+ :param isLastMessage: Whether this message is the last message in the conversation.
+ :type isLastMessage: bool
+ :param allowTaskAutomation: Whether to show the automate task button. Only set for the last message in the conversation.
+ :type allowTaskAutomation: bool
.. py:attribute:: role
@@ -215,6 +233,11 @@ Module Contents
+ .. py:attribute:: deepagent_mode
+ :value: None
+
+
+
.. py:attribute:: llm_bot_icon
:value: None
@@ -245,11 +268,51 @@ Module Contents
+ .. py:attribute:: tool_use_result
+ :value: None
+
+
+
.. py:attribute:: verification_summary
:value: None
+ .. py:attribute:: attached_user_file_names
+ :value: None
+
+
+
+ .. py:attribute:: old_file_content
+ :value: None
+
+
+
+ .. py:attribute:: all_versions
+ :value: None
+
+
+
+ .. py:attribute:: selected_version_index
+ :value: None
+
+
+
+ .. py:attribute:: total_versions
+ :value: None
+
+
+
+ .. py:attribute:: is_last_message
+ :value: None
+
+
+
+ .. py:attribute:: allow_task_automation
+ :value: None
+
+
+
.. py:attribute:: deprecated_keys
diff --git a/docs/_sources/autoapi/abacusai/document_retriever_lookup_result/index.rst.txt b/docs/_sources/autoapi/abacusai/document_retriever_lookup_result/index.rst.txt
index b35968f7f..0be63c7d6 100644
--- a/docs/_sources/autoapi/abacusai/document_retriever_lookup_result/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/document_retriever_lookup_result/index.rst.txt
@@ -15,7 +15,7 @@ Classes
Module Contents
---------------
-.. py:class:: DocumentRetrieverLookupResult(client, document=None, score=None, properties=None, pages=None, boundingBoxes=None, documentSource=None, imageIds=None)
+.. py:class:: DocumentRetrieverLookupResult(client, document=None, score=None, properties=None, pages=None, boundingBoxes=None, documentSource=None, imageIds=None, metadata=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -38,6 +38,8 @@ Module Contents
:type documentSource: str
:param imageIds: List of Image IDs for all the pages.
:type imageIds: list
+ :param metadata: Metadata column values for the retrieved documents.
+ :type metadata: dict
.. py:attribute:: document
@@ -75,6 +77,11 @@ Module Contents
+ .. py:attribute:: metadata
+ :value: None
+
+
+
.. py:attribute:: deprecated_keys
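
A short sketch of reading the new ``metadata`` field from lookup results; the retriever ID is a placeholder, and ``describe_document_retriever()`` / ``lookup()`` are assumed to be the usual retrieval calls, so adjust to whatever your workflow already uses.

.. code-block:: python

    from abacusai import ApiClient

    client = ApiClient('YOUR_API_KEY')

    retriever = client.describe_document_retriever('doc_retriever_placeholder')
    results = retriever.lookup('refund policy for enterprise accounts')

    for result in results:
        # metadata carries the metadata column values of the retrieved document
        print(result.score, result.document_source, result.metadata)
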
diff --git a/docs/_sources/autoapi/abacusai/document_retriever_version_logs/index.rst.txt b/docs/_sources/autoapi/abacusai/document_retriever_version_logs/index.rst.txt
new file mode 100644
index 000000000..120ae59f3
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/document_retriever_version_logs/index.rst.txt
@@ -0,0 +1,50 @@
+abacusai.document_retriever_version_logs
+========================================
+
+.. py:module:: abacusai.document_retriever_version_logs
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.document_retriever_version_logs.DocumentRetrieverVersionLogs
+
+
+Module Contents
+---------------
+
+.. py:class:: DocumentRetrieverVersionLogs(client, logs=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Logs from document retriever version.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param logs: List of logs from document retriever version.
+ :type logs: list[str]
+
+
+ .. py:attribute:: logs
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/domain_purchase_result/index.rst.txt b/docs/_sources/autoapi/abacusai/domain_purchase_result/index.rst.txt
new file mode 100644
index 000000000..3b4e210d9
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/domain_purchase_result/index.rst.txt
@@ -0,0 +1,57 @@
+abacusai.domain_purchase_result
+===============================
+
+.. py:module:: abacusai.domain_purchase_result
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.domain_purchase_result.DomainPurchaseResult
+
+
+Module Contents
+---------------
+
+.. py:class:: DomainPurchaseResult(client, domain=None, billedCredits=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Domain purchase result
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param domain: The domain name being purchased.
+ :type domain: str
+ :param billedCredits: The total credits used for the purchase.
+ :type billedCredits: int
+
+
+ .. py:attribute:: domain
+ :value: None
+
+
+
+ .. py:attribute:: billed_credits
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/domain_search_result/index.rst.txt b/docs/_sources/autoapi/abacusai/domain_search_result/index.rst.txt
new file mode 100644
index 000000000..274732ccd
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/domain_search_result/index.rst.txt
@@ -0,0 +1,78 @@
+abacusai.domain_search_result
+=============================
+
+.. py:module:: abacusai.domain_search_result
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.domain_search_result.DomainSearchResult
+
+
+Module Contents
+---------------
+
+.. py:class:: DomainSearchResult(client, domain=None, registerCredits=None, registerYears=None, renewalCredits=None, renewalYears=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Domain search result
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+   :param domain: The name of the domain in the search result.
+   :type domain: str
+   :param registerCredits: The number of credits required for registration.
+   :type registerCredits: int
+   :param registerYears: The number of years the domain will be registered for.
+   :type registerYears: int
+   :param renewalCredits: The number of credits required for renewal.
+   :type renewalCredits: int
+   :param renewalYears: The number of years the domain will be renewed for.
+   :type renewalYears: int
+
+
+ .. py:attribute:: domain
+ :value: None
+
+
+
+ .. py:attribute:: register_credits
+ :value: None
+
+
+
+ .. py:attribute:: register_years
+ :value: None
+
+
+
+ .. py:attribute:: renewal_credits
+ :value: None
+
+
+
+ .. py:attribute:: renewal_years
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
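
Since the class only carries credit and term counts, a tiny hypothetical helper like the one below is enough to turn a ``DomainSearchResult`` into a readable summary; it uses only the fields documented above.

.. code-block:: python

    from abacusai import DomainSearchResult

    def registration_summary(result: DomainSearchResult) -> str:
        """Summarize a search result using only its documented fields."""
        per_year = result.register_credits / max(result.register_years or 1, 1)
        return (
            f"{result.domain}: {result.register_credits} credits for "
            f"{result.register_years} year(s) (~{per_year:.0f}/year), "
            f"renews at {result.renewal_credits} credits per {result.renewal_years} year(s)"
        )
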
diff --git a/docs/_sources/autoapi/abacusai/edit_image_models/index.rst.txt b/docs/_sources/autoapi/abacusai/edit_image_models/index.rst.txt
new file mode 100644
index 000000000..2447ef8d3
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/edit_image_models/index.rst.txt
@@ -0,0 +1,57 @@
+abacusai.edit_image_models
+==========================
+
+.. py:module:: abacusai.edit_image_models
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.edit_image_models.EditImageModels
+
+
+Module Contents
+---------------
+
+.. py:class:: EditImageModels(client, models=None, default=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Edit image models
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param models: The models available for edit image.
+ :type models: list
+ :param default: The default model for edit image.
+ :type default: str
+
+
+ .. py:attribute:: models
+ :value: None
+
+
+
+ .. py:attribute:: default
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/entri_auth_token/index.rst.txt b/docs/_sources/autoapi/abacusai/entri_auth_token/index.rst.txt
new file mode 100644
index 000000000..6e8ee4ca6
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/entri_auth_token/index.rst.txt
@@ -0,0 +1,64 @@
+abacusai.entri_auth_token
+=========================
+
+.. py:module:: abacusai.entri_auth_token
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.entri_auth_token.EntriAuthToken
+
+
+Module Contents
+---------------
+
+.. py:class:: EntriAuthToken(client, token=None, applicationId=None, ttl=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Entri Auth Token
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param token: The authentication token for Entri
+ :type token: str
+   :param applicationId: The application ID from the Entri dashboard
+ :type applicationId: str
+ :param ttl: The duration in milliseconds for which the token is valid
+ :type ttl: int
+
+
+ .. py:attribute:: token
+ :value: None
+
+
+
+ .. py:attribute:: application_id
+ :value: None
+
+
+
+ .. py:attribute:: ttl
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/external_application/index.rst.txt b/docs/_sources/autoapi/abacusai/external_application/index.rst.txt
index 767ce4bdd..df953fc9e 100644
--- a/docs/_sources/autoapi/abacusai/external_application/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/external_application/index.rst.txt
@@ -15,7 +15,7 @@ Classes
Module Contents
---------------
-.. py:class:: ExternalApplication(client, name=None, externalApplicationId=None, deploymentId=None, description=None, logo=None, theme=None, userGroupIds=None, useCase=None, isAgent=None, status=None, deploymentConversationRetentionHours=None, managedUserService=None, predictionOverrides=None, isSystemCreated=None, isCustomizable=None, isDeprecated=None, isVisible=None)
+.. py:class:: ExternalApplication(client, name=None, externalApplicationId=None, deploymentId=None, description=None, logo=None, theme=None, userGroupIds=None, useCase=None, isAgent=None, status=None, deploymentConversationRetentionHours=None, managedUserService=None, predictionOverrides=None, isSystemCreated=None, isCustomizable=None, isDeprecated=None, isVisible=None, hasThinkingOption=None, hasFastMode=None, onlyImageGenEnabled=None, projectId=None, isCodellmChatmodeSupported=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -58,6 +58,16 @@ Module Contents
:type isDeprecated: bool
:param isVisible: Whether the external application should be shown in the dropdown.
:type isVisible: bool
+ :param hasThinkingOption: Whether to show the thinking option in the toolbar.
+ :type hasThinkingOption: bool
+ :param hasFastMode: Whether the external application has fast mode.
+ :type hasFastMode: bool
+   :param onlyImageGenEnabled: Whether the LLM only allows image generation.
+ :type onlyImageGenEnabled: bool
+ :param projectId: The project id associated with the external application.
+ :type projectId: str
+   :param isCodellmChatmodeSupported: Whether the external application supports CodeLLM chat mode.
+ :type isCodellmChatmodeSupported: bool
.. py:attribute:: name
@@ -145,6 +155,31 @@ Module Contents
+ .. py:attribute:: has_thinking_option
+ :value: None
+
+
+
+ .. py:attribute:: has_fast_mode
+ :value: None
+
+
+
+ .. py:attribute:: only_image_gen_enabled
+ :value: None
+
+
+
+ .. py:attribute:: project_id
+ :value: None
+
+
+
+ .. py:attribute:: is_codellm_chatmode_supported
+ :value: None
+
+
+
.. py:attribute:: deprecated_keys
@@ -160,6 +195,24 @@ Module Contents
+ .. py:method:: add_developers_to()
+
+ Adds a permission for the platform App User Group to access an External Application.
+
+ :param external_application_id: The ID of the External Application.
+ :type external_application_id: str
+
+
+
+ .. py:method:: remove_developers_from()
+
+ Removes a permission for the platform App User Group to access an External Application.
+
+ :param external_application_id: The ID of the External Application.
+ :type external_application_id: str
+
+
+
.. py:method:: update(name = None, description = None, theme = None, deployment_id = None, deployment_conversation_retention_hours = None, reset_retention_policy = False)
Updates an External Application.
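
The two new permission helpers act on the instance itself, so no arguments are needed; the sketch below assumes ``describe_external_application()`` returns the ``ExternalApplication`` and uses a placeholder ID.

.. code-block:: python

    from abacusai import ApiClient

    client = ApiClient('YOUR_API_KEY')
    app = client.describe_external_application('ext_app_placeholder')

    app.add_developers_to()        # grant the platform App User Group access
    # ... later ...
    app.remove_developers_from()   # revoke that access again
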
diff --git a/docs/_sources/autoapi/abacusai/feature_group_version/index.rst.txt b/docs/_sources/autoapi/abacusai/feature_group_version/index.rst.txt
index f65135758..2e37af8a1 100644
--- a/docs/_sources/autoapi/abacusai/feature_group_version/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/feature_group_version/index.rst.txt
@@ -228,7 +228,7 @@ Module Contents
:type database_connector_id: str
:param object_name: Name of the database object to write to.
:type object_name: str
- :param write_mode: Enum string indicating whether to use INSERT or UPSERT.
+   :param write_mode: Enum string indicating whether to use INSERT, UPSERT, or REPLACE.
:type write_mode: str
:param database_feature_mapping: Key/value pair JSON object of "database connector column" -> "feature name" pairs.
:type database_feature_mapping: dict
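
To illustrate the newly documented ``REPLACE`` mode, here is a sketch under the assumption that the enclosing call is ``FeatureGroupVersion.export_to_database_connector``; all IDs, object names, and column mappings are placeholders.

.. code-block:: python

    from abacusai import ApiClient

    client = ApiClient('YOUR_API_KEY')
    fg_version = client.describe_feature_group_version('fg_version_placeholder')

    export = fg_version.export_to_database_connector(
        database_connector_id='db_connector_placeholder',
        object_name='analytics.daily_metrics',
        write_mode='REPLACE',  # alongside the existing INSERT and UPSERT modes
        database_feature_mapping={'metric_date': 'date', 'metric_value': 'value'},
    )
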
diff --git a/docs/_sources/autoapi/abacusai/feature_mapping/index.rst.txt b/docs/_sources/autoapi/abacusai/feature_mapping/index.rst.txt
index 0495bb3b2..c061f1dc1 100644
--- a/docs/_sources/autoapi/abacusai/feature_mapping/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/feature_mapping/index.rst.txt
@@ -24,7 +24,7 @@ Module Contents
:param client: An authenticated API Client instance
:type client: ApiClient
- :param featureMapping: The mapping of the feature. The possible values will be based on the project's use-case. See the (Use Case Documentation)[https://api.abacus.ai/app/help/useCases] for more details.
+ :param featureMapping: The mapping of the feature. The possible values will be based on the project's use-case. See the (Use Case Documentation)[https://api.abacus.ai/app/help/developer-platform/useCases] for more details.
:type featureMapping: str
:param featureName: The unique name of the feature.
:type featureName: str
diff --git a/docs/_sources/autoapi/abacusai/file_connector/index.rst.txt b/docs/_sources/autoapi/abacusai/file_connector/index.rst.txt
index 943532113..a3eb42c71 100644
--- a/docs/_sources/autoapi/abacusai/file_connector/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/file_connector/index.rst.txt
@@ -15,7 +15,7 @@ Classes
Module Contents
---------------
-.. py:class:: FileConnector(client, bucket=None, verified=None, writePermission=None, authExpiresAt=None)
+.. py:class:: FileConnector(client, bucket=None, verified=None, writePermission=None, authExpiresAt=None, createdAt=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -32,6 +32,8 @@ Module Contents
:type writePermission: bool
:param authExpiresAt: The time when the file connector's auth expires, if applicable
:type authExpiresAt: str
+ :param createdAt: The timestamp at which the file connector was created
+ :type createdAt: str
.. py:attribute:: bucket
@@ -54,6 +56,11 @@ Module Contents
+ .. py:attribute:: created_at
+ :value: None
+
+
+
.. py:attribute:: deprecated_keys
diff --git a/docs/_sources/autoapi/abacusai/fs_entry/index.rst.txt b/docs/_sources/autoapi/abacusai/fs_entry/index.rst.txt
new file mode 100644
index 000000000..583244da0
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/fs_entry/index.rst.txt
@@ -0,0 +1,92 @@
+abacusai.fs_entry
+=================
+
+.. py:module:: abacusai.fs_entry
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.fs_entry.FsEntry
+
+
+Module Contents
+---------------
+
+.. py:class:: FsEntry(client, name=None, type=None, path=None, size=None, modified=None, isFolderEmpty=None, children=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ File system entry.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param name: The name of the file/folder
+ :type name: str
+ :param type: The type of entry (file/folder)
+ :type type: str
+ :param path: The path of the entry
+ :type path: str
+ :param size: The size of the entry in bytes
+ :type size: int
+ :param modified: The last modified timestamp
+ :type modified: int
+ :param isFolderEmpty: Whether the folder is empty (only for folders)
+ :type isFolderEmpty: bool
+ :param children: List of child FSEntry objects (only for folders)
+ :type children: list
+
+
+ .. py:attribute:: name
+ :value: None
+
+
+
+ .. py:attribute:: type
+ :value: None
+
+
+
+ .. py:attribute:: path
+ :value: None
+
+
+
+ .. py:attribute:: size
+ :value: None
+
+
+
+ .. py:attribute:: modified
+ :value: None
+
+
+
+ .. py:attribute:: isFolderEmpty
+ :value: None
+
+
+
+ .. py:attribute:: children
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
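
Because ``FsEntry`` is a plain tree structure, a small recursive walk is the most useful illustration. The sketch assumes ``type`` uses the literal ``'folder'`` for folders and that ``children`` holds ``FsEntry`` objects as documented; ``root`` stands in for whatever API call returns the tree.

.. code-block:: python

    from abacusai.fs_entry import FsEntry

    def print_tree(entry: FsEntry, indent: int = 0) -> None:
        """Recursively print an FsEntry tree using only the documented fields."""
        label = entry.name + '/' if entry.type == 'folder' else entry.name
        print(' ' * indent + f"{label} ({entry.size or 0} bytes)")
        for child in entry.children or []:
            print_tree(child, indent + 2)

    # print_tree(root)  # `root` is an FsEntry returned by your filesystem-listing call
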
diff --git a/docs/_sources/autoapi/abacusai/hosted_app/index.rst.txt b/docs/_sources/autoapi/abacusai/hosted_app/index.rst.txt
new file mode 100644
index 000000000..9e0f8ef0b
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/hosted_app/index.rst.txt
@@ -0,0 +1,71 @@
+abacusai.hosted_app
+===================
+
+.. py:module:: abacusai.hosted_app
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.hosted_app.HostedApp
+
+
+Module Contents
+---------------
+
+.. py:class:: HostedApp(client, hostedAppId=None, deploymentConversationId=None, name=None, createdAt=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Hosted App
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param hostedAppId: The ID of the hosted app
+ :type hostedAppId: id
+ :param deploymentConversationId: The ID of the deployment conversation
+ :type deploymentConversationId: id
+ :param name: The name of the hosted app
+ :type name: str
+ :param createdAt: The creation timestamp
+ :type createdAt: str
+
+
+ .. py:attribute:: hosted_app_id
+ :value: None
+
+
+
+ .. py:attribute:: deployment_conversation_id
+ :value: None
+
+
+
+ .. py:attribute:: name
+ :value: None
+
+
+
+ .. py:attribute:: created_at
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/hosted_app_container/index.rst.txt b/docs/_sources/autoapi/abacusai/hosted_app_container/index.rst.txt
new file mode 100644
index 000000000..104a674a3
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/hosted_app_container/index.rst.txt
@@ -0,0 +1,239 @@
+abacusai.hosted_app_container
+=============================
+
+.. py:module:: abacusai.hosted_app_container
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.hosted_app_container.HostedAppContainer
+
+
+Module Contents
+---------------
+
+.. py:class:: HostedAppContainer(client, hostedAppContainerId=None, hostedAppId=None, deploymentConversationId=None, hostedAppVersion=None, name=None, userId=None, email=None, createdAt=None, updatedAt=None, containerImage=None, route=None, appConfig=None, isDev=None, isDeployable=None, isPreviewAvailable=None, lifecycle=None, status=None, deployedStatus=None, accessLevel=None, hostname=None, llmArtifactId=None, artifactType=None, deployedLlmArtifactId=None, hasDatabase=None, hasStorage=None, webAppProjectId=None, parentConversationId=None, projectMetadata=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Hosted app + Deep agent container information.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param hostedAppContainerId: The ID of the hosted app container
+ :type hostedAppContainerId: id
+ :param hostedAppId: The ID of the hosted app
+ :type hostedAppId: id
+ :param deploymentConversationId: The deployment conversation ID
+ :type deploymentConversationId: id
+ :param hostedAppVersion: The instance of the hosted app
+ :type hostedAppVersion: id
+ :param name: The name of the hosted app
+ :type name: str
+ :param userId: The ID of the creation user
+ :type userId: id
+ :param email: The email of the creation user
+ :type email: str
+ :param createdAt: Creation timestamp
+ :type createdAt: str
+ :param updatedAt: Last update timestamp
+ :type updatedAt: str
+ :param containerImage: Container image name
+ :type containerImage: str
+ :param route: Container route
+ :type route: str
+ :param appConfig: App configuration
+ :type appConfig: dict
+ :param isDev: Whether this is a dev container
+ :type isDev: bool
+   :param isDeployable: Whether this version can be deployed
+   :type isDeployable: bool
+   :param isPreviewAvailable: Whether the dev preview is available on the container
+   :type isPreviewAvailable: bool
+ :param lifecycle: Container lifecycle status (PENDING/DEPLOYING/ACTIVE/FAILED/STOPPED/DELETING)
+ :type lifecycle: str
+ :param status: Container status (RUNNING/STOPPED/DEPLOYING/FAILED)
+ :type status: str
+ :param deployedStatus: Deployment status (PENDING/ACTIVE/STOPPED/NOT_DEPLOYED)
+ :type deployedStatus: str
+ :param accessLevel: Access Level (PUBLIC/PRIVATE/DEDICATED)
+ :type accessLevel: str
+ :param hostname: Hostname of the deployed app
+ :type hostname: str
+ :param llmArtifactId: The ID of the LLM artifact
+ :type llmArtifactId: id
+ :param artifactType: The type of the artifact
+ :type artifactType: str
+ :param deployedLlmArtifactId: The ID of the deployed LLM artifact
+ :type deployedLlmArtifactId: id
+   :param hasDatabase: Whether the app has a database associated with it
+   :type hasDatabase: bool
+   :param hasStorage: Whether the app has cloud storage associated with it
+   :type hasStorage: bool
+ :param webAppProjectId: The ID of the web app project
+ :type webAppProjectId: id
+ :param parentConversationId: The ID of the parent conversation
+ :type parentConversationId: id
+ :param projectMetadata: The metadata of the web app project
+ :type projectMetadata: dict
+
+
+ .. py:attribute:: hosted_app_container_id
+ :value: None
+
+
+
+ .. py:attribute:: hosted_app_id
+ :value: None
+
+
+
+ .. py:attribute:: deployment_conversation_id
+ :value: None
+
+
+
+ .. py:attribute:: hosted_app_version
+ :value: None
+
+
+
+ .. py:attribute:: name
+ :value: None
+
+
+
+ .. py:attribute:: user_id
+ :value: None
+
+
+
+ .. py:attribute:: email
+ :value: None
+
+
+
+ .. py:attribute:: created_at
+ :value: None
+
+
+
+ .. py:attribute:: updated_at
+ :value: None
+
+
+
+ .. py:attribute:: container_image
+ :value: None
+
+
+
+ .. py:attribute:: route
+ :value: None
+
+
+
+ .. py:attribute:: app_config
+ :value: None
+
+
+
+ .. py:attribute:: is_dev
+ :value: None
+
+
+
+ .. py:attribute:: is_deployable
+ :value: None
+
+
+
+ .. py:attribute:: is_preview_available
+ :value: None
+
+
+
+ .. py:attribute:: lifecycle
+ :value: None
+
+
+
+ .. py:attribute:: status
+ :value: None
+
+
+
+ .. py:attribute:: deployed_status
+ :value: None
+
+
+
+ .. py:attribute:: access_level
+ :value: None
+
+
+
+ .. py:attribute:: hostname
+ :value: None
+
+
+
+ .. py:attribute:: llm_artifact_id
+ :value: None
+
+
+
+ .. py:attribute:: artifact_type
+ :value: None
+
+
+
+ .. py:attribute:: deployed_llm_artifact_id
+ :value: None
+
+
+
+ .. py:attribute:: has_database
+ :value: None
+
+
+
+ .. py:attribute:: has_storage
+ :value: None
+
+
+
+ .. py:attribute:: web_app_project_id
+ :value: None
+
+
+
+ .. py:attribute:: parent_conversation_id
+ :value: None
+
+
+
+ .. py:attribute:: project_metadata
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/hosted_app_file_read/index.rst.txt b/docs/_sources/autoapi/abacusai/hosted_app_file_read/index.rst.txt
new file mode 100644
index 000000000..4c96a7960
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/hosted_app_file_read/index.rst.txt
@@ -0,0 +1,71 @@
+abacusai.hosted_app_file_read
+=============================
+
+.. py:module:: abacusai.hosted_app_file_read
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.hosted_app_file_read.HostedAppFileRead
+
+
+Module Contents
+---------------
+
+.. py:class:: HostedAppFileRead(client, content=None, start=None, end=None, retcode=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Result of reading file content from a hosted app container.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param content: The contents of the file or a portion of it.
+ :type content: str
+ :param start: If present, the starting position of the read.
+ :type start: int
+ :param end: If present, the last position in the file returned in this read.
+ :type end: int
+   :param retcode: If the read is associated with a log, the return code of the command.
+ :type retcode: int
+
+
+ .. py:attribute:: content
+ :value: None
+
+
+
+ .. py:attribute:: start
+ :value: None
+
+
+
+ .. py:attribute:: end
+ :value: None
+
+
+
+ .. py:attribute:: retcode
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/hosted_artifact/index.rst.txt b/docs/_sources/autoapi/abacusai/hosted_artifact/index.rst.txt
new file mode 100644
index 000000000..c8eb7406e
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/hosted_artifact/index.rst.txt
@@ -0,0 +1,92 @@
+abacusai.hosted_artifact
+========================
+
+.. py:module:: abacusai.hosted_artifact
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.hosted_artifact.HostedArtifact
+
+
+Module Contents
+---------------
+
+.. py:class:: HostedArtifact(client, hostname=None, artifactType=None, llmArtifactId=None, lifecycle=None, externalApplicationId=None, deploymentConversationId=None, conversationSequenceNumber=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ A hosted artifact being served by the platform.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+   :param hostname: The URL at which the application is being hosted.
+ :type hostname: str
+ :param artifactType: The type of artifact being hosted.
+ :type artifactType: str
+ :param llmArtifactId: The artifact id being hosted.
+ :type llmArtifactId: str
+ :param lifecycle: The lifecycle of the artifact.
+ :type lifecycle: str
+ :param externalApplicationId: Agent that deployed this application.
+ :type externalApplicationId: str
+   :param deploymentConversationId: Conversation that created and deployed this artifact; null if not applicable.
+ :type deploymentConversationId: str
+ :param conversationSequenceNumber: Conversation event associated with this artifact, null if not applicable.
+ :type conversationSequenceNumber: number(integer)
+
+
+ .. py:attribute:: hostname
+ :value: None
+
+
+
+ .. py:attribute:: artifact_type
+ :value: None
+
+
+
+ .. py:attribute:: llm_artifact_id
+ :value: None
+
+
+
+ .. py:attribute:: lifecycle
+ :value: None
+
+
+
+ .. py:attribute:: external_application_id
+ :value: None
+
+
+
+ .. py:attribute:: deployment_conversation_id
+ :value: None
+
+
+
+ .. py:attribute:: conversation_sequence_number
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/hosted_database/index.rst.txt b/docs/_sources/autoapi/abacusai/hosted_database/index.rst.txt
new file mode 100644
index 000000000..b917a4dea
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/hosted_database/index.rst.txt
@@ -0,0 +1,78 @@
+abacusai.hosted_database
+========================
+
+.. py:module:: abacusai.hosted_database
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.hosted_database.HostedDatabase
+
+
+Module Contents
+---------------
+
+.. py:class:: HostedDatabase(client, hostedDatabaseId=None, displayName=None, createdAt=None, updatedAt=None, lifecycle=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Hosted Database
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param hostedDatabaseId: The ID of the hosted database
+ :type hostedDatabaseId: id
+ :param displayName: The name of the hosted database
+ :type displayName: str
+ :param createdAt: The creation timestamp
+ :type createdAt: str
+ :param updatedAt: The last update timestamp
+ :type updatedAt: str
+ :param lifecycle: The lifecycle of the hosted database
+ :type lifecycle: str
+
+
+ .. py:attribute:: hosted_database_id
+ :value: None
+
+
+
+ .. py:attribute:: display_name
+ :value: None
+
+
+
+ .. py:attribute:: created_at
+ :value: None
+
+
+
+ .. py:attribute:: updated_at
+ :value: None
+
+
+
+ .. py:attribute:: lifecycle
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/hosted_database_snapshot/index.rst.txt b/docs/_sources/autoapi/abacusai/hosted_database_snapshot/index.rst.txt
new file mode 100644
index 000000000..261653bc5
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/hosted_database_snapshot/index.rst.txt
@@ -0,0 +1,78 @@
+abacusai.hosted_database_snapshot
+=================================
+
+.. py:module:: abacusai.hosted_database_snapshot
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.hosted_database_snapshot.HostedDatabaseSnapshot
+
+
+Module Contents
+---------------
+
+.. py:class:: HostedDatabaseSnapshot(client, hostedDatabaseSnapshotId=None, srcHostedDatabaseId=None, createdAt=None, updatedAt=None, lifecycle=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Hosted Database Snapshot
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param hostedDatabaseSnapshotId: The ID of the hosted database snapshot
+ :type hostedDatabaseSnapshotId: id
+ :param srcHostedDatabaseId: The ID of the source hosted database
+ :type srcHostedDatabaseId: id
+ :param createdAt: The creation timestamp
+ :type createdAt: str
+ :param updatedAt: The last update timestamp
+ :type updatedAt: str
+ :param lifecycle: The lifecycle of the hosted database snapshot
+ :type lifecycle: str
+
+
+ .. py:attribute:: hosted_database_snapshot_id
+ :value: None
+
+
+
+ .. py:attribute:: src_hosted_database_id
+ :value: None
+
+
+
+ .. py:attribute:: created_at
+ :value: None
+
+
+
+ .. py:attribute:: updated_at
+ :value: None
+
+
+
+ .. py:attribute:: lifecycle
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/hostname_info/index.rst.txt b/docs/_sources/autoapi/abacusai/hostname_info/index.rst.txt
new file mode 100644
index 000000000..801802345
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/hostname_info/index.rst.txt
@@ -0,0 +1,57 @@
+abacusai.hostname_info
+======================
+
+.. py:module:: abacusai.hostname_info
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.hostname_info.HostnameInfo
+
+
+Module Contents
+---------------
+
+.. py:class:: HostnameInfo(client, isRootDomain=None, hasRootNameserversConfigured=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Hostname Info
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param isRootDomain: Whether the hostname is a root domain
+ :type isRootDomain: bool
+ :param hasRootNameserversConfigured: Whether the root domain has Abacus nameservers configured.
+ :type hasRootNameserversConfigured: bool
+
+
+ .. py:attribute:: is_root_domain
+ :value: None
+
+
+
+ .. py:attribute:: has_root_nameservers_configured
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/image_gen_model/index.rst.txt b/docs/_sources/autoapi/abacusai/image_gen_model/index.rst.txt
new file mode 100644
index 000000000..2872f4a98
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/image_gen_model/index.rst.txt
@@ -0,0 +1,90 @@
+abacusai.image_gen_model
+========================
+
+.. py:module:: abacusai.image_gen_model
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.image_gen_model.ImageGenModel
+
+
+Module Contents
+---------------
+
+.. py:class:: ImageGenModel(client, displayName=None, type=None, valueType=None, optional=None, default=None, helptext=None, options={})
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Image generation model
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param displayName: The display name for the UI component.
+ :type displayName: str
+ :param type: The type of the UI component.
+ :type type: str
+ :param valueType: The data type of the values within the UI component.
+ :type valueType: str
+ :param optional: Whether the selection of a value is optional.
+ :type optional: bool
+ :param default: The default value for the image generation model.
+ :type default: str
+ :param helptext: The helptext for the UI component.
+ :type helptext: str
+ :param options: The options of models available for image generation.
+ :type options: ImageGenModelOptions
+
+
+ .. py:attribute:: display_name
+ :value: None
+
+
+
+ .. py:attribute:: type
+ :value: None
+
+
+
+ .. py:attribute:: value_type
+ :value: None
+
+
+
+ .. py:attribute:: optional
+ :value: None
+
+
+
+ .. py:attribute:: default
+ :value: None
+
+
+
+ .. py:attribute:: helptext
+ :value: None
+
+
+
+ .. py:attribute:: options
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/image_gen_model_options/index.rst.txt b/docs/_sources/autoapi/abacusai/image_gen_model_options/index.rst.txt
new file mode 100644
index 000000000..1a008f0b1
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/image_gen_model_options/index.rst.txt
@@ -0,0 +1,57 @@
+abacusai.image_gen_model_options
+================================
+
+.. py:module:: abacusai.image_gen_model_options
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.image_gen_model_options.ImageGenModelOptions
+
+
+Module Contents
+---------------
+
+.. py:class:: ImageGenModelOptions(client, keys=None, values=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Image generation model options
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param keys: The keys of the image generation model options represented as the enum values.
+ :type keys: list
+ :param values: The display names of the image generation model options.
+ :type values: list
+
+
+ .. py:attribute:: keys
+ :value: None
+
+
+
+ .. py:attribute:: values
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/image_gen_settings/index.rst.txt b/docs/_sources/autoapi/abacusai/image_gen_settings/index.rst.txt
index a672af67b..77060aafb 100644
--- a/docs/_sources/autoapi/abacusai/image_gen_settings/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/image_gen_settings/index.rst.txt
@@ -15,7 +15,7 @@ Classes
Module Contents
---------------
-.. py:class:: ImageGenSettings(client, model=None, settings=None)
+.. py:class:: ImageGenSettings(client, settings=None, warnings=None, model={})
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -24,22 +24,27 @@ Module Contents
:param client: An authenticated API Client instance
:type client: ApiClient
- :param model: Dropdown for models available for image generation.
- :type model: dict
:param settings: The settings for each model.
:type settings: dict
+ :param warnings: The warnings for each model.
+ :type warnings: dict
+ :param model: Dropdown for models available for image generation.
+ :type model: ImageGenModel
- .. py:attribute:: model
+ .. py:attribute:: settings
:value: None
- .. py:attribute:: settings
+ .. py:attribute:: warnings
:value: None
+ .. py:attribute:: model
+
+
.. py:attribute:: deprecated_keys
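
With the reshuffled constructor, ``model`` is now an ``ImageGenModel`` whose ``options`` expose parallel ``keys`` and ``values`` lists. The sketch below only does attribute access and assumes ``image_gen_settings`` was returned by whichever API call your workflow already uses.

.. code-block:: python

    # Assumption: `image_gen_settings` is an ImageGenSettings instance obtained elsewhere.
    model = image_gen_settings.model
    print(model.display_name, 'default:', model.default)

    for enum_value, display_name in zip(model.options.keys, model.options.values):
        print(enum_value, '->', display_name)
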
diff --git a/docs/_sources/autoapi/abacusai/index.rst.txt b/docs/_sources/autoapi/abacusai/index.rst.txt
index a2c056bce..4f67c260e 100644
--- a/docs/_sources/autoapi/abacusai/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/index.rst.txt
@@ -33,6 +33,8 @@ Submodules
/autoapi/abacusai/app_user_group_sign_in_token/index
/autoapi/abacusai/application_connector/index
/autoapi/abacusai/audio_gen_settings/index
+ /autoapi/abacusai/audio_url_result/index
+ /autoapi/abacusai/audit_log/index
/autoapi/abacusai/batch_prediction/index
/autoapi/abacusai/batch_prediction_version/index
/autoapi/abacusai/batch_prediction_version_logs/index
@@ -41,25 +43,38 @@ Submodules
/autoapi/abacusai/chat_message/index
/autoapi/abacusai/chat_session/index
/autoapi/abacusai/chatllm_computer/index
+ /autoapi/abacusai/chatllm_computer_status/index
+ /autoapi/abacusai/chatllm_memory/index
+ /autoapi/abacusai/chatllm_project/index
+ /autoapi/abacusai/chatllm_project_permissions/index
/autoapi/abacusai/chatllm_referral_invite/index
/autoapi/abacusai/chatllm_task/index
/autoapi/abacusai/client/index
/autoapi/abacusai/code_agent_response/index
+ /autoapi/abacusai/code_autocomplete_edit_prediction_response/index
/autoapi/abacusai/code_autocomplete_response/index
/autoapi/abacusai/code_bot/index
/autoapi/abacusai/code_edit/index
/autoapi/abacusai/code_edit_response/index
/autoapi/abacusai/code_edits/index
+ /autoapi/abacusai/code_embeddings/index
+ /autoapi/abacusai/code_llm_changed_files/index
/autoapi/abacusai/code_source/index
+ /autoapi/abacusai/code_suggestion_validation_response/index
+ /autoapi/abacusai/code_summary_response/index
+ /autoapi/abacusai/codellm_embedding_constants/index
/autoapi/abacusai/compute_point_info/index
/autoapi/abacusai/concatenation_config/index
+ /autoapi/abacusai/constants_autocomplete_response/index
/autoapi/abacusai/cpu_gpu_memory_specs/index
/autoapi/abacusai/cryptography/index
/autoapi/abacusai/custom_chat_instructions/index
+ /autoapi/abacusai/custom_domain/index
/autoapi/abacusai/custom_loss_function/index
/autoapi/abacusai/custom_metric/index
/autoapi/abacusai/custom_metric_version/index
/autoapi/abacusai/custom_train_function_info/index
+ /autoapi/abacusai/daemon_task_conversation/index
/autoapi/abacusai/data_consistency_duplication/index
/autoapi/abacusai/data_metrics/index
/autoapi/abacusai/data_prep_logs/index
@@ -73,6 +88,7 @@ Submodules
/autoapi/abacusai/dataset_column/index
/autoapi/abacusai/dataset_version/index
/autoapi/abacusai/dataset_version_logs/index
+ /autoapi/abacusai/default_llm/index
/autoapi/abacusai/deployment/index
/autoapi/abacusai/deployment_auth_token/index
/autoapi/abacusai/deployment_conversation/index
@@ -83,6 +99,9 @@ Submodules
/autoapi/abacusai/document_retriever/index
/autoapi/abacusai/document_retriever_lookup_result/index
/autoapi/abacusai/document_retriever_version/index
+ /autoapi/abacusai/document_retriever_version_logs/index
+ /autoapi/abacusai/domain_purchase_result/index
+ /autoapi/abacusai/domain_search_result/index
/autoapi/abacusai/drift_distribution/index
/autoapi/abacusai/drift_distributions/index
/autoapi/abacusai/eda/index
@@ -93,7 +112,9 @@ Submodules
/autoapi/abacusai/eda_feature_collinearity/index
/autoapi/abacusai/eda_forecasting_analysis/index
/autoapi/abacusai/eda_version/index
+ /autoapi/abacusai/edit_image_models/index
/autoapi/abacusai/embedding_feature_drift_distribution/index
+ /autoapi/abacusai/entri_auth_token/index
/autoapi/abacusai/execute_feature_group_operation/index
/autoapi/abacusai/external_application/index
/autoapi/abacusai/external_invite/index
@@ -120,7 +141,6 @@ Submodules
/autoapi/abacusai/feature_importance/index
/autoapi/abacusai/feature_mapping/index
/autoapi/abacusai/feature_performance_analysis/index
- /autoapi/abacusai/feature_record/index
/autoapi/abacusai/file_connector/index
/autoapi/abacusai/file_connector_instructions/index
/autoapi/abacusai/file_connector_verification/index
@@ -128,20 +148,32 @@ Submodules
/autoapi/abacusai/forecasting_analysis_graph_data/index
/autoapi/abacusai/forecasting_monitor_item_analysis/index
/autoapi/abacusai/forecasting_monitor_summary/index
+ /autoapi/abacusai/fs_entry/index
/autoapi/abacusai/function_logs/index
/autoapi/abacusai/generated_pit_feature_config_option/index
/autoapi/abacusai/global_context/index
/autoapi/abacusai/graph_dashboard/index
/autoapi/abacusai/holdout_analysis/index
/autoapi/abacusai/holdout_analysis_version/index
+ /autoapi/abacusai/hosted_app/index
+ /autoapi/abacusai/hosted_app_container/index
+ /autoapi/abacusai/hosted_app_file_read/index
+ /autoapi/abacusai/hosted_artifact/index
+ /autoapi/abacusai/hosted_database/index
+ /autoapi/abacusai/hosted_database_snapshot/index
/autoapi/abacusai/hosted_model_token/index
+ /autoapi/abacusai/hostname_info/index
/autoapi/abacusai/hume_voice/index
+ /autoapi/abacusai/image_gen_model/index
+ /autoapi/abacusai/image_gen_model_options/index
/autoapi/abacusai/image_gen_settings/index
/autoapi/abacusai/indexing_config/index
/autoapi/abacusai/inferred_database_column_to_feature_mappings/index
/autoapi/abacusai/inferred_feature_mappings/index
/autoapi/abacusai/item_statistics/index
+ /autoapi/abacusai/lip_sync_gen_settings/index
/autoapi/abacusai/llm_app/index
+ /autoapi/abacusai/llm_artifact/index
/autoapi/abacusai/llm_code_block/index
/autoapi/abacusai/llm_execution_preview/index
/autoapi/abacusai/llm_execution_result/index
@@ -149,6 +181,10 @@ Submodules
/autoapi/abacusai/llm_input/index
/autoapi/abacusai/llm_parameters/index
/autoapi/abacusai/llm_response/index
+ /autoapi/abacusai/mcp_config/index
+ /autoapi/abacusai/mcp_server/index
+ /autoapi/abacusai/mcp_server_connection/index
+ /autoapi/abacusai/mcp_server_query_result/index
/autoapi/abacusai/memory_options/index
/autoapi/abacusai/messaging_connector_response/index
/autoapi/abacusai/model/index
@@ -203,6 +239,8 @@ Submodules
/autoapi/abacusai/prediction_log_record/index
/autoapi/abacusai/prediction_operator/index
/autoapi/abacusai/prediction_operator_version/index
+ /autoapi/abacusai/presentation_export_result/index
+ /autoapi/abacusai/private_web_app_deployment/index
/autoapi/abacusai/problem_type/index
/autoapi/abacusai/project/index
/autoapi/abacusai/project_config/index
@@ -219,33 +257,51 @@ Submodules
/autoapi/abacusai/refresh_policy/index
/autoapi/abacusai/refresh_schedule/index
/autoapi/abacusai/regenerate_llm_external_application/index
+ /autoapi/abacusai/regenerate_llm_option/index
/autoapi/abacusai/resolved_feature_group_template/index
/autoapi/abacusai/return_class/index
/autoapi/abacusai/routing_action/index
/autoapi/abacusai/schema/index
+ /autoapi/abacusai/session_summary/index
+ /autoapi/abacusai/session_transcript/index
+ /autoapi/abacusai/session_transcripts/index
/autoapi/abacusai/sftp_key/index
/autoapi/abacusai/streaming_auth_token/index
/autoapi/abacusai/streaming_client/index
/autoapi/abacusai/streaming_connector/index
/autoapi/abacusai/streaming_row_count/index
/autoapi/abacusai/streaming_sample_code/index
+ /autoapi/abacusai/sts_gen_settings/index
+ /autoapi/abacusai/stt_gen_model/index
+ /autoapi/abacusai/stt_gen_model_options/index
+ /autoapi/abacusai/stt_gen_settings/index
/autoapi/abacusai/template_node_details/index
/autoapi/abacusai/test_point_predictions/index
/autoapi/abacusai/tone_details/index
/autoapi/abacusai/training_config_options/index
+ /autoapi/abacusai/tts_gen_settings/index
/autoapi/abacusai/twitter_search_result/index
+ /autoapi/abacusai/unified_connector/index
/autoapi/abacusai/upload/index
/autoapi/abacusai/upload_part/index
/autoapi/abacusai/use_case/index
/autoapi/abacusai/use_case_requirements/index
/autoapi/abacusai/user/index
/autoapi/abacusai/user_exception/index
+ /autoapi/abacusai/user_group_object_permission/index
+ /autoapi/abacusai/video_gen_costs/index
+ /autoapi/abacusai/video_gen_model/index
+ /autoapi/abacusai/video_gen_model_options/index
/autoapi/abacusai/video_gen_settings/index
/autoapi/abacusai/video_search_result/index
/autoapi/abacusai/voice_gen_details/index
+ /autoapi/abacusai/web_app_conversation/index
+ /autoapi/abacusai/web_app_deployment_permission_dict/index
+ /autoapi/abacusai/web_app_domain/index
/autoapi/abacusai/web_page_response/index
/autoapi/abacusai/web_search_response/index
/autoapi/abacusai/web_search_result/index
+ /autoapi/abacusai/web_service_trigger_run/index
/autoapi/abacusai/webhook/index
/autoapi/abacusai/workflow_graph_node_details/index
/autoapi/abacusai/workflow_node_template/index
@@ -256,6 +312,7 @@ Attributes
.. autoapisummary::
+ abacusai.MIN_AGENT_SLEEP_TIME
abacusai.DocumentRetrieverConfig
abacusai.Segment
abacusai._request_context
@@ -299,6 +356,9 @@ Classes
abacusai.WorkflowNodeOutputSchema
abacusai.TriggerConfig
abacusai.WorkflowGraphNode
+ abacusai.DecisionNode
+ abacusai.LLMAgentNode
+ abacusai.InputNode
abacusai.WorkflowGraphEdge
abacusai.WorkflowGraph
abacusai.AgentConversationMessage
@@ -343,7 +403,10 @@ Classes
abacusai.ZendeskDatasetConfig
abacusai.AbacusUsageMetricsDatasetConfig
abacusai.TeamsScraperDatasetConfig
+ abacusai.OutlookDatasetConfig
+ abacusai.AzureStorageDatasetConfig
abacusai.FreshserviceDatasetConfig
+ abacusai.SftpDatasetConfig
abacusai._ApplicationConnectorDatasetConfigFactory
abacusai.PredictionArguments
abacusai.OptimizationPredictionArguments
@@ -420,6 +483,8 @@ Classes
abacusai.ResponseSectionType
abacusai.CodeLanguage
abacusai.DeploymentConversationType
+ abacusai.AgentClientType
+ abacusai.OrganizationSecretType
abacusai.SamplingConfig
abacusai.NSamplingConfig
abacusai.PercentSamplingConfig
@@ -442,6 +507,7 @@ Classes
abacusai.ForecastingTrainingConfig
abacusai.NamedEntityExtractionTrainingConfig
abacusai.NaturalLanguageSearchTrainingConfig
+ abacusai.SystemConnectorTool
abacusai.ChatLLMTrainingConfig
abacusai.SentenceBoundaryDetectionTrainingConfig
abacusai.SentimentDetectionTrainingConfig
@@ -509,6 +575,8 @@ Classes
abacusai.AppUserGroupSignInToken
abacusai.ApplicationConnector
abacusai.AudioGenSettings
+ abacusai.AudioUrlResult
+ abacusai.AuditLog
abacusai.BatchPrediction
abacusai.BatchPredictionVersion
abacusai.BatchPredictionVersionLogs
@@ -517,27 +585,41 @@ Classes
abacusai.ChatMessage
abacusai.ChatSession
abacusai.ChatllmComputer
+ abacusai.ChatllmComputerStatus
+ abacusai.ChatllmMemory
+ abacusai.ChatllmProject
+ abacusai.ChatllmProjectPermissions
abacusai.ChatllmReferralInvite
abacusai.ChatllmTask
abacusai.AgentResponse
abacusai.ApiClient
abacusai.ClientOptions
abacusai.ReadOnlyClient
+ abacusai.ToolResponse
abacusai.CodeAgentResponse
+ abacusai.CodeAutocompleteEditPredictionResponse
abacusai.CodeAutocompleteResponse
abacusai.CodeBot
abacusai.CodeEdit
abacusai.CodeEditResponse
abacusai.CodeEdits
+ abacusai.CodeEmbeddings
+ abacusai.CodeLlmChangedFiles
abacusai.CodeSource
+ abacusai.CodeSuggestionValidationResponse
+ abacusai.CodeSummaryResponse
+ abacusai.CodellmEmbeddingConstants
abacusai.ComputePointInfo
abacusai.ConcatenationConfig
+ abacusai.ConstantsAutocompleteResponse
abacusai.CpuGpuMemorySpecs
abacusai.CustomChatInstructions
+ abacusai.CustomDomain
abacusai.CustomLossFunction
abacusai.CustomMetric
abacusai.CustomMetricVersion
abacusai.CustomTrainFunctionInfo
+ abacusai.DaemonTaskConversation
abacusai.DataConsistencyDuplication
abacusai.DataMetrics
abacusai.DataPrepLogs
@@ -551,6 +633,7 @@ Classes
abacusai.DatasetColumn
abacusai.DatasetVersion
abacusai.DatasetVersionLogs
+ abacusai.DefaultLlm
abacusai.Deployment
abacusai.DeploymentAuthToken
abacusai.DeploymentConversation
@@ -561,6 +644,9 @@ Classes
abacusai.DocumentRetriever
abacusai.DocumentRetrieverLookupResult
abacusai.DocumentRetrieverVersion
+ abacusai.DocumentRetrieverVersionLogs
+ abacusai.DomainPurchaseResult
+ abacusai.DomainSearchResult
abacusai.DriftDistribution
abacusai.DriftDistributions
abacusai.Eda
@@ -571,7 +657,9 @@ Classes
abacusai.EdaFeatureCollinearity
abacusai.EdaForecastingAnalysis
abacusai.EdaVersion
+ abacusai.EditImageModels
abacusai.EmbeddingFeatureDriftDistribution
+ abacusai.EntriAuthToken
abacusai.ExecuteFeatureGroupOperation
abacusai.ExternalApplication
abacusai.ExternalInvite
@@ -598,7 +686,6 @@ Classes
abacusai.FeatureImportance
abacusai.FeatureMapping
abacusai.FeaturePerformanceAnalysis
- abacusai.FeatureRecord
abacusai.FileConnector
abacusai.FileConnectorInstructions
abacusai.FileConnectorVerification
@@ -606,19 +693,31 @@ Classes
abacusai.ForecastingAnalysisGraphData
abacusai.ForecastingMonitorItemAnalysis
abacusai.ForecastingMonitorSummary
+ abacusai.FsEntry
abacusai.FunctionLogs
abacusai.GeneratedPitFeatureConfigOption
abacusai.GraphDashboard
abacusai.HoldoutAnalysis
abacusai.HoldoutAnalysisVersion
+ abacusai.HostedApp
+ abacusai.HostedAppContainer
+ abacusai.HostedAppFileRead
+ abacusai.HostedArtifact
+ abacusai.HostedDatabase
+ abacusai.HostedDatabaseSnapshot
abacusai.HostedModelToken
+ abacusai.HostnameInfo
abacusai.HumeVoice
+ abacusai.ImageGenModel
+ abacusai.ImageGenModelOptions
abacusai.ImageGenSettings
abacusai.IndexingConfig
abacusai.InferredDatabaseColumnToFeatureMappings
abacusai.InferredFeatureMappings
abacusai.ItemStatistics
+ abacusai.LipSyncGenSettings
abacusai.LlmApp
+ abacusai.LlmArtifact
abacusai.LlmCodeBlock
abacusai.LlmExecutionPreview
abacusai.LlmExecutionResult
@@ -626,6 +725,10 @@ Classes
abacusai.LlmInput
abacusai.LlmParameters
abacusai.LlmResponse
+ abacusai.McpConfig
+ abacusai.McpServer
+ abacusai.McpServerConnection
+ abacusai.McpServerQueryResult
abacusai.MemoryOptions
abacusai.MessagingConnectorResponse
abacusai.Model
@@ -680,6 +783,8 @@ Classes
abacusai.PredictionLogRecord
abacusai.PredictionOperator
abacusai.PredictionOperatorVersion
+ abacusai.PresentationExportResult
+ abacusai.PrivateWebAppDeployment
abacusai.ProblemType
abacusai.Project
abacusai.ProjectConfig
@@ -695,32 +800,50 @@ Classes
abacusai.RefreshPolicy
abacusai.RefreshSchedule
abacusai.RegenerateLlmExternalApplication
+ abacusai.RegenerateLlmOption
abacusai.ResolvedFeatureGroupTemplate
abacusai.RoutingAction
abacusai.Schema
+ abacusai.SessionSummary
+ abacusai.SessionTranscript
+ abacusai.SessionTranscripts
abacusai.SftpKey
abacusai.StreamingAuthToken
abacusai.StreamingClient
abacusai.StreamingConnector
abacusai.StreamingRowCount
abacusai.StreamingSampleCode
+ abacusai.StsGenSettings
+ abacusai.SttGenModel
+ abacusai.SttGenModelOptions
+ abacusai.SttGenSettings
abacusai.TemplateNodeDetails
abacusai.TestPointPredictions
abacusai.ToneDetails
abacusai.TrainingConfigOptions
+ abacusai.TtsGenSettings
abacusai.TwitterSearchResult
+ abacusai.UnifiedConnector
abacusai.Upload
abacusai.UploadPart
abacusai.UseCase
abacusai.UseCaseRequirements
abacusai.User
abacusai.UserException
+ abacusai.UserGroupObjectPermission
+ abacusai.VideoGenCosts
+ abacusai.VideoGenModel
+ abacusai.VideoGenModelOptions
abacusai.VideoGenSettings
abacusai.VideoSearchResult
abacusai.VoiceGenDetails
+ abacusai.WebAppConversation
+ abacusai.WebAppDeploymentPermissionDict
+ abacusai.WebAppDomain
abacusai.WebPageResponse
abacusai.WebSearchResponse
abacusai.WebSearchResult
+ abacusai.WebServiceTriggerRun
abacusai.Webhook
abacusai.WorkflowGraphNodeDetails
abacusai.WorkflowNodeTemplate
@@ -2037,7 +2160,11 @@ Package Contents
.. py:function:: validate_constructor_arg_types(friendly_class_name=None)
-.. py:function:: validate_input_dict_param(dict_object, friendly_class_name, must_contain=[])
+.. py:data:: MIN_AGENT_SLEEP_TIME
+ :value: 300
+
+
+.. py:function:: validate_input_dict_param(dict_object, friendly_class_name, must_contain=[], schema={})
.. py:class:: FieldDescriptor
@@ -2108,6 +2235,8 @@ Package Contents
:type is_required: bool
:param description: The description of this input.
:type description: str
+ :param long_description: The detailed description of this input.
+ :type long_description: str
:param constant_value: The constant value of this input if variable type is CONSTANT. Only applicable for template nodes.
:type constant_value: str
@@ -2144,6 +2273,12 @@ Package Contents
+ .. py:attribute:: long_description
+ :type: str
+ :value: None
+
+
+
.. py:attribute:: constant_value
:type: str
:value: None
@@ -2155,11 +2290,6 @@ Package Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:method:: from_dict(mapping)
:classmethod:
@@ -2209,11 +2339,6 @@ Package Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:method:: from_dict(schema)
:classmethod:
@@ -2244,6 +2369,17 @@ Package Contents
+ .. py:method:: from_tool_variable_mappings(tool_variable_mappings)
+ :classmethod:
+
+
+ Creates a WorkflowNodeInputSchema for the given tool variable mappings.
+
+ :param tool_variable_mappings: The tool variable mappings for the node.
+ :type tool_variable_mappings: List[dict]
+
+
+
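+ A minimal, illustrative sketch of building an input schema from tool variable mappings.
+ The mapping field names used below (``name``, ``is_required``, ``description``) are
+ assumptions for illustration, not a documented contract.
+
+ .. code-block:: python
+
+    from abacusai import WorkflowNodeInputSchema
+
+    # Hypothetical tool variable mappings; the dictionary keys are illustrative only.
+    tool_variable_mappings = [
+        {'name': 'query', 'is_required': True, 'description': 'Search query text'},
+        {'name': 'top_k', 'is_required': False, 'description': 'Number of results'},
+    ]
+    input_schema = WorkflowNodeInputSchema.from_tool_variable_mappings(tool_variable_mappings)
+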
.. py:class:: WorkflowNodeOutputMapping
Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
@@ -2257,6 +2393,8 @@ Package Contents
:type variable_type: Union[WorkflowNodeOutputType, str]
:param description: The description of this output.
:type description: str
+ :param long_description: The detailed description of this output.
+ :type long_description: str
.. py:attribute:: name
@@ -2273,16 +2411,17 @@ Package Contents
- .. py:method:: __post_init__()
+ .. py:attribute:: long_description
+ :type: str
+ :value: None
- .. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
+ .. py:method:: __post_init__()
+ .. py:method:: to_dict()
+
.. py:method:: from_dict(mapping)
:classmethod:
@@ -2306,11 +2445,6 @@ Package Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:method:: from_dict(schema)
:classmethod:
@@ -2336,18 +2470,13 @@ Package Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:method:: from_dict(configs)
:classmethod:
-.. py:class:: WorkflowGraphNode(name, function = None, input_mappings = None, output_mappings = None, function_name = None, source_code = None, input_schema = None, output_schema = None, template_metadata = None, trigger_config = None)
+.. py:class:: WorkflowGraphNode(name, function = None, input_mappings = None, output_mappings = None, function_name = None, source_code = None, input_schema = None, output_schema = None, template_metadata = None, trigger_config = None, description = None)
Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
@@ -2358,14 +2487,16 @@ Package Contents
:type name: str
:param input_mappings: List of input mappings for the node. Each arg/kwarg of the node function should have a corresponding input mapping.
:type input_mappings: List[WorkflowNodeInputMapping]
- :param output_mappings: List of output mappings for the node. Each field in the returned dict/AgentResponse must have a corresponding output mapping.
- :type output_mappings: List[WorkflowNodeOutputMapping]
+ :param output_mappings: List of outputs for the node. Each field in the returned dict/AgentResponse must have a corresponding output in the list.
+ :type output_mappings: List[str]
:param function: The callable node function reference.
:type function: callable
- :param input_schema: The react json schema for the user input variables.
+ :param input_schema: The react json schema for the user input variables. This should be empty for the CHAT interface.
:type input_schema: WorkflowNodeInputSchema
- :param output_schema: The react json schema for the output to be shown on UI.
- :type output_schema: WorkflowNodeOutputSchema
+ :param output_schema: The list of outputs to be shown on the UI. Each output corresponds to a field in the output mappings of the node.
+ :type output_schema: List[str]
+ :param description: A description of the workflow node.
+ :type description: str
Additional Attributes:
function_name (str): The name of the function.
@@ -2373,6 +2504,17 @@ Package Contents
trigger_config (TriggerConfig): The configuration for a trigger workflow node.
+ .. py:method:: is_special_node_type()
+ :classmethod:
+
+
+ Returns True if this node type skips source code validation.
+
+ Special node types (like InputNode and LLMAgentNode) are executed
+ directly without source code validation.
+
+
+
.. py:attribute:: template_metadata
:value: None
@@ -2383,7 +2525,17 @@ Package Contents
- .. py:method:: _raw_init(name, input_mappings = None, output_mappings = None, function = None, function_name = None, source_code = None, input_schema = None, output_schema = None, template_metadata = None, trigger_config = None)
+ .. py:attribute:: node_type
+ :value: 'workflow_node'
+
+
+
+ .. py:attribute:: description
+ :value: None
+
+
+
+ .. py:method:: _raw_init(name, input_mappings = None, output_mappings = None, function = None, function_name = None, source_code = None, input_schema = None, output_schema = None, template_metadata = None, trigger_config = None, description = None)
:classmethod:
@@ -2393,12 +2545,47 @@ Package Contents
- .. py:method:: to_dict()
+ .. py:method:: from_tool(tool_name, name, configs = None, input_mappings = None, input_schema = None, output_schema = None)
+ :classmethod:
+
+
+ Creates and returns a WorkflowGraphNode based on an available user created tool.
+ Note: DO NOT specify the output mapping for the tool; it will be inferred automatically. Doing so will raise an error.
+
+ :param tool_name: The name of the tool. There should already be a tool created in the platform with tool_name.
+ :param name: The name to assign to the WorkflowGraphNode instance.
+ :param configs: The configuration state of the tool to use (if necessary). If not specified, will use the tool's default configuration.
+ :type configs: optional
+ :param input_mappings: The WorkflowNodeInputMappings for this node.
+ :type input_mappings: optional
+ :param input_schema: The WorkflowNodeInputSchema for this node.
+ :type input_schema: optional
+ :param output_schema: The WorkflowNodeOutputSchema for this node.
+ :type output_schema: optional
+
+
+
+ .. py:method:: from_system_tool(tool_name, name, configs = None, input_mappings = None, input_schema = None, output_schema = None)
+ :classmethod:
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
+ Creates and returns a WorkflowGraphNode based on the name of an available system tool.
+ Note: DO NOT specify the output mapping for the tool; it will be inferred automatically. Doing so will raise an error.
+
+ :param tool_name: The name of the tool. There should already be a tool created in the platform with tool_name.
+ :param name: The name to assign to the WorkflowGraphNode instance.
+ :param configs: The configuration state of the tool to use (if necessary). If not specified, will use the tool's default configuration.
+ :type configs: optional
+ :param input_mappings: The WorkflowNodeInputMappings for this node.
+ :type input_mappings: optional
+ :param input_schema: The WorkflowNodeInputSchema for this node.
+ :type input_schema: optional
+ :param output_schema: The WorkflowNodeOutputSchema for this node.
+ :type output_schema: optional
+
+
+
+ .. py:method:: to_dict()
.. py:method:: is_template_node()
@@ -2430,6 +2617,262 @@ Package Contents
.. py:property:: outputs
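+
+A minimal sketch of creating tool-backed nodes with ``from_tool`` and ``from_system_tool``.
+The tool names and config keys used here are hypothetical placeholders rather than names
+shipped with the platform; output mappings are omitted on purpose because they are inferred
+automatically.
+
+.. code-block:: python
+
+   from abacusai import WorkflowGraphNode
+
+   # A user-created tool registered in the platform under a hypothetical name.
+   summarize_node = WorkflowGraphNode.from_tool(
+       tool_name='document_summarizer',
+       name='summarize_docs',
+   )
+
+   # A system tool, again with a hypothetical name and configuration.
+   search_node = WorkflowGraphNode.from_system_tool(
+       tool_name='web_search',
+       name='search_the_web',
+       configs={'max_results': 5},
+   )
+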
+.. py:class:: DecisionNode(name, condition, input_mappings, description = None)
+
+ Bases: :py:obj:`WorkflowGraphNode`
+
+
+ Represents a decision node in an Agent workflow graph. It is connected between two workflow nodes and is used to determine if subsequent nodes should be executed.
+
+
+ .. py:attribute:: node_type
+ :value: 'decision_node'
+
+
+
+ .. py:attribute:: name
+
+
+ .. py:attribute:: source_code
+
+
+ .. py:attribute:: description
+ :value: None
+
+
+
+ .. py:attribute:: output_mappings
+
+
+ .. py:attribute:: template_metadata
+ :value: None
+
+
+
+ .. py:attribute:: trigger_config
+ :value: None
+
+
+
+ .. py:attribute:: input_schema
+ :value: None
+
+
+
+ .. py:attribute:: output_schema
+ :value: None
+
+
+
+ .. py:attribute:: function_name
+ :value: None
+
+
+
+ .. py:method:: to_dict()
+
+
+ .. py:method:: from_dict(node)
+ :classmethod:
+
+
+
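+A minimal sketch of wiring a decision node between two workflow nodes. Only the constructor
+arguments come from the signature above; the condition expression syntax, the upstream node
+name, and the input mapping arguments are illustrative assumptions.
+
+.. code-block:: python
+
+   from abacusai import DecisionNode, WorkflowNodeInputMapping, WorkflowNodeInputType
+
+   # Continue to downstream nodes only when the upstream node reported success.
+   gate = DecisionNode(
+       name='check_status',
+       condition='status == "ok"',  # hypothetical expression syntax
+       input_mappings=[
+           WorkflowNodeInputMapping(
+               name='status',
+               variable_type=WorkflowNodeInputType.WORKFLOW_VARIABLE,  # assumed enum member
+               variable_source='upstream_node',  # hypothetical upstream node name
+           ),
+       ],
+       description='Only continue when the upstream node succeeded.',
+   )
+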
+.. py:class:: LLMAgentNode(name, chatbot_deployment_id = None, chatbot_parameters = None, input_schema = None, output_schema = None)
+
+ Bases: :py:obj:`WorkflowGraphNode`
+
+
+ Represents an LLM agent node in an Agent workflow graph. The LLM Agent Node can be initialized using either chatbot_deployment_id or chatbot_parameters.
+
+ :param name: A unique name for the LLM agent node.
+ :type name: str
+ :param chatbot_deployment_id: The deployment ID of the chatbot to use for this node. If not provided, a new chatbot will be created.
+ :type chatbot_deployment_id: str
+ :param chatbot_parameters: Parameters for configuring the chatbot, such as data_feature_group_ids and document_retrievers.
+ :type chatbot_parameters: dict
+ :param uid: A unique identifier for the LLM agent node. If not provided, a unique ID will be auto-generated.
+ :type uid: str
+
+
+ .. py:attribute:: name
+ :type: str
+
+
+ .. py:attribute:: chatbot_deployment_id
+ :type: str
+ :value: None
+
+
+
+ .. py:attribute:: chatbot_parameters
+ :type: dict
+
+
+ .. py:attribute:: uid
+ :type: str
+ :value: None
+
+
+
+ .. py:method:: is_special_node_type()
+ :classmethod:
+
+
+ LLM agent nodes skip source code validation.
+
+
+
+ .. py:attribute:: node_type
+ :value: 'llm_agent_node'
+
+
+
+ .. py:attribute:: source_code
+ :value: None
+
+
+
+ .. py:method:: get_source_code()
+
+
+ .. py:method:: get_function_name()
+
+
+ .. py:method:: to_dict()
+
+
+ .. py:method:: from_dict(node)
+ :classmethod:
+
+
+
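+A minimal sketch of the two initialization styles described above: reusing an existing chatbot
+deployment, or letting the node create a chatbot from ``chatbot_parameters``. The deployment ID
+and parameter values are hypothetical placeholders; the parameter keys follow the examples given
+in the docstring (data_feature_group_ids, document_retrievers).
+
+.. code-block:: python
+
+   from abacusai import LLMAgentNode
+
+   # Reuse an already-deployed chatbot (hypothetical deployment ID).
+   agent_from_deployment = LLMAgentNode(
+       name='support_agent',
+       chatbot_deployment_id='1234abcd',
+   )
+
+   # Or let the node create a chatbot from configuration parameters.
+   agent_from_parameters = LLMAgentNode(
+       name='kb_agent',
+       chatbot_parameters={
+           'data_feature_group_ids': ['fg_sales_data'],         # hypothetical IDs
+           'document_retrievers': ['product_docs_retriever'],   # hypothetical retriever name
+       },
+   )
+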
+.. py:class:: InputNode(name, input_mappings = None, output_mappings = None, input_schema = None, output_schema = None, description = None, function_name = None, source_code = None, uid = None)
+
+ Bases: :py:obj:`WorkflowGraphNode`
+
+
+ Represents an input node in an agent workflow graph.
+
+ Accepts multiple user inputs (text and file) and processes them into a single combined text output.
+ File inputs are processed using OCR/document extraction when applicable.
+
+ :param name: The name of the input node.
+ :type name: str
+ :param input_mappings: The input mappings for the input node. If not provided, defaults to a text_input and file_input.
+ :type input_mappings: Union[Dict[str, WorkflowNodeInputMapping], List[WorkflowNodeInputMapping]]
+ :param output_mappings: The output mappings for the input node. If not provided, defaults to a single 'answer' output.
+ :type output_mappings: Union[List[str], Dict[str, str], List[WorkflowNodeOutputMapping]]
+ :param input_schema: The input schema for the input node. If not provided, will be generated from input_mappings.
+ :type input_schema: WorkflowNodeInputSchema
+ :param output_schema: The output schema for the input node. If not provided, will be generated from output_mappings.
+ :type output_schema: WorkflowNodeOutputSchema
+ :param description: The description of the input node.
+ :type description: str
+ :param uid: A unique identifier for the input node. Auto-generated if not provided.
+ :type uid: str
+
+
+ .. py:attribute:: name
+ :type: str
+
+
+ .. py:attribute:: input_mappings
+ :type: Union[Dict[str, WorkflowNodeInputMapping], List[WorkflowNodeInputMapping]]
+ :value: None
+
+
+
+ .. py:attribute:: output_mappings
+ :type: Union[List[str], Dict[str, str], List[WorkflowNodeOutputMapping]]
+ :value: None
+
+
+
+ .. py:attribute:: input_schema
+ :type: WorkflowNodeInputSchema
+ :value: None
+
+
+
+ .. py:attribute:: output_schema
+ :type: WorkflowNodeOutputSchema
+ :value: None
+
+
+
+ .. py:attribute:: description
+ :type: str
+ :value: None
+
+
+
+ .. py:attribute:: uid
+ :type: str
+ :value: None
+
+
+
+ .. py:method:: is_special_node_type()
+ :classmethod:
+
+
+ Input nodes skip source code validation.
+
+
+
+ .. py:method:: _mark_file_inputs_in_schema(input_schema)
+ :staticmethod:
+
+
+ Mark file inputs in the schema based on format and ui:widget indicators.
+
+
+
+ .. py:attribute:: node_type
+ :value: 'input_node'
+
+
+
+ .. py:attribute:: source_code
+ :value: None
+
+
+
+ .. py:attribute:: template_metadata
+ :value: None
+
+
+
+ .. py:attribute:: trigger_config
+ :value: None
+
+
+
+ .. py:method:: to_dict()
+
+ Return snake_case keys to match return filter expectations.
+ (The base ApiClass.to_dict() camelCases keys, which causes the
+ WorkflowGraphNodeDetails return filter to miss fields.)
+
+
+
+ .. py:method:: from_dict(node)
+ :classmethod:
+
+
+
+ .. py:method:: get_function_name()
+
+
+ .. py:method:: get_source_code()
+
+ Return the stored source code, if any.
+
+
+
+ .. py:method:: get_input_schema()
+
+
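+A minimal sketch of an input node. With only a ``name`` it falls back to the defaults described
+above (a text input and a file input combined into a single 'answer' output); the second variant
+shows an explicit output name and description, which are illustrative choices.
+
+.. code-block:: python
+
+   from abacusai import InputNode
+
+   # Default input node: text + file inputs, single 'answer' output.
+   entry = InputNode(name='user_input')
+
+   # Same node with an explicit description and a custom output name.
+   entry_custom = InputNode(
+       name='user_input',
+       output_mappings=['combined_text'],
+       description='Collects the user question and any uploaded documents.',
+   )
+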
.. py:class:: WorkflowGraphEdge(source, target, details = None)
Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
@@ -2463,6 +2906,11 @@ Package Contents
.. py:method:: to_nx_edge()
+ .. py:method:: from_dict(input_dict)
+ :classmethod:
+
+
+
.. py:class:: WorkflowGraph
Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
@@ -2470,20 +2918,16 @@ Package Contents
Represents an Agent workflow graph.
- The edges define the node invocation order.
-
:param nodes: A list of nodes in the workflow graph.
- :type nodes: List[WorkflowGraphNode]
- :param edges: A list of edges in the workflow graph, where each edge is a tuple of source, target, and details.
- :type edges: List[WorkflowGraphEdge]
+ :type nodes: List[Union[WorkflowGraphNode, DecisionNode, LLMAgentNode]]
:param primary_start_node: The primary node to start the workflow from.
- :type primary_start_node: Union[str, WorkflowGraphNode]
+ :type primary_start_node: Union[str, WorkflowGraphNode, LLMAgentNode]
:param common_source_code: Common source code that can be used across all nodes.
:type common_source_code: str
.. py:attribute:: nodes
- :type: List[WorkflowGraphNode]
+ :type: List[Union[WorkflowGraphNode, DecisionNode, LLMAgentNode]]
:value: []
@@ -2495,7 +2939,7 @@ Package Contents
.. py:attribute:: primary_start_node
- :type: Union[str, WorkflowGraphNode]
+ :type: Union[str, WorkflowGraphNode, LLMAgentNode]
:value: None
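+
+A minimal sketch of assembling a graph from the node types now accepted by ``nodes``. It reuses
+the illustrative nodes sketched above; any additional wiring required between nodes depends on
+the agent being built and is not shown here.
+
+.. code-block:: python
+
+   from abacusai import WorkflowGraph, InputNode, LLMAgentNode
+
+   entry = InputNode(name='user_input')
+   agent = LLMAgentNode(name='support_agent', chatbot_deployment_id='1234abcd')  # hypothetical ID
+
+   graph = WorkflowGraph(
+       nodes=[entry, agent],
+       primary_start_node=entry,
+   )
+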
@@ -2517,11 +2961,6 @@ Package Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:method:: from_dict(graph)
:classmethod:
@@ -2563,11 +3002,6 @@ Package Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:class:: WorkflowNodeTemplateConfig
@@ -2610,11 +3044,6 @@ Package Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:method:: from_dict(mapping)
:classmethod:
@@ -2634,6 +3063,8 @@ Package Contents
:type is_required: bool
:param description: The description of this input.
:type description: str
+ :param long_description: The detailed description of this input.
+ :type long_description: str
.. py:attribute:: name
@@ -2652,12 +3083,13 @@ Package Contents
- .. py:method:: to_dict()
+ .. py:attribute:: long_description
+ :type: str
+ :value: ''
+
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
+ .. py:method:: to_dict()
.. py:method:: from_dict(mapping)
@@ -2678,6 +3110,8 @@ Package Contents
:type variable_type: WorkflowNodeOutputType
:param description: The description of this output.
:type description: str
+ :param long_description: The detailed description of this output.
+ :type long_description: str
.. py:attribute:: name
@@ -2694,12 +3128,13 @@ Package Contents
- .. py:method:: to_dict()
+ .. py:attribute:: long_description
+ :type: str
+ :value: ''
+
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
+ .. py:method:: to_dict()
.. py:method:: from_dict(mapping)
@@ -3055,6 +3490,8 @@ Package Contents
:param for_eval: If True, the test fold which was created during training and used for metrics calculation will be used as input data. These predictions are hence used for model evaluation.
:type for_eval: bool
+ :param files_input_location: The input location for the files.
+ :type files_input_location: str
:param files_output_location_prefix: The output location prefix for the files.
:type files_output_location_prefix: str
:param channel_id_to_label_map: JSON string for the map from channel ids to their labels.
@@ -3067,6 +3504,12 @@ Package Contents
+ .. py:attribute:: files_input_location
+ :type: str
+ :value: None
+
+
+
.. py:attribute:: files_output_location_prefix
:type: str
:value: None
@@ -3218,10 +3661,6 @@ Package Contents
Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`
- Helper class that provides a standard way to create an ABC using
- inheritance.
-
-
.. py:attribute:: config_abstract_class
@@ -3359,10 +3798,6 @@ Package Contents
Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`
- Helper class that provides a standard way to create an ABC using
- inheritance.
-
-
.. py:attribute:: config_abstract_class
@@ -3409,6 +3844,11 @@ Package Contents
+ .. py:attribute:: COMPREHENSIVE_MARKDOWN
+ :value: 'COMPREHENSIVE_MARKDOWN'
+
+
+
.. py:method:: is_ocr_forced(document_type)
:classmethod:
@@ -3754,6 +4194,8 @@ Package Contents
:type pull_attachments: bool
:param extract_bounding_boxes: Whether to extract bounding boxes from the documents
:type extract_bounding_boxes: bool
+ :param location_type: The type of location to be fetched. Maps values in `location` to content type, example: 'spaceKey/folderTitle/*' -> 'folder'
+ :type location_type: str
.. py:attribute:: location
@@ -3780,6 +4222,12 @@ Package Contents
+ .. py:attribute:: location_type
+ :type: str
+ :value: None
+
+
+
.. py:method:: __post_init__()
@@ -3869,6 +4317,8 @@ Package Contents
:type extract_bounding_boxes: bool
:param merge_file_schemas: Signifies if the merge file schema policy is enabled. Not applicable if is_documentset is True
:type merge_file_schemas: bool
+ :param location_type: The type of path to fetch. 'shared' for shared files. If not provided, it will fetch from the root folder.
+ :type location_type: str
.. py:attribute:: location
@@ -3895,6 +4345,12 @@ Package Contents
+ .. py:attribute:: location_type
+ :type: str
+ :value: None
+
+
+
.. py:method:: __post_init__()
@@ -4001,6 +4457,8 @@ Package Contents
:type extract_bounding_boxes: bool
:param merge_file_schemas: Signifies if the merge file schema policy is enabled. Not applicable if is_documentset is True
:type merge_file_schemas: bool
+ :param add_file_metadata: Signifies if the file metadata should be added to the dataset
+ :type add_file_metadata: bool
.. py:attribute:: location
@@ -4027,6 +4485,12 @@ Package Contents
+ .. py:attribute:: add_file_metadata
+ :type: bool
+ :value: False
+
+
+
.. py:method:: __post_init__()
@@ -4100,6 +4564,8 @@ Package Contents
:type pull_channel_posts: bool
:param pull_transcripts: Whether to pull transcripts for calendar meetings
:type pull_transcripts: bool
+ :param max_days_to_lookback: The maximum number of days to look back for data
+ :type max_days_to_lookback: int
.. py:attribute:: pull_chat_messages
@@ -4120,273 +4586,182 @@ Package Contents
- .. py:method:: __post_init__()
-
-
-.. py:class:: FreshserviceDatasetConfig
-
- Bases: :py:obj:`ApplicationConnectorDatasetConfig`
-
+ .. py:attribute:: max_days_to_lookback
+ :type: int
+ :value: 365
- Dataset config for Freshservice Application Connector
.. py:method:: __post_init__()
-.. py:class:: _ApplicationConnectorDatasetConfigFactory
-
- Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`
-
-
- Helper class that provides a standard way to create an ABC using
- inheritance.
-
-
- .. py:attribute:: config_abstract_class
-
-
- .. py:attribute:: config_class_key
- :value: 'application_connector_type'
+.. py:class:: OutlookDatasetConfig
+ Bases: :py:obj:`ApplicationConnectorDatasetConfig`
- .. py:attribute:: config_class_map
+ Dataset config for Outlook Application Connector
-.. py:class:: PredictionArguments
-
- Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
-
+ .. py:method:: __post_init__()
- An abstract class for prediction arguments specific to problem type.
+.. py:class:: AzureStorageDatasetConfig
- .. py:attribute:: _support_kwargs
- :type: bool
- :value: True
+ Bases: :py:obj:`ApplicationConnectorDatasetConfig`
+ Dataset config for Azure Storage Application Connector
- .. py:attribute:: kwargs
- :type: dict
+ :param location: The location of the files to fetch
+ :type location: str
+ :param merge_file_schemas: Signifies if the merge file schema policy is enabled. Not applicable if is_documentset is True
+ :type merge_file_schemas: bool
- .. py:attribute:: problem_type
- :type: abacusai.api_class.enums.ProblemType
+ .. py:attribute:: location
+ :type: str
:value: None
- .. py:method:: _get_builder()
- :classmethod:
-
-
-
-.. py:class:: OptimizationPredictionArguments
-
- Bases: :py:obj:`PredictionArguments`
-
-
- Prediction arguments for the OPTIMIZATION problem type
-
- :param forced_assignments: Set of assignments to force and resolve before returning query results.
- :type forced_assignments: dict
- :param solve_time_limit_seconds: Maximum time in seconds to spend solving the query.
- :type solve_time_limit_seconds: float
- :param include_all_assignments: If True, will return all assignments, including assignments with value 0. Default is False.
- :type include_all_assignments: bool
+ .. py:attribute:: merge_file_schemas
+ :type: bool
+ :value: False
- .. py:attribute:: forced_assignments
- :type: dict
- :value: None
+ .. py:method:: __post_init__()
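+
+
+ A minimal sketch of configuring an Azure Storage dataset; the container path is a hypothetical
+ placeholder.
+
+ .. code-block:: python
+
+    from abacusai import AzureStorageDatasetConfig
+
+    config = AzureStorageDatasetConfig(
+        location='my-container/path/to/files/*',  # hypothetical location
+        merge_file_schemas=True,
+    )
+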
- .. py:attribute:: solve_time_limit_seconds
- :type: float
- :value: None
-
+.. py:class:: FreshserviceDatasetConfig
+ Bases: :py:obj:`ApplicationConnectorDatasetConfig`
- .. py:attribute:: include_all_assignments
- :type: bool
- :value: None
+ Dataset config for Freshservice Application Connector
.. py:method:: __post_init__()
-.. py:class:: TimeseriesAnomalyPredictionArguments
+.. py:class:: SftpDatasetConfig
- Bases: :py:obj:`PredictionArguments`
+ Bases: :py:obj:`ApplicationConnectorDatasetConfig`
- Prediction arguments for the TS_ANOMALY problem type
+ Dataset config for SFTP Application Connector
- :param start_timestamp: Timestamp from which anomalies have to be detected in the training data
- :type start_timestamp: str
- :param end_timestamp: Timestamp to which anomalies have to be detected in the training data
- :type end_timestamp: str
- :param get_all_item_data: If True, anomaly detection has to be performed on all the data related to input ids
- :type get_all_item_data: bool
+ :param location: The regex location of the files to fetch
+ :type location: str
+ :param csv_delimiter: If the file format is CSV, use a specific csv delimiter
+ :type csv_delimiter: str
+ :param extract_bounding_boxes: Signifies whether to extract bounding boxes out of the documents. Only valid if is_documentset is True
+ :type extract_bounding_boxes: bool
+ :param merge_file_schemas: Signifies if the merge file schema policy is enabled. Not applicable if is_documentset is True
+ :type merge_file_schemas: bool
- .. py:attribute:: start_timestamp
+ .. py:attribute:: location
:type: str
:value: None
- .. py:attribute:: end_timestamp
+ .. py:attribute:: csv_delimiter
:type: str
:value: None
- .. py:attribute:: get_all_item_data
+ .. py:attribute:: extract_bounding_boxes
:type: bool
- :value: None
-
-
-
- .. py:method:: __post_init__()
+ :value: False
-.. py:class:: ChatLLMPredictionArguments
- Bases: :py:obj:`PredictionArguments`
+ .. py:attribute:: merge_file_schemas
+ :type: bool
+ :value: False
- Prediction arguments for the CHAT_LLM problem type
- :param llm_name: Name of the specific LLM backend to use to power the chat experience.
- :type llm_name: str
- :param num_completion_tokens: Default for maximum number of tokens for chat answers.
- :type num_completion_tokens: int
- :param system_message: The generative LLM system message.
- :type system_message: str
- :param temperature: The generative LLM temperature.
- :type temperature: float
- :param search_score_cutoff: Cutoff for the document retriever score. Matching search results below this score will be ignored.
- :type search_score_cutoff: float
- :param ignore_documents: If True, will ignore any documents and search results, and only use the messages to generate a response.
- :type ignore_documents: bool
+ .. py:method:: __post_init__()
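+
+
+ A minimal sketch of configuring an SFTP dataset; the path pattern and delimiter are illustrative
+ placeholders.
+
+ .. code-block:: python
+
+    from abacusai import SftpDatasetConfig
+
+    config = SftpDatasetConfig(
+        location=r'uploads/reports/.*\.csv',  # hypothetical regex location
+        csv_delimiter=',',
+        merge_file_schemas=True,
+    )
+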
- .. py:attribute:: llm_name
- :type: str
- :value: None
+.. py:class:: _ApplicationConnectorDatasetConfigFactory
+ Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`
- .. py:attribute:: num_completion_tokens
- :type: int
- :value: None
+ .. py:attribute:: config_abstract_class
+ .. py:attribute:: config_class_key
+ :value: 'application_connector_type'
- .. py:attribute:: system_message
- :type: str
- :value: None
+ .. py:attribute:: config_class_map
- .. py:attribute:: temperature
- :type: float
- :value: None
+.. py:class:: PredictionArguments
+ Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
- .. py:attribute:: search_score_cutoff
- :type: float
- :value: None
+ An abstract class for prediction arguments specific to problem type.
- .. py:attribute:: ignore_documents
+ .. py:attribute:: _support_kwargs
:type: bool
- :value: None
-
-
-
- .. py:method:: __post_init__()
-
-
-.. py:class:: RegressionPredictionArguments
-
- Bases: :py:obj:`PredictionArguments`
+ :value: True
- Prediction arguments for the PREDICTIVE_MODELING problem type
- :param explain_predictions: If true, will explain predictions.
- :type explain_predictions: bool
- :param explainer_type: Type of explainer to use for explanations.
- :type explainer_type: str
+ .. py:attribute:: kwargs
+ :type: dict
- .. py:attribute:: explain_predictions
- :type: bool
+ .. py:attribute:: problem_type
+ :type: abacusai.api_class.enums.ProblemType
:value: None
- .. py:attribute:: explainer_type
- :type: str
- :value: None
-
-
+ .. py:method:: _get_builder()
+ :classmethod:
- .. py:method:: __post_init__()
-.. py:class:: ForecastingPredictionArguments
+.. py:class:: OptimizationPredictionArguments
Bases: :py:obj:`PredictionArguments`
- Prediction arguments for the FORECASTING problem type
-
- :param num_predictions: The number of timestamps to predict in the future.
- :type num_predictions: int
- :param prediction_start: The start date for predictions (e.g., "2015-08-01T00:00:00" as input for mid-night of 2015-08-01).
- :type prediction_start: str
- :param explain_predictions: If True, explain predictions for forecasting.
- :type explain_predictions: bool
- :param explainer_type: Type of explainer to use for explanations.
- :type explainer_type: str
- :param get_item_data: If True, will return the data corresponding to items as well.
- :type get_item_data: bool
-
-
- .. py:attribute:: num_predictions
- :type: int
- :value: None
-
-
-
- .. py:attribute:: prediction_start
- :type: str
- :value: None
+ Prediction arguments for the OPTIMIZATION problem type
+ :param forced_assignments: Set of assignments to force and resolve before returning query results.
+ :type forced_assignments: dict
+ :param solve_time_limit_seconds: Maximum time in seconds to spend solving the query.
+ :type solve_time_limit_seconds: float
+ :param include_all_assignments: If True, will return all assignments, including assignments with value 0. Default is False.
+ :type include_all_assignments: bool
- .. py:attribute:: explain_predictions
- :type: bool
+ .. py:attribute:: forced_assignments
+ :type: dict
:value: None
- .. py:attribute:: explainer_type
- :type: str
+ .. py:attribute:: solve_time_limit_seconds
+ :type: float
:value: None
- .. py:attribute:: get_item_data
+ .. py:attribute:: include_all_assignments
:type: bool
:value: None
@@ -4395,50 +4770,34 @@ Package Contents
.. py:method:: __post_init__()
-.. py:class:: CumulativeForecastingPredictionArguments
+.. py:class:: TimeseriesAnomalyPredictionArguments
Bases: :py:obj:`PredictionArguments`
- Prediction arguments for the CUMULATIVE_FORECASTING problem type
-
- :param num_predictions: The number of timestamps to predict in the future.
- :type num_predictions: int
- :param prediction_start: The start date for predictions (e.g., "2015-08-01T00:00:00" as input for mid-night of 2015-08-01).
- :type prediction_start: str
- :param explain_predictions: If True, explain predictions for forecasting.
- :type explain_predictions: bool
- :param explainer_type: Type of explainer to use for explanations.
- :type explainer_type: str
- :param get_item_data: If True, will return the data corresponding to items as well.
- :type get_item_data: bool
-
-
- .. py:attribute:: num_predictions
- :type: int
- :value: None
+ Prediction arguments for the TS_ANOMALY problem type
+ :param start_timestamp: Timestamp from which anomalies have to be detected in the training data
+ :type start_timestamp: str
+ :param end_timestamp: Timestamp to which anomalies have to be detected in the training data
+ :type end_timestamp: str
+ :param get_all_item_data: If True, anomaly detection has to be performed on all the data related to input ids
+ :type get_all_item_data: bool
- .. py:attribute:: prediction_start
+ .. py:attribute:: start_timestamp
:type: str
:value: None
- .. py:attribute:: explain_predictions
- :type: bool
- :value: None
-
-
-
- .. py:attribute:: explainer_type
+ .. py:attribute:: end_timestamp
:type: str
:value: None
- .. py:attribute:: get_item_data
+ .. py:attribute:: get_all_item_data
:type: bool
:value: None
@@ -4447,12 +4806,204 @@ Package Contents
.. py:method:: __post_init__()
-.. py:class:: NaturalLanguageSearchPredictionArguments
+.. py:class:: ChatLLMPredictionArguments
Bases: :py:obj:`PredictionArguments`
- Prediction arguments for the NATURAL_LANGUAGE_SEARCH problem type
+ Prediction arguments for the CHAT_LLM problem type
+
+ :param llm_name: Name of the specific LLM backend to use to power the chat experience.
+ :type llm_name: str
+ :param num_completion_tokens: Default for maximum number of tokens for chat answers.
+ :type num_completion_tokens: int
+ :param system_message: The generative LLM system message.
+ :type system_message: str
+ :param temperature: The generative LLM temperature.
+ :type temperature: float
+ :param search_score_cutoff: Cutoff for the document retriever score. Matching search results below this score will be ignored.
+ :type search_score_cutoff: float
+ :param ignore_documents: If True, will ignore any documents and search results, and only use the messages to generate a response.
+ :type ignore_documents: bool
+
+
+ .. py:attribute:: llm_name
+ :type: str
+ :value: None
+
+
+
+ .. py:attribute:: num_completion_tokens
+ :type: int
+ :value: None
+
+
+
+ .. py:attribute:: system_message
+ :type: str
+ :value: None
+
+
+
+ .. py:attribute:: temperature
+ :type: float
+ :value: None
+
+
+
+ .. py:attribute:: search_score_cutoff
+ :type: float
+ :value: None
+
+
+
+ .. py:attribute:: ignore_documents
+ :type: bool
+ :value: None
+
+
+
+ .. py:method:: __post_init__()
+
+
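+A minimal sketch of prediction arguments for a CHAT_LLM deployment; the specific values are
+illustrative only.
+
+.. code-block:: python
+
+   from abacusai import ChatLLMPredictionArguments
+
+   args = ChatLLMPredictionArguments(
+       num_completion_tokens=1024,
+       temperature=0.2,
+       ignore_documents=False,
+   )
+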
+.. py:class:: RegressionPredictionArguments
+
+ Bases: :py:obj:`PredictionArguments`
+
+
+ Prediction arguments for the PREDICTIVE_MODELING problem type
+
+ :param explain_predictions: If true, will explain predictions.
+ :type explain_predictions: bool
+ :param explainer_type: Type of explainer to use for explanations.
+ :type explainer_type: str
+
+
+ .. py:attribute:: explain_predictions
+ :type: bool
+ :value: None
+
+
+
+ .. py:attribute:: explainer_type
+ :type: str
+ :value: None
+
+
+
+ .. py:method:: __post_init__()
+
+
+.. py:class:: ForecastingPredictionArguments
+
+ Bases: :py:obj:`PredictionArguments`
+
+
+ Prediction arguments for the FORECASTING problem type
+
+ :param num_predictions: The number of timestamps to predict in the future.
+ :type num_predictions: int
+ :param prediction_start: The start date for predictions (e.g., "2015-08-01T00:00:00" as input for mid-night of 2015-08-01).
+ :type prediction_start: str
+ :param explain_predictions: If True, explain predictions for forecasting.
+ :type explain_predictions: bool
+ :param explainer_type: Type of explainer to use for explanations.
+ :type explainer_type: str
+ :param get_item_data: If True, will return the data corresponding to items as well.
+ :type get_item_data: bool
+
+
+ .. py:attribute:: num_predictions
+ :type: int
+ :value: None
+
+
+
+ .. py:attribute:: prediction_start
+ :type: str
+ :value: None
+
+
+
+ .. py:attribute:: explain_predictions
+ :type: bool
+ :value: None
+
+
+
+ .. py:attribute:: explainer_type
+ :type: str
+ :value: None
+
+
+
+ .. py:attribute:: get_item_data
+ :type: bool
+ :value: None
+
+
+
+ .. py:method:: __post_init__()
+
+
+.. py:class:: CumulativeForecastingPredictionArguments
+
+ Bases: :py:obj:`PredictionArguments`
+
+
+ Prediction arguments for the CUMULATIVE_FORECASTING problem type
+
+ :param num_predictions: The number of timestamps to predict in the future.
+ :type num_predictions: int
+ :param prediction_start: The start date for predictions (e.g., "2015-08-01T00:00:00" as input for mid-night of 2015-08-01).
+ :type prediction_start: str
+ :param explain_predictions: If True, explain predictions for forecasting.
+ :type explain_predictions: bool
+ :param explainer_type: Type of explainer to use for explanations.
+ :type explainer_type: str
+ :param get_item_data: If True, will return the data corresponding to items as well.
+ :type get_item_data: bool
+
+
+ .. py:attribute:: num_predictions
+ :type: int
+ :value: None
+
+
+
+ .. py:attribute:: prediction_start
+ :type: str
+ :value: None
+
+
+
+ .. py:attribute:: explain_predictions
+ :type: bool
+ :value: None
+
+
+
+ .. py:attribute:: explainer_type
+ :type: str
+ :value: None
+
+
+
+ .. py:attribute:: get_item_data
+ :type: bool
+ :value: None
+
+
+
+ .. py:method:: __post_init__()
+
+
+.. py:class:: NaturalLanguageSearchPredictionArguments
+
+ Bases: :py:obj:`PredictionArguments`
+
+
+ Prediction arguments for the NATURAL_LANGUAGE_SEARCH problem type
:param llm_name: Name of the specific LLM backend to use to power the chat experience.
:type llm_name: str
@@ -4532,10 +5083,6 @@ Package Contents
Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`
- Helper class that provides a standard way to create an ABC using
- inheritance.
-
-
.. py:attribute:: config_abstract_class
@@ -5584,6 +6131,16 @@ Package Contents
+ .. py:attribute:: TS
+ :value: 'TS'
+
+
+
+ .. py:attribute:: TSX
+ :value: 'TSX'
+
+
+
.. py:class:: ExperimentationMode
Bases: :py:obj:`ApiEnum`
@@ -6284,6 +6841,11 @@ Package Contents
+ .. py:attribute:: GOOGLECALENDAR
+ :value: 'GOOGLECALENDAR'
+
+
+
.. py:attribute:: GIT
:value: 'GIT'
@@ -6359,11 +6921,6 @@ Package Contents
- .. py:attribute:: GOOGLECALENDAR
- :value: 'GOOGLECALENDAR'
-
-
-
.. py:attribute:: GOOGLESHEETS
:value: 'GOOGLESHEETS'
@@ -6394,6 +6951,71 @@ Package Contents
+ .. py:attribute:: SFTPAPPLICATION
+ :value: 'SFTPAPPLICATION'
+
+
+
+ .. py:attribute:: OAUTH
+ :value: 'OAUTH'
+
+
+
+ .. py:attribute:: SALESFORCE
+ :value: 'SALESFORCE'
+
+
+
+ .. py:attribute:: TWITTER
+ :value: 'TWITTER'
+
+
+
+ .. py:attribute:: MCP
+ :value: 'MCP'
+
+
+
+ .. py:attribute:: ODBC
+ :value: 'ODBC'
+
+
+
+ .. py:attribute:: DBC
+ :value: 'DBC'
+
+
+
+ .. py:attribute:: GENERIC_OAUTH
+ :value: 'GENERIC_OAUTH'
+
+
+
+ .. py:attribute:: OUTLOOK
+ :value: 'OUTLOOK'
+
+
+
+ .. py:attribute:: BIGQUERY
+ :value: 'BIGQUERY'
+
+
+
+ .. py:attribute:: AZURESTORAGE
+ :value: 'AZURESTORAGE'
+
+
+
+ .. py:method:: user_connectors()
+ :classmethod:
+
+
+
+ .. py:method:: database_connectors()
+ :classmethod:
+
+
+
.. py:class:: StreamingConnectorType
Bases: :py:obj:`ApiEnum`
@@ -6484,6 +7106,16 @@ Package Contents
+ .. py:attribute:: ATTACHMENT
+ :value: 'ATTACHMENT'
+
+
+
+ .. py:method:: to_json_type(type)
+ :staticmethod:
+
+
+
.. py:class:: PythonFunctionOutputArgumentType
Bases: :py:obj:`ApiEnum`
@@ -6559,6 +7191,11 @@ Package Contents
+ .. py:attribute:: ATTACHMENT
+ :value: 'ATTACHMENT'
+
+
+
.. py:class:: LLMName
Bases: :py:obj:`ApiEnum`
@@ -6569,6 +7206,16 @@ Package Contents
Derive from this class to define new enumerations.
+ .. py:attribute:: OPENAI_GPT3_5
+ :value: 'OPENAI_GPT3_5'
+
+
+
+ .. py:attribute:: OPENAI_GPT3_5_TEXT
+ :value: 'OPENAI_GPT3_5_TEXT'
+
+
+
.. py:attribute:: OPENAI_GPT4
:value: 'OPENAI_GPT4'
@@ -6604,48 +7251,78 @@ Package Contents
- .. py:attribute:: OPENAI_GPT3_5
- :value: 'OPENAI_GPT3_5'
+ .. py:attribute:: OPENAI_O3
+ :value: 'OPENAI_O3'
- .. py:attribute:: OPENAI_GPT3_5_TEXT
- :value: 'OPENAI_GPT3_5_TEXT'
+ .. py:attribute:: OPENAI_O3_HIGH
+ :value: 'OPENAI_O3_HIGH'
- .. py:attribute:: LLAMA3_1_405B
- :value: 'LLAMA3_1_405B'
+ .. py:attribute:: OPENAI_O4_MINI
+ :value: 'OPENAI_O4_MINI'
- .. py:attribute:: LLAMA3_1_70B
- :value: 'LLAMA3_1_70B'
+ .. py:attribute:: OPENAI_O4_MINI_HIGH
+ :value: 'OPENAI_O4_MINI_HIGH'
- .. py:attribute:: LLAMA3_1_8B
- :value: 'LLAMA3_1_8B'
+ .. py:attribute:: OPENAI_GPT4_1
+ :value: 'OPENAI_GPT4_1'
- .. py:attribute:: LLAMA3_3_70B
- :value: 'LLAMA3_3_70B'
+ .. py:attribute:: OPENAI_GPT4_1_MINI
+ :value: 'OPENAI_GPT4_1_MINI'
- .. py:attribute:: LLAMA3_LARGE_CHAT
- :value: 'LLAMA3_LARGE_CHAT'
+ .. py:attribute:: OPENAI_GPT4_1_NANO
+ :value: 'OPENAI_GPT4_1_NANO'
- .. py:attribute:: CLAUDE_V3_OPUS
- :value: 'CLAUDE_V3_OPUS'
+ .. py:attribute:: OPENAI_GPT5
+ :value: 'OPENAI_GPT5'
+
+
+ .. py:attribute:: OPENAI_GPT5_MINI
+ :value: 'OPENAI_GPT5_MINI'
- .. py:attribute:: CLAUDE_V3_SONNET
- :value: 'CLAUDE_V3_SONNET'
+
+ .. py:attribute:: OPENAI_GPT5_MINI_HIGH
+ :value: 'OPENAI_GPT5_MINI_HIGH'
+
+
+
+ .. py:attribute:: OPENAI_GPT5_MINI_LOW
+ :value: 'OPENAI_GPT5_MINI_LOW'
+
+
+
+ .. py:attribute:: OPENAI_GPT5_NANO
+ :value: 'OPENAI_GPT5_NANO'
+
+
+
+ .. py:attribute:: OPENAI_GPT5_NANO_HIGH
+ :value: 'OPENAI_GPT5_NANO_HIGH'
+
+
+
+ .. py:attribute:: OPENAI_GPT5_1
+ :value: 'OPENAI_GPT5_1'
+
+
+
+ .. py:attribute:: OPENAI_GPT5_2
+ :value: 'OPENAI_GPT5_2'
@@ -6654,8 +7331,8 @@ Package Contents
- .. py:attribute:: CLAUDE_V3_5_SONNET
- :value: 'CLAUDE_V3_5_SONNET'
+ .. py:attribute:: CLAUDE_V3_OPUS
+ :value: 'CLAUDE_V3_OPUS'
@@ -6664,23 +7341,118 @@ Package Contents
+ .. py:attribute:: CLAUDE_V3_5_SONNET
+ :value: 'CLAUDE_V3_5_SONNET'
+
+
+
+ .. py:attribute:: CLAUDE_V3_7_SONNET
+ :value: 'CLAUDE_V3_7_SONNET'
+
+
+
+ .. py:attribute:: CLAUDE_V4_SONNET
+ :value: 'CLAUDE_V4_SONNET'
+
+
+
+ .. py:attribute:: CLAUDE_V4_OPUS
+ :value: 'CLAUDE_V4_OPUS'
+
+
+
+ .. py:attribute:: CLAUDE_V4_5_SONNET
+ :value: 'CLAUDE_V4_5_SONNET'
+
+
+
.. py:attribute:: GEMINI_1_5_PRO
:value: 'GEMINI_1_5_PRO'
+ .. py:attribute:: GEMINI_1_5_FLASH
+ :value: 'GEMINI_1_5_FLASH'
+
+
+
+ .. py:attribute:: GEMINI_2_PRO
+ :value: 'GEMINI_2_PRO'
+
+
+
.. py:attribute:: GEMINI_2_FLASH
:value: 'GEMINI_2_FLASH'
- .. py:attribute:: GEMINI_2_FLASH_THINKING
- :value: 'GEMINI_2_FLASH_THINKING'
+ .. py:attribute:: GEMINI_2_5_PRO
+ :value: 'GEMINI_2_5_PRO'
- .. py:attribute:: GEMINI_2_PRO
- :value: 'GEMINI_2_PRO'
+ .. py:attribute:: GEMINI_3_PRO
+ :value: 'GEMINI_3_PRO'
+
+
+
+ .. py:attribute:: GEMINI_2_5_FLASH
+ :value: 'GEMINI_2_5_FLASH'
+
+
+
+ .. py:attribute:: GEMINI_3_FLASH
+ :value: 'GEMINI_3_FLASH'
+
+
+
+ .. py:attribute:: XAI_GROK
+ :value: 'XAI_GROK'
+
+
+
+ .. py:attribute:: XAI_GROK_3
+ :value: 'XAI_GROK_3'
+
+
+
+ .. py:attribute:: XAI_GROK_3_MINI
+ :value: 'XAI_GROK_3_MINI'
+
+
+
+ .. py:attribute:: XAI_GROK_4
+ :value: 'XAI_GROK_4'
+
+
+
+ .. py:attribute:: LLAMA4_MAVERICK
+ :value: 'LLAMA4_MAVERICK'
+
+
+
+ .. py:attribute:: LLAMA3_1_405B
+ :value: 'LLAMA3_1_405B'
+
+
+
+ .. py:attribute:: LLAMA3_1_70B
+ :value: 'LLAMA3_1_70B'
+
+
+
+ .. py:attribute:: LLAMA3_1_8B
+ :value: 'LLAMA3_1_8B'
+
+
+
+ .. py:attribute:: LLAMA3_3_70B
+ :value: 'LLAMA3_3_70B'
+
+
+
+ .. py:attribute:: LLAMA3_LARGE_CHAT
+ :value: 'LLAMA3_LARGE_CHAT'
@@ -6714,18 +7486,23 @@ Package Contents
- .. py:attribute:: GEMINI_1_5_FLASH
- :value: 'GEMINI_1_5_FLASH'
+ .. py:attribute:: QWEN3_32B
+ :value: 'QWEN3_32B'
- .. py:attribute:: XAI_GROK
- :value: 'XAI_GROK'
+ .. py:attribute:: QWEN3_235B_A22B
+ :value: 'QWEN3_235B_A22B'
+
+
+
+ .. py:attribute:: QWEN3_CODER
+ :value: 'QWEN3_CODER'
- .. py:attribute:: DEEPSEEK_V3
- :value: 'DEEPSEEK_V3'
+ .. py:attribute:: DEEPSEEK_V3_1
+ :value: 'DEEPSEEK_V3_1'
@@ -6929,6 +7706,11 @@ Package Contents
+ .. py:attribute:: CONNECTOR_TOOL
+ :value: 'CONNECTOR_TOOL'
+
+
+
.. py:class:: EvalArtifactType
Bases: :py:obj:`ApiEnum`
@@ -7434,6 +8216,99 @@ Package Contents
+ .. py:attribute:: CODE_LLM_AGENT
+ :value: 'CODE_LLM_AGENT'
+
+
+
+ .. py:attribute:: CHAT_LLM_TASK
+ :value: 'CHAT_LLM_TASK'
+
+
+
+ .. py:attribute:: COMPUTER_AGENT
+ :value: 'COMPUTER_AGENT'
+
+
+
+ .. py:attribute:: SEARCH_LLM
+ :value: 'SEARCH_LLM'
+
+
+
+ .. py:attribute:: APP_LLM
+ :value: 'APP_LLM'
+
+
+
+ .. py:attribute:: TEST_AGENT
+ :value: 'TEST_AGENT'
+
+
+
+ .. py:attribute:: SUPER_AGENT
+ :value: 'SUPER_AGENT'
+
+
+
+ .. py:attribute:: CODE_LLM_NON_INTERACTIVE
+ :value: 'CODE_LLM_NON_INTERACTIVE'
+
+
+
+ .. py:attribute:: BROWSER_EXTENSION
+ :value: 'BROWSER_EXTENSION'
+
+
+
+.. py:class:: AgentClientType
+
+ Bases: :py:obj:`ApiEnum`
+
+
+ Generic enumeration.
+
+ Derive from this class to define new enumerations.
+
+
+ .. py:attribute:: CHAT_UI
+ :value: 'CHAT_UI'
+
+
+
+ .. py:attribute:: MESSAGING_APP
+ :value: 'MESSAGING_APP'
+
+
+
+ .. py:attribute:: API
+ :value: 'API'
+
+
+
+.. py:class:: OrganizationSecretType
+
+ Bases: :py:obj:`ApiEnum`
+
+
+ Enum for organization secret types
+
+
+ .. py:attribute:: ORG_SECRET
+ :value: 'ORG_SECRET'
+
+
+
+ .. py:attribute:: ORG_API_CREDENTIALS
+ :value: 'ORG_API_CREDENTIALS'
+
+
+
+ .. py:attribute:: USER_API_CREDENTIALS
+ :value: 'USER_API_CREDENTIALS'
+
+
+
.. py:class:: SamplingConfig
Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
@@ -9419,6 +10294,29 @@ Package Contents
.. py:method:: __post_init__()
+.. py:class:: SystemConnectorTool
+
+ Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
+
+
+ System connector tool used to integrate chatbots with external services.
+
+ :param value: The name of the tool.
+ :type value: str
+ :param configs: Optional. A dictionary of key-value pairs that are used to configure the tool.
+ :type configs: dict
+
+
+ .. py:attribute:: value
+ :type: str
+
+
+ .. py:attribute:: configs
+ :type: dict
+ :value: None
+
+
+
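+A minimal sketch of declaring a builtin connector tool; the tool name and config keys are
+hypothetical placeholders. Instances like this can be passed to ChatLLMTrainingConfig via
+``builtin_tools`` (documented below).
+
+.. code-block:: python
+
+   from abacusai import SystemConnectorTool
+
+   calendar_tool = SystemConnectorTool(
+       value='google_calendar',             # hypothetical tool name
+       configs={'calendar_id': 'primary'},  # hypothetical configuration
+   )
+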
.. py:class:: ChatLLMTrainingConfig
Bases: :py:obj:`TrainingConfig`
@@ -9465,6 +10363,8 @@ Package Contents
:type hide_sql_and_code: bool
:param disable_data_summarization: After executing a query, summarize the response and reply back with only the table and query run.
:type disable_data_summarization: bool
+ :param disable_data_fetch_for_training: Train using only table and column schema metadata without fetching sample data. This speeds up training but may result in less context for the model.
+ :type disable_data_fetch_for_training: bool
:param data_columns_to_ignore: Columns to ignore while encoding information about structured data tables in context for the LLM. A list of strings of format "."
:type data_columns_to_ignore: List[str]
:param search_score_cutoff: Minimum search score to consider a document as a valid search result.
@@ -9473,6 +10373,8 @@ Package Contents
:type include_bm25_retrieval: bool
:param database_connector_id: Database connector ID to use for connecting external database that gives access to structured data to the LLM.
:type database_connector_id: str
+ :param database_connector_ids: List of database connector IDs to use for connecting external databases that give access to structured data to the LLM.
+ :type database_connector_ids: List[str]
:param database_connector_tables: List of tables to use from the database connector for the ChatLLM.
:type database_connector_tables: List[str]
:param enable_code_execution: Enable python code execution in the ChatLLM. This equips the LLM with a python kernel in which all its code is executed.
@@ -9481,7 +10383,7 @@ Package Contents
:type enable_response_caching: bool
:param unknown_answer_phrase: Fallback response when the LLM can't find an answer.
:type unknown_answer_phrase: str
- :param enable_tool_bar: Enable the tool bar in Enterprise ChatLLM to provide additional functionalities like tool_use, web_search, image_gen, etc.
+ :param enable_tool_bar: Enable the tool bar in Enterprise ChatLLM to provide additional functionalities like tool_use, web_search, image_gen, etc. Enabling this requires enable_web_search to be enabled.
:type enable_tool_bar: bool
:param enable_inline_source_citations: Enable inline citations of the sources in the response.
:type enable_inline_source_citations: bool
@@ -9490,6 +10392,16 @@ Package Contents
:type json_response_instructions: str
:param json_response_schema: Specifies the JSON schema that the model should adhere to if `response_format` is set to "JSON". This should be a json-formatted string where each field of the expected schema is mapped to a dictionary containing the fields 'type', 'required' and 'description'. For example - '{"sample_field": {"type": "integer", "required": true, "description": "Sample Field"}}'
:type json_response_schema: str
+    :param mask_pii: Mask PII in the prompts and uploaded documents before sending them to the LLM. Only available for Enterprise users and will cause validation errors if set to True for ChatLLM Teams users.
+ :type mask_pii: bool
+    :param builtin_tools: List of builtin system connector tools to use in the ChatLLM. Using builtin tools does not require enabling the tool bar (enable_tool_bar flag).
+ :type builtin_tools: List[SystemConnectorTool]
+ :param config_connectors: List of names of config connectors to use in the ChatLLM. This should not be used with document_retrievers.
+ :type config_connectors: List[str]
+ :param mcp_servers: List of names of MCP servers to use in the ChatLLM. This should not be used with document_retrievers.
+ :type mcp_servers: List[str]
+    :param agentic_loop_mode: Enables use of an agentic loop that issues a series of tool calls when needed to respond. If set to False, the agentic loop will not be used. If not set, or set to Auto, the agentic loop will be used automatically based on certain conditions, such as the presence of tools in the model.
+ :type agentic_loop_mode: bool
.. py:attribute:: document_retrievers
@@ -9612,6 +10524,12 @@ Package Contents
+ .. py:attribute:: disable_data_fetch_for_training
+ :type: bool
+ :value: None
+
+
+
.. py:attribute:: data_columns_to_ignore
:type: List[str]
:value: None
@@ -9636,6 +10554,12 @@ Package Contents
+ .. py:attribute:: database_connector_ids
+ :type: List[str]
+ :value: None
+
+
+
.. py:attribute:: database_connector_tables
:type: List[str]
:value: None
@@ -9702,6 +10626,36 @@ Package Contents
+ .. py:attribute:: mask_pii
+ :type: bool
+ :value: None
+
+
+
+ .. py:attribute:: builtin_tools
+ :type: List[SystemConnectorTool]
+ :value: None
+
+
+
+ .. py:attribute:: config_connectors
+ :type: List[str]
+ :value: None
+
+
+
+ .. py:attribute:: mcp_servers
+ :type: List[str]
+ :value: None
+
+
+
+ .. py:attribute:: agentic_loop_mode
+ :type: bool
+ :value: None
+
+
+
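+    A hedged configuration sketch covering the newly added fields; the connector IDs,
+    server names, and tool below are placeholders:
+
+    .. code-block:: python
+
+       from abacusai import ChatLLMTrainingConfig, SystemConnectorTool
+
+       config = ChatLLMTrainingConfig(
+           database_connector_ids=["db_connector_1", "db_connector_2"],
+           disable_data_fetch_for_training=True,   # train from schema metadata only
+           mask_pii=True,                          # Enterprise only
+           builtin_tools=[SystemConnectorTool(value="example_tool")],
+           mcp_servers=["example_mcp_server"],     # not to be combined with document_retrievers
+           agentic_loop_mode=None,                 # unset lets the platform decide
+       )
+
+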
.. py:method:: __post_init__()
@@ -10290,10 +11244,6 @@ Package Contents
Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`
- Helper class that provides a standard way to create an ABC using
- inheritance.
-
-
.. py:attribute:: config_abstract_class
@@ -10883,10 +11833,6 @@ Package Contents
Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`
- Helper class that provides a standard way to create an ABC using
- inheritance.
-
-
.. py:attribute:: config_abstract_class
@@ -10955,10 +11901,6 @@ Package Contents
Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`
- Helper class that provides a standard way to create an ABC using
- inheritance.
-
-
.. py:attribute:: config_abstract_class
@@ -11003,11 +11945,6 @@ Package Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:class:: FeatureMappingConfig
@@ -11179,10 +12116,6 @@ Package Contents
Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`
- Helper class that provides a standard way to create an ABC using
- inheritance.
-
-
.. py:attribute:: config_abstract_class
@@ -11213,6 +12146,8 @@ Package Contents
:type pipeline_variable: str
:param description: The description of the argument
:type description: str
+ :param long_description: The detailed description of the argument
+ :type long_description: str
:param item_type: Type of items when variable_type is LIST
:type item_type: str
@@ -11253,6 +12188,12 @@ Package Contents
+ .. py:attribute:: long_description
+ :type: str
+ :value: None
+
+
+
.. py:attribute:: item_type
:type: str
:value: None
@@ -11270,6 +12211,10 @@ Package Contents
:type variable_type: PythonFunctionOutputArgumentType
:param name: The name of the python function variable
:type name: str
+ :param description: The description of the output
+ :type description: str
+ :param long_description: The detailed description of the output
+ :type long_description: str
.. py:attribute:: variable_type
@@ -11284,6 +12229,18 @@ Package Contents
+ .. py:attribute:: description
+ :type: str
+ :value: None
+
+
+
+ .. py:attribute:: long_description
+ :type: str
+ :value: None
+
+
+
.. py:class:: FeatureGroupExportConfig
Bases: :py:obj:`abacusai.api_class.abstract.ApiClass`
@@ -11333,11 +12290,6 @@ Package Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:class:: DatabaseConnectorExportConfig
@@ -11401,21 +12353,12 @@ Package Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:class:: _FeatureGroupExportConfigFactory
Bases: :py:obj:`abacusai.api_class.abstract._ApiClassFactory`
- Helper class that provides a standard way to create an ABC using
- inheritance.
-
-
.. py:attribute:: config_abstract_class
@@ -11453,11 +12396,6 @@ Package Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:data:: Segment
@@ -11509,7 +12447,7 @@ Package Contents
:type: int
-.. py:class:: TextResponseSection(text, section_key = None)
+.. py:class:: TextResponseSection(segment = None, section_key = None, text = None)
Bases: :py:obj:`ResponseSection`
@@ -11605,11 +12543,6 @@ Package Contents
.. py:method:: to_dict()
- Standardizes converting an ApiClass to dictionary.
- Keys of response dictionary are converted to camel case.
- This also validates the fields ( type, value, etc ) received in the dictionary.
-
-
.. py:class:: ListResponseSection(items, section_key = None)
@@ -11661,7 +12594,7 @@ Package Contents
:type: str
-.. py:class:: ApiEndpoint(client, apiEndpoint=None, predictEndpoint=None, proxyEndpoint=None, llmEndpoint=None, externalChatEndpoint=None, dashboardEndpoint=None)
+.. py:class:: ApiEndpoint(client, apiEndpoint=None, predictEndpoint=None, proxyEndpoint=None, llmEndpoint=None, externalChatEndpoint=None, dashboardEndpoint=None, hostingDomain=None, publicHostingDomain=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -11682,6 +12615,10 @@ Package Contents
:type externalChatEndpoint: str
:param dashboardEndpoint: The URI that the external chat will use to go back to the dashboard
:type dashboardEndpoint: str
+ :param hostingDomain: The domain for hosted app deployments
+ :type hostingDomain: str
+ :param publicHostingDomain: The domain for publicly hosted app deployments
+ :type publicHostingDomain: str
.. py:attribute:: api_endpoint
@@ -11714,6 +12651,16 @@ Package Contents
+ .. py:attribute:: hosting_domain
+ :value: None
+
+
+
+ .. py:attribute:: public_hosting_domain
+ :value: None
+
+
+
.. py:attribute:: deprecated_keys
@@ -11746,7 +12693,7 @@ Package Contents
:type apiKeySuffix: str
:param tag: A user-friendly tag for the API key.
:type tag: str
- :param type: The type of the API key, either 'default', 'code-llm', or 'computer-use'.
+ :param type: The type of the API key, either 'default', 'code-llm', 'terminal', or 'computer-use'.
:type type: str
:param createdAt: The timestamp when the API key was created.
:type createdAt: str
@@ -11820,7 +12767,7 @@ Package Contents
-.. py:class:: AppUserGroup(client, name=None, userGroupId=None, externalApplicationIds=None, invitedUserEmails=None, publicUserGroup=None, hasExternalApplicationReporting=None, isExternalServiceGroup=None, externalServiceGroupId=None, users={})
+.. py:class:: AppUserGroup(client, name=None, userGroupId=None, externalApplicationIds=None, invitedUserEmails=None, publicUserGroup=None, hasExternalApplicationReporting=None, isExternalServiceGroup=None, externalServiceGroupId=None, creationSource=None, users={})
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -11845,6 +12792,8 @@ Package Contents
:type isExternalServiceGroup: bool
:param externalServiceGroupId: The identifier that corresponds to the app user group's external service group representation
:type externalServiceGroupId: str
+ :param creationSource: The source of the app user group
+ :type creationSource: str
:param users: The users in the user group.
:type users: User
@@ -11889,6 +12838,11 @@ Package Contents
+ .. py:attribute:: creation_source
+ :value: None
+
+
+
.. py:attribute:: users
@@ -11940,7 +12894,7 @@ Package Contents
-.. py:class:: ApplicationConnector(client, applicationConnectorId=None, service=None, name=None, createdAt=None, status=None, auth=None)
+.. py:class:: ApplicationConnector(client, applicationConnectorId=None, service=None, serviceName=None, name=None, createdAt=None, status=None, auth=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -11953,6 +12907,8 @@ Package Contents
:type applicationConnectorId: str
:param service: The service this connection connects to
:type service: str
+ :param serviceName: For OAuth services, the specific provider name (e.g., 'spotify')
+ :type serviceName: str
:param name: A user-friendly name for the service
:type name: str
:param createdAt: When the API key was created
@@ -11973,6 +12929,11 @@ Package Contents
+ .. py:attribute:: service_name
+ :value: None
+
+
+
.. py:attribute:: name
:value: None
@@ -12044,7 +13005,7 @@ Package Contents
-.. py:class:: AudioGenSettings(client, model=None)
+.. py:class:: AudioGenSettings(client, model=None, settings=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -12053,8 +13014,10 @@ Package Contents
:param client: An authenticated API Client instance
:type client: ApiClient
- :param model: Dropdown for models available for audio generation.
+    :param model: Names of models available for audio generation.
:type model: dict
+    :param settings: Settings for each model.
+ :type settings: dict
.. py:attribute:: model
@@ -12062,6 +13025,126 @@ Package Contents
+ .. py:attribute:: settings
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: AudioUrlResult(client, audioUrl=None, creditsUsed=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+    Text-to-speech (TTS) result
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param audioUrl: The audio url.
+ :type audioUrl: str
+ :param creditsUsed: The credits used.
+ :type creditsUsed: float
+
+
+ .. py:attribute:: audio_url
+ :value: None
+
+
+
+ .. py:attribute:: credits_used
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: AuditLog(client, createdAt=None, userId=None, objectId=None, action=None, source=None, refreshPolicyId=None, pipelineId=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ An audit log entry
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param createdAt: The timestamp when the audit log entry was created
+ :type createdAt: str
+ :param userId: The hashed ID of the user who performed the action
+ :type userId: str
+ :param objectId: The hashed ID of the object that was affected by the action
+ :type objectId: str
+ :param action: The action performed (create, modify, start, stop, delete, share, hide, credential_change, login)
+ :type action: str
+ :param source: The source of the action (api, ui, pipeline, cli, system)
+ :type source: str
+ :param refreshPolicyId: The hashed ID of the refresh policy if applicable
+ :type refreshPolicyId: str
+ :param pipelineId: The hashed ID of the pipeline if applicable
+ :type pipelineId: str
+
+
+ .. py:attribute:: created_at
+ :value: None
+
+
+
+ .. py:attribute:: user_id
+ :value: None
+
+
+
+ .. py:attribute:: object_id
+ :value: None
+
+
+
+ .. py:attribute:: action
+ :value: None
+
+
+
+ .. py:attribute:: source
+ :value: None
+
+
+
+ .. py:attribute:: refresh_policy_id
+ :value: None
+
+
+
+ .. py:attribute:: pipeline_id
+ :value: None
+
+
+
.. py:attribute:: deprecated_keys
@@ -13052,7 +14135,7 @@ Package Contents
-.. py:class:: ChatMessage(client, role=None, text=None, timestamp=None, isUseful=None, feedback=None, docIds=None, hotkeyTitle=None, tasks=None, keywordArguments=None)
+.. py:class:: ChatMessage(client, role=None, text=None, timestamp=None, isUseful=None, feedback=None, docIds=None, hotkeyTitle=None, tasks=None, keywordArguments=None, computePointsUsed=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -13079,6 +14162,8 @@ Package Contents
:type tasks: list[str]
:param keywordArguments: A dict of kwargs used to generate the response.
:type keywordArguments: dict
+ :param computePointsUsed: The number of compute points used for the message.
+ :type computePointsUsed: int
.. py:attribute:: role
@@ -13126,6 +14211,11 @@ Package Contents
+ .. py:attribute:: compute_points_used
+ :value: None
+
+
+
.. py:attribute:: deprecated_keys
@@ -13326,6 +14416,229 @@ Package Contents
+.. py:class:: ChatllmComputerStatus(client, computerId=None, vncEndpoint=None, computerStarted=None, restartRequired=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ ChatLLM Computer Status
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+    :param computerId: The ID of the computer; it can be a deployment_conversation_id or a computer_id (TODO: add a separate field for deployment_conversation_id)
+ :type computerId: str
+ :param vncEndpoint: The VNC endpoint of the computer
+ :type vncEndpoint: str
+ :param computerStarted: Whether the computer has started
+ :type computerStarted: bool
+ :param restartRequired: Whether the computer needs to be restarted
+ :type restartRequired: bool
+
+
+ .. py:attribute:: computer_id
+ :value: None
+
+
+
+ .. py:attribute:: vnc_endpoint
+ :value: None
+
+
+
+ .. py:attribute:: computer_started
+ :value: None
+
+
+
+ .. py:attribute:: restart_required
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: ChatllmMemory(client, chatllmMemoryId=None, memory=None, sourceDeploymentConversationId=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+    An LLM-created memory in ChatLLM
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param chatllmMemoryId: The ID of the chatllm memory.
+ :type chatllmMemoryId: str
+ :param memory: The text of the ChatLLM memory.
+ :type memory: str
+ :param sourceDeploymentConversationId: The deployment conversation where this memory was created.
+ :type sourceDeploymentConversationId: str
+
+
+ .. py:attribute:: chatllm_memory_id
+ :value: None
+
+
+
+ .. py:attribute:: memory
+ :value: None
+
+
+
+ .. py:attribute:: source_deployment_conversation_id
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: ChatllmProject(client, chatllmProjectId=None, name=None, description=None, customInstructions=None, createdAt=None, updatedAt=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ ChatLLM Project
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param chatllmProjectId: The ID of the chatllm project.
+ :type chatllmProjectId: id
+ :param name: The name of the chatllm project.
+ :type name: str
+ :param description: The description of the chatllm project.
+ :type description: str
+ :param customInstructions: The custom instructions of the chatllm project.
+ :type customInstructions: str
+ :param createdAt: The creation time of the chatllm project.
+ :type createdAt: str
+ :param updatedAt: The update time of the chatllm project.
+ :type updatedAt: str
+
+
+ .. py:attribute:: chatllm_project_id
+ :value: None
+
+
+
+ .. py:attribute:: name
+ :value: None
+
+
+
+ .. py:attribute:: description
+ :value: None
+
+
+
+ .. py:attribute:: custom_instructions
+ :value: None
+
+
+
+ .. py:attribute:: created_at
+ :value: None
+
+
+
+ .. py:attribute:: updated_at
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: ChatllmProjectPermissions(client, chatllmProjectId=None, accessLevel=None, userPermissions=None, userGroupPermissions=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ ChatLLM Project Permissions
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param chatllmProjectId: The ID of the chatllm project.
+ :type chatllmProjectId: id
+ :param accessLevel: The access level of the chatllm project.
+ :type accessLevel: str
+ :param userPermissions: List of tuples containing (user_id, permission).
+ :type userPermissions: list
+ :param userGroupPermissions: List of tuples containing (user_group_id, permission).
+ :type userGroupPermissions: list
+
+
+ .. py:attribute:: chatllm_project_id
+ :value: None
+
+
+
+ .. py:attribute:: access_level
+ :value: None
+
+
+
+ .. py:attribute:: user_permissions
+ :value: None
+
+
+
+ .. py:attribute:: user_group_permissions
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
.. py:class:: ChatllmReferralInvite(client, userAlreadyExists=None, successfulInvites=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -13366,7 +14679,7 @@ Package Contents
-.. py:class:: ChatllmTask(client, chatllmTaskId=None, name=None, instructions=None, lifecycle=None, scheduleInfo=None, externalApplicationId=None, deploymentConversationId=None, enableEmailAlerts=None, email=None)
+.. py:class:: ChatllmTask(client, chatllmTaskId=None, daemonTaskId=None, taskType=None, name=None, instructions=None, description=None, lifecycle=None, scheduleInfo=None, externalApplicationId=None, deploymentConversationId=None, sourceDeploymentConversationId=None, enableEmailAlerts=None, email=None, numUnreadTaskInstances=None, computePointsUsed=None, displayMarkdown=None, requiresNewConversation=None, executionMode=None, taskDefinition=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -13377,10 +14690,16 @@ Package Contents
:type client: ApiClient
:param chatllmTaskId: The id of the chatllm task.
:type chatllmTaskId: str
+ :param daemonTaskId: The id of the daemon task.
+ :type daemonTaskId: str
+ :param taskType: The type of task ('chatllm' or 'daemon').
+ :type taskType: str
:param name: The name of the chatllm task.
:type name: str
:param instructions: The instructions of the chatllm task.
:type instructions: str
+ :param description: The description of the chatllm task.
+ :type description: str
:param lifecycle: The lifecycle of the chatllm task.
:type lifecycle: str
:param scheduleInfo: The schedule info of the chatllm task.
@@ -13389,10 +14708,24 @@ Package Contents
:type externalApplicationId: str
:param deploymentConversationId: The deployment conversation id associated with the chatllm task.
:type deploymentConversationId: str
+ :param sourceDeploymentConversationId: The source deployment conversation id associated with the chatllm task.
+ :type sourceDeploymentConversationId: str
:param enableEmailAlerts: Whether email alerts are enabled for the chatllm task.
:type enableEmailAlerts: bool
:param email: The email to send alerts to.
:type email: str
+ :param numUnreadTaskInstances: The number of unread task instances for the chatllm task.
+ :type numUnreadTaskInstances: int
+ :param computePointsUsed: The compute points used for the chatllm task.
+ :type computePointsUsed: int
+ :param displayMarkdown: The display markdown for the chatllm task.
+ :type displayMarkdown: str
+ :param requiresNewConversation: Whether a new conversation is required for the chatllm task.
+ :type requiresNewConversation: bool
+ :param executionMode: The execution mode of the chatllm task.
+ :type executionMode: str
+ :param taskDefinition: The task definition (for web_service_trigger tasks).
+ :type taskDefinition: dict
.. py:attribute:: chatllm_task_id
@@ -13400,6 +14733,16 @@ Package Contents
+ .. py:attribute:: daemon_task_id
+ :value: None
+
+
+
+ .. py:attribute:: task_type
+ :value: None
+
+
+
.. py:attribute:: name
:value: None
@@ -13410,6 +14753,11 @@ Package Contents
+ .. py:attribute:: description
+ :value: None
+
+
+
.. py:attribute:: lifecycle
:value: None
@@ -13430,6 +14778,11 @@ Package Contents
+ .. py:attribute:: source_deployment_conversation_id
+ :value: None
+
+
+
.. py:attribute:: enable_email_alerts
:value: None
@@ -13440,6 +14793,36 @@ Package Contents
+ .. py:attribute:: num_unread_task_instances
+ :value: None
+
+
+
+ .. py:attribute:: compute_points_used
+ :value: None
+
+
+
+ .. py:attribute:: display_markdown
+ :value: None
+
+
+
+ .. py:attribute:: requires_new_conversation
+ :value: None
+
+
+
+ .. py:attribute:: execution_mode
+ :value: None
+
+
+
+ .. py:attribute:: task_definition
+ :value: None
+
+
+
.. py:attribute:: deprecated_keys
@@ -13525,6 +14908,47 @@ Package Contents
+ .. py:method:: get_assignments_online_with_new_inputs(deployment_token, deployment_id, assignments_df = None, constraints_df = None, constraint_equations_df = None, feature_mapping_dict = None, solve_time_limit_seconds = None, optimality_gap_limit = None)
+
+       Get alternative positive assignments for a given query. Optimal assignments are ignored and the alternative assignments are returned instead.
+
+ :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be safely embedded in an application or website.
+ :type deployment_token: str
+ :param deployment_id: The unique identifier of a deployment created under the project.
+ :type deployment_id: ID
+ :param assignments_df: A dataframe with all the variables involved in the optimization problem
+ :type assignments_df: pd.DataFrame
+ :param constraints_df: A dataframe of individual constraints, and variables in them
+ :type constraints_df: pd.DataFrame
+       :param constraint_equations_df: A dataframe describing the operator, constant, penalty, etc. of each constraint.
+       This provides the data needed to make sense of the constraints_df.
+ :type constraint_equations_df: pd.DataFrame
+ :param solve_time_limit_seconds: Maximum time in seconds to spend solving the query.
+ :type solve_time_limit_seconds: float
+       :param optimality_gap_limit: Optimality gap we want to come within, after which we accept the solution as valid (0 means we only want an optimal solution). It is defined as abs(best_solution_found - best_bound) / abs(best_solution_found)
+ :type optimality_gap_limit: float
+
+ :returns: The assignments for a given query.
+ :rtype: Dict
+
+
+
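+       A minimal usage sketch; the token, deployment ID, and dataframe contents are
+       placeholders, and the real columns depend on the trained optimization model:
+
+       .. code-block:: python
+
+          import pandas as pd
+          from abacusai import ApiClient
+
+          client = ApiClient("YOUR_API_KEY")  # placeholder key
+
+          assignments_df = pd.DataFrame({"variable": ["x1", "x2"], "cost": [1.0, 2.0]})
+          constraints_df = pd.DataFrame({"constraint": ["c1"], "variable": ["x1"]})
+
+          result = client.get_assignments_online_with_new_inputs(
+              deployment_token="DEPLOYMENT_TOKEN",
+              deployment_id="DEPLOYMENT_ID",
+              assignments_df=assignments_df,
+              constraints_df=constraints_df,
+              solve_time_limit_seconds=30.0,
+          )
+
+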
+ .. py:method:: get_optimization_input_dataframes_with_new_inputs(deployment_token, deployment_id, query_data)
+
+       Get assignments for a given query, with new inputs
+
+ :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be safely embedded in an application or website.
+ :type deployment_token: str
+ :param deployment_id: The unique identifier of a deployment created under the project.
+ :type deployment_id: str
+       :param query_data: A dictionary of key-value pairs corresponding to the updated FGs in the FG tree that we want to update in order to compute new top-level FGs for the online solve. Values are dataframes and keys are their names. Names should be the same as the ones used during training.
+ :type query_data: dict
+
+       :returns: The output dataframes for a given query, in serialized form; effectively a dict mapping names to serialized dataframes.
+ :rtype: Dict
+
+
+
.. py:method:: create_dataset_version_from_pandas(table_name_or_id, df, clean_column_names = False)
[Deprecated]
@@ -13788,7 +15212,7 @@ Package Contents
- .. py:method:: create_python_function_from_function(name, function, function_variable_mappings = None, package_requirements = None, function_type = PythonFunctionType.FEATURE_GROUP.value)
+ .. py:method:: create_python_function_from_function(name, function, function_variable_mappings = None, package_requirements = None, function_type = PythonFunctionType.FEATURE_GROUP.value, description = None)
Creates a custom Python function
@@ -13802,6 +15226,8 @@ Package Contents
:type package_requirements: List
:param function_type: Type of Python function to create. Default is FEATURE_GROUP, but can also be PLOTLY_FIG.
:type function_type: PythonFunctionType
+ :param description: Description of the Python function.
+ :type description: str
@@ -14095,7 +15521,7 @@ Package Contents
.. py:method:: execute_workflow_node(node, inputs)
- Execute the workflow node given input arguments.
+ Execute the workflow node given input arguments. This is to be used for testing purposes only.
:param node: The workflow node to be executed.
:type node: WorkflowGraphNode
@@ -14172,6 +15598,15 @@ Package Contents
+ .. py:method:: _get_agent_client_type()
+
+ Returns the client type for the current request context.
+
+ :returns: The client type for the current request context.
+ :rtype: AgentClientType
+
+
+
.. py:method:: get_agent_context_chat_history()
Gets a history of chat messages from the current request context. Applicable within a AIAgent
@@ -14289,7 +15724,7 @@ Package Contents
.. py:method:: get_agent_context_user_info()
- Gets information about the user interacting with the agent.
+       Gets information about the user interacting with the agent and the user action, if applicable.
Applicable within a AIAgent execute function.
:returns: Containing email and name of the end user.
@@ -14297,14 +15732,16 @@ Package Contents
- .. py:method:: get_agent_runtime_config(key)
+ .. py:method:: get_runtime_config(key)
- Gets the deployment level runtime config for the agent
+ Retrieve the value of a specified configuration key from the deployment's runtime settings.
+       These settings can be configured on the deployment details page in the UI.
+       Currently supported for AI Agents, Custom Python Models, and Prediction Operators.
- :param key: Key for which the config value is to be fetched
+ :param key: The configuration key whose value is to be fetched.
:type key: str
- :returns: Config value for the input key
+ :returns: The value associated with the specified configuration key, or None if the key does not exist.
:rtype: str
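+
+       A short usage sketch; the configuration key is a placeholder and must be set on
+       the deployment details page for a non-None value to be returned:
+
+       .. code-block:: python
+
+          from abacusai import ApiClient
+
+          client = ApiClient("YOUR_API_KEY")  # placeholder key
+
+          # Inside an AI Agent, Custom Python Model, or Prediction Operator runtime.
+          threshold = client.get_runtime_config("SCORE_THRESHOLD")
+          if threshold is None:
+              threshold = "0.5"  # fall back when the key is not configured
+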
@@ -14388,9 +15825,9 @@ Package Contents
- .. py:method:: _get_agent_async_app_caller()
+ .. py:method:: _get_agent_caller()
- Gets the caller for the current request context of async app. Applicable within a AIAgent execute function.
+ Gets the caller for the current request context. Applicable within a AIAgent execute function.
:returns: The caller for the current request being processed by the Agent.
:rtype: str
@@ -14399,9 +15836,18 @@ Package Contents
.. py:method:: _is_proxy_app_caller()
- Gets the caller for the current request context of async app. Applicable within a AIAgent execute function.
+       Checks if the caller is the cluster-proxy app.
- :returns: True if the caller is proxy app.
+       :returns: True if the caller is the cluster-proxy app.
+ :rtype: bool
+
+
+
+ .. py:method:: _is_async_app_caller()
+
+       Checks if the caller is the async app.
+
+       :returns: True if the caller is the async app.
:rtype: bool
@@ -14610,7 +16056,7 @@ Package Contents
- .. py:method:: get_streaming_chat_response(deployment_token, deployment_id, messages, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, ignore_documents = False, include_search_results = False)
+ .. py:method:: get_streaming_chat_response(deployment_token, deployment_id, messages, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, ignore_documents = False, include_search_results = False, user_info = None)
Return an asynchronous generator which continues the conversation based on the input messages and search results.
@@ -14638,10 +16084,12 @@ Package Contents
:type ignore_documents: bool
:param include_search_results: If True, will also return search results, if relevant.
:type include_search_results: bool
+ :param user_info: The information of the user to act on behalf of for user restricted data sources.
+ :type user_info: dict
- .. py:method:: get_streaming_conversation_response(deployment_token, deployment_id, message, deployment_conversation_id = None, external_session_id = None, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, ignore_documents = False, include_search_results = False)
+ .. py:method:: get_streaming_conversation_response(deployment_token, deployment_id, message, deployment_conversation_id = None, external_session_id = None, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, ignore_documents = False, include_search_results = False, user_info = None)
Return an asynchronous generator which continues the conversation based on the input messages and search results.
@@ -14673,6 +16121,8 @@ Package Contents
:type ignore_documents: bool
:param include_search_results: If True, will also return search results, if relevant.
:type include_search_results: bool
+ :param user_info: The information of the user to act on behalf of for user restricted data sources.
+ :type user_info: dict
@@ -14781,6 +16231,30 @@ Package Contents
+ .. py:method:: execute_agent_with_binary_data(deployment_token, deployment_id, arguments = None, keyword_arguments = None, deployment_conversation_id = None, external_session_id = None, blobs = None)
+
+ Executes a deployed AI agent function with binary data as inputs.
+
+ :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
+ :type deployment_token: str
+ :param deployment_id: A unique string identifier for the deployment created under the project.
+ :type deployment_id: str
+ :param arguments: Positional arguments to the agent execute function.
+ :type arguments: list
+ :param keyword_arguments: A dictionary where each 'key' represents the parameter name and its corresponding 'value' represents the value of that parameter for the agent execute function.
+ :type keyword_arguments: dict
+ :param deployment_conversation_id: A unique string identifier for the deployment conversation used for the conversation.
+ :type deployment_conversation_id: str
+ :param external_session_id: A unique string identifier for the session used for the conversation. If both deployment_conversation_id and external_session_id are not provided, a new session will be created.
+ :type external_session_id: str
+ :param blobs: A dictionary of binary data to use as inputs to the agent execute function.
+ :type blobs: None
+
+ :returns: The result of the agent execution
+ :rtype: AgentDataExecutionResult
+
+
+
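+       A hedged usage sketch; the file, argument names, and IDs are placeholders:
+
+       .. code-block:: python
+
+          from abacusai import ApiClient
+
+          client = ApiClient("YOUR_API_KEY")  # placeholder key
+
+          with open("invoice.pdf", "rb") as f:
+              blobs = {"invoice": f.read()}  # raw bytes passed to the execute function
+
+          result = client.execute_agent_with_binary_data(
+              deployment_token="DEPLOYMENT_TOKEN",
+              deployment_id="DEPLOYMENT_ID",
+              keyword_arguments={"task": "Summarize the attached invoice"},
+              blobs=blobs,
+          )
+
+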
.. py:method:: add_user_to_organization(email)
Invite a user to your organization. This method will send the specified email address an invitation link to join your organization.
@@ -14903,6 +16377,17 @@ Package Contents
+ .. py:method:: set_user_role_to_platform(workspace, email)
+
+       Set the user's role to have Abacus.AI platform access.
+
+ :param workspace: The workspace for the user that's getting platform access.
+ :type workspace: str
+ :param email: The email for the user that's getting platform access.
+ :type email: str
+
+
+
.. py:method:: create_deployment_webhook(deployment_id, endpoint, webhook_event_type, payload_template = None)
Create a webhook attached to a given deployment ID.
@@ -14951,7 +16436,7 @@ Package Contents
:param name: The project's name.
:type name: str
- :param use_case: The use case that the project solves. Refer to our [guide on use cases](https://api.abacus.ai/app/help/useCases) for further details of each use case. The following enums are currently available for you to choose from: LANGUAGE_DETECTION, NLP_SENTIMENT, NLP_SEARCH, NLP_CHAT, CHAT_LLM, NLP_SENTENCE_BOUNDARY_DETECTION, NLP_CLASSIFICATION, NLP_SUMMARIZATION, NLP_DOCUMENT_VISUALIZATION, AI_AGENT, EMBEDDINGS_ONLY, MODEL_WITH_EMBEDDINGS, TORCH_MODEL, TORCH_MODEL_WITH_EMBEDDINGS, PYTHON_MODEL, NOTEBOOK_PYTHON_MODEL, DOCKER_MODEL, DOCKER_MODEL_WITH_EMBEDDINGS, CUSTOMER_CHURN, ENERGY, EVENT_ANOMALY_DETECTION, FINANCIAL_METRICS, CUMULATIVE_FORECASTING, FRAUD_ACCOUNT, FRAUD_TRANSACTIONS, CLOUD_SPEND, TIMESERIES_ANOMALY, OPERATIONS_MAINTENANCE, PERS_PROMOTIONS, PREDICTING, FEATURE_STORE, RETAIL, SALES_FORECASTING, SALES_SCORING, FEED_RECOMMEND, USER_RANKINGS, NAMED_ENTITY_RECOGNITION, USER_RECOMMENDATIONS, USER_RELATED, VISION, VISION_REGRESSION, VISION_OBJECT_DETECTION, FEATURE_DRIFT, SCHEDULING, GENERIC_FORECASTING, PRETRAINED_IMAGE_TEXT_DESCRIPTION, PRETRAINED_SPEECH_RECOGNITION, PRETRAINED_STYLE_TRANSFER, PRETRAINED_TEXT_TO_IMAGE_GENERATION, PRETRAINED_OCR_DOCUMENT_TO_TEXT, THEME_ANALYSIS, CLUSTERING, CLUSTERING_TIMESERIES, FINETUNED_LLM, PRETRAINED_INSTRUCT_PIX2PIX, PRETRAINED_TEXT_CLASSIFICATION.
+ :param use_case: The use case that the project solves. Refer to our [guide on use cases](https://api.abacus.ai/app/help/developer-platform/useCases) for further details of each use case. The following enums are currently available for you to choose from: LANGUAGE_DETECTION, NLP_SENTIMENT, NLP_SEARCH, NLP_CHAT, CHAT_LLM, NLP_SENTENCE_BOUNDARY_DETECTION, NLP_CLASSIFICATION, NLP_SUMMARIZATION, NLP_DOCUMENT_VISUALIZATION, AI_AGENT, EMBEDDINGS_ONLY, MODEL_WITH_EMBEDDINGS, TORCH_MODEL, TORCH_MODEL_WITH_EMBEDDINGS, PYTHON_MODEL, NOTEBOOK_PYTHON_MODEL, DOCKER_MODEL, DOCKER_MODEL_WITH_EMBEDDINGS, CUSTOMER_CHURN, ENERGY, EVENT_ANOMALY_DETECTION, FINANCIAL_METRICS, CUMULATIVE_FORECASTING, FRAUD_ACCOUNT, FRAUD_TRANSACTIONS, CLOUD_SPEND, TIMESERIES_ANOMALY, OPERATIONS_MAINTENANCE, PERS_PROMOTIONS, PREDICTING, FEATURE_STORE, RETAIL, SALES_FORECASTING, SALES_SCORING, FEED_RECOMMEND, USER_RANKINGS, NAMED_ENTITY_RECOGNITION, USER_RECOMMENDATIONS, USER_RELATED, VISION, VISION_REGRESSION, VISION_OBJECT_DETECTION, FEATURE_DRIFT, SCHEDULING, GENERIC_FORECASTING, PRETRAINED_IMAGE_TEXT_DESCRIPTION, PRETRAINED_SPEECH_RECOGNITION, PRETRAINED_STYLE_TRANSFER, PRETRAINED_TEXT_TO_IMAGE_GENERATION, PRETRAINED_OCR_DOCUMENT_TO_TEXT, THEME_ANALYSIS, CLUSTERING, CLUSTERING_TIMESERIES, FINETUNED_LLM, PRETRAINED_INSTRUCT_PIX2PIX, PRETRAINED_TEXT_CLASSIFICATION.
:type use_case: str
:returns: This object represents the newly created project.
@@ -14959,6 +16444,27 @@ Package Contents
+ .. py:method:: list_projects_dashboard(updated_filter = None, limit = 50, since_project_id = None, search = None, starred = None, tag = None, tags = None)
+
+ Retrieves a list of projects for dashboard display with filtering and pagination support.
+
+ :param updated_filter: Unix timestamp to filter projects updated after this time.
+ :type updated_filter: int
+ :param limit: The maximum number of projects to return (default: 50).
+ :type limit: int
+ :param since_project_id: The ID of the project after which the list starts for pagination.
+ :type since_project_id: str
+ :param search: Search term to filter projects by name, creator email, or use case.
+ :type search: str
+ :param starred: Filter by starred status (True for starred, False for non-starred).
+ :type starred: bool
+ :param tag: Filter by a single tag (deprecated, use tags instead).
+ :type tag: str
+ :param tags: List of tags to filter projects by.
+ :type tags: list
+
+
+
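+       A brief usage sketch with placeholder filter values:
+
+       .. code-block:: python
+
+          from abacusai import ApiClient
+
+          client = ApiClient("YOUR_API_KEY")  # placeholder key
+
+          # Up to 20 starred projects carrying either of two placeholder tags.
+          projects = client.list_projects_dashboard(
+              limit=20,
+              starred=True,
+              tags=["demo", "forecasting"],
+          )
+
+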
.. py:method:: rename_project(project_id, name)
This method renames a project after it is created.
@@ -15010,6 +16516,23 @@ Package Contents
+ .. py:method:: get_raw_data_from_realtime_dataset(dataset_id, check_permissions = False, start_time = None, end_time = None, column_filter = None)
+
+ Returns raw data from a realtime dataset. Only Microsoft Teams datasets are supported currently due to data size constraints in realtime datasets.
+
+ :param dataset_id: The unique ID associated with the dataset.
+ :type dataset_id: str
+ :param check_permissions: If True, checks user permissions using session email.
+ :type check_permissions: bool
+ :param start_time: Start time filter (inclusive) for created_date_time_t in ISO 8601 format (e.g. 2025-05-13T08:25:11Z or 2025-05-13T08:25:11+00:00).
+ :type start_time: str
+ :param end_time: End time filter (inclusive) for created_date_time_t in ISO 8601 format (e.g. 2025-05-13T08:25:11Z or 2025-05-13T08:25:11+00:00).
+ :type end_time: str
+ :param column_filter: Dictionary mapping column names to filter values. Only rows matching all column filters will be returned.
+ :type column_filter: dict
+
+
+
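+       A hedged usage sketch; the dataset ID, time window, and column filter are placeholders:
+
+       .. code-block:: python
+
+          from abacusai import ApiClient
+
+          client = ApiClient("YOUR_API_KEY")  # placeholder key
+
+          rows = client.get_raw_data_from_realtime_dataset(
+              dataset_id="DATASET_ID",
+              start_time="2025-05-13T00:00:00Z",
+              end_time="2025-05-14T00:00:00Z",
+              column_filter={"channel_name": "general"},
+          )
+
+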
.. py:method:: add_feature_group_to_project(feature_group_id, project_id, feature_group_type = 'CUSTOM_TABLE')
Adds a feature group to a project.
@@ -15080,6 +16603,19 @@ Package Contents
+ .. py:method:: add_project_scope_for_user(project_id, email, scope)
+
+ Add a user to a project.
+
+ :param project_id: The project's id.
+ :type project_id: str
+ :param email: The user's email.
+ :type email: str
+ :param scope: The list of project scopes.
+ :type scope: list
+
+
+
.. py:method:: add_annotation(annotation, feature_group_id, feature_name, doc_id = None, feature_group_row_identifier = None, annotation_source = 'ui', status = None, comments = None, project_id = None, save_metadata = False, pages = None)
Add an annotation entry to the database.
@@ -15294,16 +16830,17 @@ Package Contents
Creates a new feature group defined as the union of other feature group versions.
- Args:
- source_feature_group_id (str): Unique string identifier corresponding to the dataset feature group that will have its versions merged into this feature group.
- table_name (str): Unique string identifier to be given to this merge feature group. Can be up to 120 characters long and can only contain alphanumeric characters and underscores.
- merge_config (MergeConfig): JSON object defining the merging method and its parameters.
- description (str): Human-readable description of this feature group.
+ :param source_feature_group_id: Unique string identifier corresponding to the dataset feature group that will have its versions merged into this feature group.
+ :type source_feature_group_id: str
+ :param table_name: Unique string identifier to be given to this merge feature group. Can be up to 120 characters long and can only contain alphanumeric characters and underscores.
+ :type table_name: str
+ :param merge_config: JSON object defining the merging method and its parameters.
+ :type merge_config: MergeConfig
+ :param description: Human-readable description of this feature group.
+ :type description: str
- Returns:
- FeatureGroup: The created feature group.
- Description:
- Creates a new feature group defined as the union of other feature group versions.
+ :returns: The created feature group.
+ :rtype: FeatureGroup
@@ -16070,7 +17607,7 @@ Package Contents
:type database_connector_id: str
:param object_name: Name of the database object to write to.
:type object_name: str
- :param write_mode: Enum string indicating whether to use INSERT or UPSERT.
+    :param write_mode: Enum string indicating whether to use INSERT, UPSERT, or REPLACE.
:type write_mode: str
:param database_feature_mapping: Key/value pair JSON object of "database connector column" -> "feature name" pairs.
:type database_feature_mapping: dict
@@ -16437,7 +17974,7 @@ Package Contents
- .. py:method:: create_dataset_from_application_connector(table_name, application_connector_id, dataset_config = None, refresh_schedule = None, version_limit = 30)
+ .. py:method:: create_dataset_from_application_connector(table_name, application_connector_id, dataset_config = None, refresh_schedule = None, version_limit = 30, incremental = False)
Creates a dataset from an Application Connector.
@@ -16451,6 +17988,8 @@ Package Contents
:type refresh_schedule: str
:param version_limit: The number of recent versions to preserve for the dataset (minimum 30).
:type version_limit: int
+ :param incremental: Signifies if the dataset is an incremental dataset.
+ :type incremental: bool
:returns: The created dataset.
:rtype: Dataset
@@ -18497,7 +20036,7 @@ Package Contents
- .. py:method:: get_chat_response(deployment_token, deployment_id, messages, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None)
+ .. py:method:: get_chat_response(deployment_token, deployment_id, messages, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, user_info = None)
Return a chat response which continues the conversation based on the input messages and search results.
@@ -18553,7 +20092,7 @@ Package Contents
- .. py:method:: get_conversation_response(deployment_id, message, deployment_token, deployment_conversation_id = None, external_session_id = None, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, doc_infos = None)
+ .. py:method:: get_conversation_response(deployment_id, message, deployment_token, deployment_conversation_id = None, external_session_id = None, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, doc_infos = None, user_info = None, execute_usercode_tool = False)
Return a conversation response which continues the conversation based on the input message and deployment conversation id (if exists).
@@ -18583,6 +20122,8 @@ Package Contents
:type chat_config: dict
:param doc_infos: An optional list of documents use for the conversation. A keyword 'doc_id' is expected to be present in each document for retrieving contents from docstore.
:type doc_infos: list
+ :param execute_usercode_tool: If True, will return the tool output in the response.
+ :type execute_usercode_tool: bool
@@ -18619,6 +20160,17 @@ Package Contents
+ .. py:method:: get_deep_agent_response(message, deployment_conversation_id = None)
+
+ Return a DeepAgent response with generated files if any, based on the input message.
+
+ :param message: The user's message/task for DeepAgent to complete
+ :type message: str
+ :param deployment_conversation_id: The unique identifier of a deployment conversation to continue. If not specified, a new one will be created.
+ :type deployment_conversation_id: str
+
+
+
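+       A minimal usage sketch; omitting deployment_conversation_id starts a new conversation:
+
+       .. code-block:: python
+
+          from abacusai import ApiClient
+
+          client = ApiClient("YOUR_API_KEY")  # placeholder key
+
+          response = client.get_deep_agent_response(
+              message="Build a one-page summary of last quarter's sales data",
+          )
+
+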
.. py:method:: get_search_results(deployment_token, deployment_id, query_data, num = 15)
Return the most relevant search results to the search query from the uploaded documents.
@@ -18737,6 +20289,36 @@ Package Contents
+ .. py:method:: get_optimization_inputs_from_serialized(deployment_token, deployment_id, query_data = None)
+
+       Get assignments for a given query, with new inputs
+
+ :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be safely embedded in an application or website.
+ :type deployment_token: str
+ :param deployment_id: The unique identifier of a deployment created under the project.
+ :type deployment_id: str
+       :param query_data: A dictionary of key-value pairs corresponding to the updated FGs in the FG tree that we want to update to compute new top-level FGs for the online solve (a dict of names to serialized dataframes).
+ :type query_data: dict
+
+
+
+ .. py:method:: get_assignments_online_with_new_serialized_inputs(deployment_token, deployment_id, query_data = None, solve_time_limit_seconds = None, optimality_gap_limit = None)
+
+       Get assignments for a given query, with new inputs
+
+ :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be safely embedded in an application or website.
+ :type deployment_token: str
+ :param deployment_id: The unique identifier of a deployment created under the project.
+ :type deployment_id: str
+       :param query_data: A dictionary with assignment, constraint, and constraint_equations_df dataframes (under these specific keys)
+ :type query_data: dict
+ :param solve_time_limit_seconds: Maximum time in seconds to spend solving the query.
+ :type solve_time_limit_seconds: float
+       :param optimality_gap_limit: Optimality gap we want to come within, after which we accept the solution as valid (0 means we only want an optimal solution). It is defined as abs(best_solution_found - best_bound) / abs(best_solution_found)
+ :type optimality_gap_limit: float
+
+
+
.. py:method:: check_constraints(deployment_token, deployment_id, query_data)
Check for any constraints violated by the overrides.
@@ -18894,19 +20476,6 @@ Package Contents
- .. py:method:: generate_image(deployment_token, deployment_id, query_data)
-
- Generate an image from text prompt.
-
- :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model within an application or website.
- :type deployment_token: str
- :param deployment_id: A unique identifier to a deployment created under the project.
- :type deployment_id: str
- :param query_data: Specifies the text prompt. For example, {'prompt': 'a cat'}
- :type query_data: dict
-
-
-
.. py:method:: execute_agent(deployment_token, deployment_id, arguments = None, keyword_arguments = None)
Executes a deployed AI agent function using the arguments as keyword arguments to the agent execute function.
@@ -19024,30 +20593,6 @@ Package Contents
- .. py:method:: execute_agent_with_binary_data(deployment_token, deployment_id, arguments = None, keyword_arguments = None, deployment_conversation_id = None, external_session_id = None, blobs = None)
-
- Executes a deployed AI agent function with binary data as inputs.
-
- :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
- :type deployment_token: str
- :param deployment_id: A unique string identifier for the deployment created under the project.
- :type deployment_id: str
- :param arguments: Positional arguments to the agent execute function.
- :type arguments: list
- :param keyword_arguments: A dictionary where each 'key' represents the parameter name and its corresponding 'value' represents the value of that parameter for the agent execute function.
- :type keyword_arguments: dict
- :param deployment_conversation_id: A unique string identifier for the deployment conversation used for the conversation.
- :type deployment_conversation_id: str
- :param external_session_id: A unique string identifier for the session used for the conversation. If both deployment_conversation_id and external_session_id are not provided, a new session will be created.
- :type external_session_id: str
- :param blobs: A dictionary of binary data to use as inputs to the agent execute function.
- :type blobs: None
-
- :returns: The result of the agent execution
- :rtype: AgentDataExecutionResult
-
-
-
.. py:method:: start_autonomous_agent(deployment_token, deployment_id, arguments = None, keyword_arguments = None, save_conversations = True)
Starts a deployed Autonomous agent associated with the given deployment_conversation_id using the arguments and keyword arguments as inputs for execute function of trigger node.
@@ -19429,7 +20974,7 @@ Package Contents
- .. py:method:: create_python_function(name, source_code = None, function_name = None, function_variable_mappings = None, package_requirements = None, function_type = 'FEATURE_GROUP', description = None, examples = None)
+ .. py:method:: create_python_function(name, source_code = None, function_name = None, function_variable_mappings = None, package_requirements = None, function_type = 'FEATURE_GROUP', description = None, examples = None, user_level_connectors = None, org_level_connectors = None, output_variable_mappings = None)
Creates a custom Python function that is reusable.
@@ -19447,15 +20992,21 @@ Package Contents
:type function_type: str
:param description: Description of the Python function. This should include details about the function's purpose, expected inputs and outputs, and any important usage considerations or limitations.
:type description: str
- :param examples: Dictionary containing example use cases and anti-patterns. Should include 'positive_examples' showing recommended usage and 'negative_examples' showing cases to avoid.
+       :param examples: Dictionary containing example use cases and anti-patterns. Should include 'positive_examples' showing recommended usage and 'negative_examples' showing cases to avoid.
:type examples: dict
+ :param user_level_connectors: Dictionary containing user level connectors.
+ :type user_level_connectors: Dict
+ :param org_level_connectors: List containing organization level connectors.
+ :type org_level_connectors: List
+ :param output_variable_mappings: List of output variable mappings that defines the elements of the function's return value.
+ :type output_variable_mappings: List
:returns: The Python function that can be used (e.g. for feature group transform).
:rtype: PythonFunction
- .. py:method:: update_python_function(name, source_code = None, function_name = None, function_variable_mappings = None, package_requirements = None, description = None, examples = None)
+ .. py:method:: update_python_function(name, source_code = None, function_name = None, function_variable_mappings = None, package_requirements = None, description = None, examples = None, user_level_connectors = None, org_level_connectors = None, output_variable_mappings = None)
Update custom python function with user inputs for the given python function.
@@ -19473,6 +21024,12 @@ Package Contents
:type description: str
:param examples: Dictionary containing example use cases and anti-patterns. Should include 'positive_examples' showing recommended usage and 'negative_examples' showing cases to avoid.
:type examples: dict
+ :param user_level_connectors: Dictionary containing user level connectors.
+ :type user_level_connectors: Dict
+ :param org_level_connectors: List of organization level connectors.
+ :type org_level_connectors: List
+ :param output_variable_mappings: List of output variable mappings that defines the elements of the function's return value.
+ :type output_variable_mappings: List
:returns: The Python function object.
:rtype: PythonFunction
@@ -20054,7 +21611,7 @@ Package Contents
- .. py:method:: create_organization_secret(secret_key, value)
+ .. py:method:: create_organization_secret(secret_key, value, secret_type = OrganizationSecretType.ORG_SECRET, metadata = None)
Creates a secret which can be accessed in functions and notebooks.
@@ -20062,6 +21619,10 @@ Package Contents
:type secret_key: str
:param value: The secret value.
:type value: str
+ :param secret_type: The type of secret. Use OrganizationSecretType enum values.
+ :type secret_type: OrganizationSecretType
+ :param metadata: Additional metadata for the secret.
+ :type metadata: dict
:returns: The created secret.
:rtype: OrganizationSecret
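+
+       A hedged usage sketch; the key, value, and metadata are placeholders:
+
+       .. code-block:: python
+
+          from abacusai import ApiClient, OrganizationSecretType
+
+          client = ApiClient("YOUR_API_KEY")  # placeholder key
+
+          secret = client.create_organization_secret(
+              secret_key="SLACK_WEBHOOK_URL",
+              value="https://hooks.slack.com/services/T000/B000/XXXX",
+              secret_type=OrganizationSecretType.ORG_SECRET,
+              metadata={"owner": "data-team"},
+          )
+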
@@ -20247,6 +21808,49 @@ Package Contents
+ .. py:method:: add_user_group_object_permission(object_id, user_group_id, object_type, permission = 'ALL')
+
+ Add user group object permission for any object type.
+
+ :param object_id: The ID of the object (dataset_id, project_id, external_connection_id, feature_group_id, etc.)
+ :type object_id: str
+ :param user_group_id: The ID of the user group
+ :type user_group_id: str
+ :param object_type: The type of object ('dataset', 'project', 'external_connection', 'feature_group')
+ :type object_type: str
+ :param permission: The permission to grant (default: 'ALL')
+ :type permission: str
+
+
+
+ .. py:method:: update_user_group_object_permission(object_id, user_group_id, object_type, permission = 'ALL')
+
+ Update user group object permission for any object type.
+
+ :param object_id: The ID of the object (dataset_id, project_id, external_connection_id, feature_group_id, etc.)
+ :type object_id: str
+ :param user_group_id: The ID of the user group
+ :type user_group_id: str
+ :param object_type: The type of object ('dataset', 'project', 'external_connection', 'feature_group')
+ :type object_type: str
+ :param permission: The permission to grant (default: 'ALL')
+ :type permission: str
+
+
+
+ .. py:method:: remove_user_group_object_permission(object_id, user_group_id, object_type)
+
+ Remove user group object permission for any object type.
+
+ :param object_id: The ID of the object (dataset_id, project_id, external_connection_id, feature_group_id, etc.)
+ :type object_id: str
+ :param user_group_id: The ID of the user group
+ :type user_group_id: str
+ :param object_type: The type of object ('dataset', 'project', 'external_connection', 'feature_group')
+ :type object_type: str
+
+
+
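+ A sketch of granting, updating, and revoking a user group's access to a dataset (all IDs below are placeholders; ``client`` is an authenticated ``ApiClient``):
+
+ .. code-block:: python
+
+    client.add_user_group_object_permission(
+        object_id="dataset_id_123",
+        user_group_id="user_group_id_456",
+        object_type="dataset",
+        permission="ALL",
+    )
+    # Re-apply or change the permission; 'ALL' is the default grant.
+    client.update_user_group_object_permission(
+        object_id="dataset_id_123",
+        user_group_id="user_group_id_456",
+        object_type="dataset",
+    )
+    # Revoke the grant when it is no longer needed.
+    client.remove_user_group_object_permission(
+        object_id="dataset_id_123",
+        user_group_id="user_group_id_456",
+        object_type="dataset",
+    )
+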
.. py:method:: create_app_user_group(name)
Creates a new App User Group. This User Group is used to have permissions to access the external chatbots.
@@ -20347,6 +21951,24 @@ Package Contents
+ .. py:method:: add_developers_to_external_application(external_application_id)
+
+ Adds a permission for the platform App User Group to access an External Application.
+
+ :param external_application_id: The ID of the External Application.
+ :type external_application_id: str
+
+
+
+ .. py:method:: remove_developers_from_external_application(external_application_id)
+
+ Removes a permission for the platform App User Group to access an External Application.
+
+ :param external_application_id: The ID of the External Application.
+ :type external_application_id: str
+
+
+
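+ For example (the ID below is a placeholder; ``client`` is an authenticated ``ApiClient``):
+
+ .. code-block:: python
+
+    # Grant the platform App User Group access to the external application ...
+    client.add_developers_to_external_application("external_application_id_123")
+    # ... and remove the grant when it is no longer needed.
+    client.remove_developers_from_external_application("external_application_id_123")
+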
.. py:method:: create_external_application(deployment_id, name = None, description = None, logo = None, theme = None)
Creates a new External Application from an existing ChatLLM Deployment.
@@ -20806,6 +22428,25 @@ Package Contents
+ .. py:method:: query_mcp_server(task, server_name = None, mcp_server_connection_id = None)
+
+ Query a Remote MCP server. For a given task, it runs the required tools of the MCP server with appropriate arguments and returns the output.
+
+
+ :param task: A comprehensive description of the task to perform using the MCP server tools.
+ :type task: str
+ :param server_name: The name of the MCP server.
+ :type server_name: str
+ :param mcp_server_connection_id: The connection ID of the MCP server to query.
+ :type mcp_server_connection_id: str
+
+ :returns: The execution logs as well as the final output of the task execution.
+ :rtype: McpServerQueryResult
+
+
+
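+ A sketch of querying a connected MCP server (the server name and task are illustrative; ``client`` is an authenticated ``ApiClient``):
+
+ .. code-block:: python
+
+    result = client.query_mcp_server(
+        task="List the open issues in my tracker and summarize the three oldest ones",
+        server_name="my_issue_tracker",  # assumed name of a configured MCP server
+    )
+    # McpServerQueryResult carries the execution logs and the final output.
+    print(result.to_dict())
+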
.. py:exception:: ApiException(message, http_status, exception = None, request_id = None)
Bases: :py:obj:`Exception`
@@ -20882,10 +22523,13 @@ Package Contents
:type skip_version_check: bool
- .. py:method:: list_api_keys()
+ .. py:method:: list_api_keys(type = 'default')
Lists all of the user's API keys
+ :param type: The type of API keys to list.
+ :type type: str
+
:returns: List of API Keys for the current user's organization.
:rtype: list[ApiKey]
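+
+ For example, to list keys of the default type (other ``type`` values depend on how your organization issues keys):
+
+ .. code-block:: python
+
+    for api_key in client.list_api_keys(type="default"):
+        print(api_key.to_dict())
+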
@@ -21562,6 +23206,32 @@ Package Contents
+ .. py:method:: query_database_connector_datallm(database_connector_id, query)
+
+ Runs a read-only query in the specified database connector. This API is specifically designed for DataLLM and enforces read-only access with user-specific access control.
+
+
+ :param database_connector_id: A unique string identifier for the database connector.
+ :type database_connector_id: str
+ :param query: The query to be run in the database connector (must be read-only).
+ :type query: str
+
+
+
+ .. py:method:: get_database_connector_auth(database_connector_id)
+
+ Get the authentication details for a given database connector.
+
+ :param database_connector_id: The unique ID associated with the database connector.
+ :type database_connector_id: str
+
+ :returns: The database connector with the authentication details.
+ :rtype: DatabaseConnector
+
+
+
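+ A sketch combining the two calls above (the connector ID and query are placeholders; ``client`` is an authenticated ``ApiClient``):
+
+ .. code-block:: python
+
+    connector_id = "database_connector_id_123"
+    # Inspect the connector's authentication details first ...
+    connector = client.get_database_connector_auth(connector_id)
+    print(connector.to_dict())
+    # ... then run a read-only query through the DataLLM path.
+    client.query_database_connector_datallm(
+        database_connector_id=connector_id,
+        query="SELECT id, status FROM orders LIMIT 10",
+    )
+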
.. py:method:: list_application_connectors()
Retrieves a list of all application connectors along with their associated attributes.
@@ -21580,7 +23250,7 @@ Package Contents
- .. py:method:: get_connector_auth(service = None, application_connector_id = None, scopes = None)
+ .. py:method:: get_connector_auth(service = None, application_connector_id = None, scopes = None, generic_oauth_service = None)
Get the authentication details for a given connector. For user level connectors, the service is required. For org level connectors, the application_connector_id is required.
@@ -21590,8 +23260,40 @@ Package Contents
:type application_connector_id: str
:param scopes: The scopes to request for the connector.
:type scopes: List
+ :param generic_oauth_service: For GENERIC_OAUTH service, specify the OAuth provider (e.g., 'spotify').
+ :type generic_oauth_service: str
:returns: The application connector with the authentication details.
+ :rtype: UnifiedConnector
+
+
+
+ .. py:method:: get_user_connector_auth(service, scopes = None, generic_oauth_service = None, config_connector = None)
+
+ Get the authentication details for a given user level connector.
+
+ :param service: The service name.
+ :type service: ApplicationConnectorType
+ :param scopes: The scopes to request for the connector.
+ :type scopes: List
+ :param generic_oauth_service: For GENERIC_OAUTH service, specify the OAuth provider (e.g., 'spotify').
+ :type generic_oauth_service: str
+ :param config_connector: The config connector id.
+ :type config_connector: str
+
+ :returns: The application connector with the authentication details.
+ :rtype: UnifiedConnector
+
+
+
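+ A sketch of fetching auth for a user level connector (the enum import path, enum member, and OAuth provider are assumptions for illustration):
+
+ .. code-block:: python
+
+    from abacusai.api_class import ApplicationConnectorType  # assumed import path for the enum
+
+    connector = client.get_user_connector_auth(
+        service=ApplicationConnectorType.GENERIC_OAUTH,  # assumed enum member
+        generic_oauth_service="spotify",
+    )
+    print(connector.to_dict())
+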
+ .. py:method:: get_user_mcp_connector_auth(mcp_server_name = None)
+
+ Get the authentication details for an MCP connector.
+
+ :param mcp_server_name: The name of the MCP server
+ :type mcp_server_name: str
+
+ :returns: The MCP connector
:rtype: ApplicationConnector
@@ -21909,12 +23611,14 @@ Package Contents
- .. py:method:: list_model_monitors(project_id)
+ .. py:method:: list_model_monitors(project_id, limit = None)
Retrieves the list of model monitors in the specified project.
:param project_id: Unique string identifier associated with the project.
:type project_id: str
+ :param limit: Maximum number of model monitors to return. An internal default limit applies if not set.
+ :type limit: int
:returns: A list of model monitors.
:rtype: list[ModelMonitor]
@@ -22533,12 +24237,14 @@ Package Contents
- .. py:method:: list_batch_predictions(project_id)
+ .. py:method:: list_batch_predictions(project_id, limit = None)
Retrieves a list of batch predictions in the project.
:param project_id: Unique string identifier of the project.
:type project_id: str
+ :param limit: Maximum number of batch predictions to return. An internal default limit applies if not set.
+ :type limit: int
:returns: List of batch prediction jobs.
:rtype: list[BatchPrediction]
@@ -22902,11 +24608,16 @@ Package Contents
- .. py:method:: list_organization_secrets()
+ .. py:method:: list_organization_secrets(decrypt_value = False, secret_type = OrganizationSecretType.ORG_SECRET)
Lists all secrets for an organization.
- :returns: list of secrets belonging to the organization.
+ :param decrypt_value: Whether to decrypt the secret values.
+ :type decrypt_value: bool
+ :param secret_type: Filter secrets by type. Use OrganizationSecretType enum values.
+ :type secret_type: OrganizationSecretType
+
+ :returns: List of secrets.
:rtype: list[OrganizationSecret]
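+
+ For example, to list secrets without decrypting their values (``secret_type`` falls back to ``OrganizationSecretType.ORG_SECRET``):
+
+ .. code-block:: python
+
+    for secret in client.list_organization_secrets(decrypt_value=False):
+        print(secret.to_dict())
+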
@@ -22999,7 +24710,7 @@ Package Contents
- .. py:method:: get_deployment_conversation(deployment_conversation_id = None, external_session_id = None, deployment_id = None, filter_intermediate_conversation_events = True, get_unused_document_uploads = False)
+ .. py:method:: get_deployment_conversation(deployment_conversation_id = None, external_session_id = None, deployment_id = None, filter_intermediate_conversation_events = True, get_unused_document_uploads = False, start = None, limit = None, include_all_versions = False)
Gets a deployment conversation.
@@ -23013,13 +24724,30 @@ Package Contents
:type filter_intermediate_conversation_events: bool
:param get_unused_document_uploads: If true, unused document uploads will be returned. Default is false.
:type get_unused_document_uploads: bool
+ :param start: The start index within the conversation history.
+ :type start: int
+ :param limit: The maximum number of conversation entries to return.
+ :type limit: int
+ :param include_all_versions: If True, includes all versions of the last bot message for version switching functionality.
+ :type include_all_versions: bool
:returns: The deployment conversation.
:rtype: DeploymentConversation
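+
+ A sketch of paging through a long conversation (the ID and page size are placeholders; ``client`` is an authenticated ``ApiClient``):
+
+ .. code-block:: python
+
+    conversation = client.get_deployment_conversation(
+        deployment_conversation_id="conversation_id_123",
+        start=0,
+        limit=50,
+        include_all_versions=True,  # keep every version of the last bot message
+    )
+    print(conversation.total_events)
+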
- .. py:method:: list_deployment_conversations(deployment_id = None, external_application_id = None, conversation_type = None, fetch_last_llm_info = False)
+ .. py:method:: get_deployment_conversation_file(deployment_conversation_id, file_path)
+
+ Gets a deployment conversation file.
+
+ :param deployment_conversation_id: Unique ID of the conversation.
+ :type deployment_conversation_id: str
+ :param file_path: The path of the file to get.
+ :type file_path: str
+
+
+
+ .. py:method:: list_deployment_conversations(deployment_id = None, external_application_id = None, conversation_type = None, fetch_last_llm_info = False, limit = None, search = None)
Lists all conversations for the given deployment and current user.
@@ -23031,6 +24759,10 @@ Package Contents
:type conversation_type: DeploymentConversationType
:param fetch_last_llm_info: If true, the LLM info for the most recent conversation will be fetched. Only applicable for system-created bots.
:type fetch_last_llm_info: bool
+ :param limit: The number of conversations to return. Defaults to 600.
+ :type limit: int
+ :param search: The search query to filter conversations by title.
+ :type search: str
:returns: The deployment conversations.
:rtype: list[DeploymentConversation]
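+
+ For example, to fetch only the first 20 conversations whose titles mention "forecast" (IDs are placeholders):
+
+ .. code-block:: python
+
+    conversations = client.list_deployment_conversations(
+        deployment_id="deployment_id_123",
+        limit=20,
+        search="forecast",
+    )
+    for conversation in conversations:
+        print(conversation.name)
+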
@@ -23051,6 +24783,18 @@ Package Contents
+ .. py:method:: list_user_group_object_permissions()
+
+ List all user group permissions associated with the objects in the organization.
+
+ If an object has no associated user groups, it is public among developers and admins.
+
+
+ :returns: List of user group object permissions in the organization.
+ :rtype: list[UserGroupObjectPermission]
+
+
+
.. py:method:: get_app_user_group(user_group_id)
Gets an App User Group.
@@ -23063,6 +24807,18 @@ Package Contents
+ .. py:method:: get_app_user_group_from_name(name = None)
+
+ Gets an App User Group by name.
+
+ :param name: The name of the app user group to retrieve.
+ :type name: str
+
+ :returns: The app user group with the specified name.
+ :rtype: AppUserGroup
+
+
+
.. py:method:: describe_external_application(external_application_id)
Describes an External Application.
@@ -23256,6 +25012,22 @@ Package Contents
+ .. py:method:: list_route_llm_models()
+
+ Lists all the models and their rates for the RouteLLM API.
+
+ :returns: A list of dicts, each containing the model name, description, ID, and API price info.
+
+
+
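+ For example (a sketch; the exact keys in each dict are whatever the API returns):
+
+ .. code-block:: python
+
+    for model in client.list_route_llm_models():
+        print(model)
+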
+.. py:class:: ToolResponse(*args, **kwargs)
+
+ Bases: :py:obj:`AgentResponse`
+
+
+ Response object for a tool, supporting non-text response sections
+
+
.. py:data:: _request_context
.. py:class:: CodeAgentResponse(client, deploymentConversationId=None, messages=None, toolUseRequest=None)
@@ -23305,6 +25077,46 @@ Package Contents
+.. py:class:: CodeAutocompleteEditPredictionResponse(client, autocompleteResponse=None, showAutocomplete=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ An autocomplete response from an LLM
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param autocompleteResponse: autocomplete code
+ :type autocompleteResponse: str
+ :param showAutocomplete: Whether to show autocomplete in the client
+ :type showAutocomplete: bool
+
+
+ .. py:attribute:: autocomplete_response
+ :value: None
+
+
+
+ .. py:attribute:: show_autocomplete
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
.. py:class:: CodeAutocompleteResponse(client, autocompleteResponse=None, showAutocomplete=None, lineNumber=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -23352,7 +25164,7 @@ Package Contents
-.. py:class:: CodeBot(client, llmName=None, name=None, imageUploadSupported=None, codeAgentSupported=None, isPremium=None)
+.. py:class:: CodeBot(client, llmName=None, name=None, imageUploadSupported=None, codeAgentSupported=None, codeEditSupported=None, isPremium=None, llmBotIcon=None, provider=None, isUserApiKeyAllowed=None, isRateLimited=None, apiKeyUrl=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -23369,8 +25181,20 @@ Package Contents
:type imageUploadSupported: bool
:param codeAgentSupported: Whether the LLM supports code agent.
:type codeAgentSupported: bool
+ :param codeEditSupported: Whether the LLM supports code edit.
+ :type codeEditSupported: bool
:param isPremium: Whether the LLM is a premium LLM.
:type isPremium: bool
+ :param llmBotIcon: The icon of the LLM bot.
+ :type llmBotIcon: str
+ :param provider: The provider of the LLM.
+ :type provider: str
+ :param isUserApiKeyAllowed: Whether the LLM supports user API key.
+ :type isUserApiKeyAllowed: bool
+ :param isRateLimited: Whether the LLM is rate limited.
+ :type isRateLimited: bool
+ :param apiKeyUrl: The URL to get the API key.
+ :type apiKeyUrl: str
.. py:attribute:: llm_name
@@ -23393,11 +25217,41 @@ Package Contents
+ .. py:attribute:: code_edit_supported
+ :value: None
+
+
+
.. py:attribute:: is_premium
:value: None
+ .. py:attribute:: llm_bot_icon
+ :value: None
+
+
+
+ .. py:attribute:: provider
+ :value: None
+
+
+
+ .. py:attribute:: is_user_api_key_allowed
+ :value: None
+
+
+
+ .. py:attribute:: is_rate_limited
+ :value: None
+
+
+
+ .. py:attribute:: api_key_url
+ :value: None
+
+
+
.. py:attribute:: deprecated_keys
@@ -23540,6 +25394,93 @@ Package Contents
+.. py:class:: CodeEmbeddings(client, embeddings=None, chunkingScheme=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Code embeddings
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param embeddings: A dictionary mapping the file name to its embeddings.
+ :type embeddings: dict
+ :param chunkingScheme: The scheme used for chunking the embeddings.
+ :type chunkingScheme: str
+
+
+ .. py:attribute:: embeddings
+ :value: None
+
+
+
+ .. py:attribute:: chunking_scheme
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: CodeLlmChangedFiles(client, addedFiles=None, updatedFiles=None, deletedFiles=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Code changed files
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param addedFiles: A list of added file paths.
+ :type addedFiles: list
+ :param updatedFiles: A list of updated file paths.
+ :type updatedFiles: list
+ :param deletedFiles: A list of deleted file paths.
+ :type deletedFiles: list
+
+
+ .. py:attribute:: added_files
+ :value: None
+
+
+
+ .. py:attribute:: updated_files
+ :value: None
+
+
+
+ .. py:attribute:: deleted_files
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
.. py:class:: CodeSource(client, sourceType=None, sourceCode=None, applicationConnectorId=None, applicationConnectorInfo=None, packageRequirements=None, status=None, error=None, publishingMsg=None, moduleDependencies=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -23635,7 +25576,134 @@ Package Contents
-.. py:class:: ComputePointInfo(client, updatedAt=None, last24HoursUsage=None, last7DaysUsage=None, currMonthAvailPoints=None, currMonthUsage=None, lastThrottlePopUp=None)
+.. py:class:: CodeSuggestionValidationResponse(client, isValid=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ A response from an LLM to validate a code suggestion.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param isValid: Whether the code suggestion is valid.
+ :type isValid: bool
+
+
+ .. py:attribute:: is_valid
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: CodeSummaryResponse(client, summary=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ A summary response from an LLM
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param summary: The summary of the code.
+ :type summary: str
+
+
+ .. py:attribute:: summary
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: CodellmEmbeddingConstants(client, maxSupportedWorkspaceFiles=None, maxSupportedWorkspaceChunks=None, maxConcurrentRequests=None, fileExtensionToChunkingScheme=None, idleTimeoutSeconds=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ A dictionary of constants to be used in the autocomplete.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param maxSupportedWorkspaceFiles: Max supported workspace files
+ :type maxSupportedWorkspaceFiles: int
+ :param maxSupportedWorkspaceChunks: Max supported workspace chunks
+ :type maxSupportedWorkspaceChunks: int
+ :param maxConcurrentRequests: Max concurrent requests
+ :type maxConcurrentRequests: int
+ :param fileExtensionToChunkingScheme: Map between file extensions and their chunking schemes
+ :type fileExtensionToChunkingScheme: dict
+ :param idleTimeoutSeconds: The idle timeout without any activity before the workspace is refreshed.
+ :type idleTimeoutSeconds: int
+
+
+ .. py:attribute:: max_supported_workspace_files
+ :value: None
+
+
+
+ .. py:attribute:: max_supported_workspace_chunks
+ :value: None
+
+
+
+ .. py:attribute:: max_concurrent_requests
+ :value: None
+
+
+
+ .. py:attribute:: file_extension_to_chunking_scheme
+ :value: None
+
+
+
+ .. py:attribute:: idle_timeout_seconds
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: ComputePointInfo(client, updatedAt=None, last24HoursUsage=None, last7DaysUsage=None, currMonthAvailPoints=None, currMonthUsage=None, lastThrottlePopUp=None, alwaysDisplay=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -23656,6 +25724,8 @@ Package Contents
:type currMonthUsage: int
:param lastThrottlePopUp: The last time the organization was throttled
:type lastThrottlePopUp: str
+ :param alwaysDisplay: Whether to always display the compute point toggle
+ :type alwaysDisplay: bool
.. py:attribute:: updated_at
@@ -23688,6 +25758,11 @@ Package Contents
+ .. py:attribute:: always_display
+ :value: None
+
+
+
.. py:attribute:: deprecated_keys
@@ -23757,6 +25832,305 @@ Package Contents
+.. py:class:: ConstantsAutocompleteResponse(client, maxPendingRequests=None, acceptanceDelay=None, debounceDelay=None, recordUserAction=None, validateSuggestion=None, validationLinesThreshold=None, maxTrackedRecentChanges=None, diffThreshold=None, derivativeThreshold=None, defaultSurroundingLines=None, maxTrackedVisitChanges=None, selectionCooldownMs=None, viewingCooldownMs=None, maxLines=None, editCooldownMs=None, scrollDebounceMs=None, lspDeadline=None, diagnosticsThreshold=None, diagnosticEachThreshold=None, numVsCodeSuggestions=None, minReindexingInterval=None, minRefreshSummaryInterval=None, summaryBatchSize=None, jobReorderInterval=None, stopRapidChanges=None, delaySummaryBatches=None, delaySummaryBatchesRateLimit=None, maxSymbolsFuzzyMatch=None, fuzzySymbolMatchThreshold=None, symbolsCacheUpdateInterval=None, symbolsStorageUpdateInterval=None, editPredictionSimilarityThreshold=None, minSearchWordLength=None, maxOccurrencesPerWord=None, maxWordsContentMatches=None, editPredictionEnabled=None, snapshotIntervalMs=None, linesForSnapshot=None, embeddingConstants=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ A dictionary of constants to be used in the autocomplete.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param maxPendingRequests: The maximum number of pending requests.
+ :type maxPendingRequests: int
+ :param acceptanceDelay: The acceptance delay.
+ :type acceptanceDelay: int
+ :param debounceDelay: The debounce delay.
+ :type debounceDelay: int
+ :param recordUserAction: Whether to record user action.
+ :type recordUserAction: bool
+ :param validateSuggestion: Whether to validate the suggestion.
+ :type validateSuggestion: bool
+ :param validationLinesThreshold: The number of lines to validate the suggestion.
+ :type validationLinesThreshold: int
+ :param maxTrackedRecentChanges: The maximum number of recent file changes to track.
+ :type maxTrackedRecentChanges: int
+ :param diffThreshold: The diff operations threshold.
+ :type diffThreshold: int
+ :param derivativeThreshold: The derivative threshold for deletions
+ :type derivativeThreshold: int
+ :param defaultSurroundingLines: The default number of surrounding lines to include in the recently visited context.
+ :type defaultSurroundingLines: int
+ :param maxTrackedVisitChanges: The maximum number of recently visited ranges to track.
+ :type maxTrackedVisitChanges: int
+ :param selectionCooldownMs: The cooldown time in milliseconds for selection changes.
+ :type selectionCooldownMs: int
+ :param viewingCooldownMs: The cooldown time in milliseconds for viewing changes.
+ :type viewingCooldownMs: int
+ :param maxLines: The maximum number of lines to include in recently visited context.
+ :type maxLines: int
+ :param editCooldownMs: The cooldown time in milliseconds after last edit.
+ :type editCooldownMs: int
+ :param scrollDebounceMs: The debounce time in milliseconds for scroll events.
+ :type scrollDebounceMs: int
+ :param lspDeadline: The deadline in milliseconds for LSP context.
+ :type lspDeadline: int
+ :param diagnosticsThreshold: The max number of diagnostics to show.
+ :type diagnosticsThreshold: int
+ :param diagnosticEachThreshold: The max number of characters to show for each diagnostic type.
+ :type diagnosticEachThreshold: int
+ :param numVsCodeSuggestions: The number of VS Code suggestions to show.
+ :type numVsCodeSuggestions: int
+ :param minReindexingInterval: The minimum interval between reindexes in ms.
+ :type minReindexingInterval: int
+ :param minRefreshSummaryInterval: The minimum interval between refresh summary in ms.
+ :type minRefreshSummaryInterval: int
+ :param summaryBatchSize: The batch size for code summary in autocomplete.
+ :type summaryBatchSize: int
+ :param jobReorderInterval: The interval in ms to reorder jobs in the job queue for summary.
+ :type jobReorderInterval: int
+ :param stopRapidChanges: Whether to stop rapid changes in autocomplete.
+ :type stopRapidChanges: bool
+ :param delaySummaryBatches: The delay in ms between summary batches.
+ :type delaySummaryBatches: int
+ :param delaySummaryBatchesRateLimit: The delay in ms between summary batches when a rate limit is hit.
+ :type delaySummaryBatchesRateLimit: int
+ :param maxSymbolsFuzzyMatch: The max number of symbols to fuzzy match.
+ :type maxSymbolsFuzzyMatch: int
+ :param fuzzySymbolMatchThreshold: The threshold for fuzzy symbol match.
+ :type fuzzySymbolMatchThreshold: int
+ :param symbolsCacheUpdateInterval: The interval in ms to update the symbols cache.
+ :type symbolsCacheUpdateInterval: int
+ :param symbolsStorageUpdateInterval: The interval in ms to update the symbols storage.
+ :type symbolsStorageUpdateInterval: int
+ :param editPredictionSimilarityThreshold: The threshold for edit prediction similarity.
+ :type editPredictionSimilarityThreshold: int
+ :param minSearchWordLength: The minimum length of the word to be searched.
+ :type minSearchWordLength: int
+ :param maxOccurrencesPerWord: The maximum occurrences of a particular search word present in the file.
+ :type maxOccurrencesPerWord: int
+ :param maxWordsContentMatches: The maximum number of content matches from the client.
+ :type maxWordsContentMatches: int
+ :param editPredictionEnabled: Whether to enable edit prediction.
+ :type editPredictionEnabled: bool
+ :param snapshotIntervalMs: The interval in ms to snapshot the file for recent file changes.
+ :type snapshotIntervalMs: int
+ :param linesForSnapshot: The maximum number of lines to snapshot for recent file changes.
+ :type linesForSnapshot: int
+ :param embeddingConstants: Embedding constants
+ :type embeddingConstants: CodellmEmbeddingConstants
+
+
+ .. py:attribute:: max_pending_requests
+ :value: None
+
+
+
+ .. py:attribute:: acceptance_delay
+ :value: None
+
+
+
+ .. py:attribute:: debounce_delay
+ :value: None
+
+
+
+ .. py:attribute:: record_user_action
+ :value: None
+
+
+
+ .. py:attribute:: validate_suggestion
+ :value: None
+
+
+
+ .. py:attribute:: validation_lines_threshold
+ :value: None
+
+
+
+ .. py:attribute:: max_tracked_recent_changes
+ :value: None
+
+
+
+ .. py:attribute:: diff_threshold
+ :value: None
+
+
+
+ .. py:attribute:: derivative_threshold
+ :value: None
+
+
+
+ .. py:attribute:: default_surrounding_lines
+ :value: None
+
+
+
+ .. py:attribute:: max_tracked_visit_changes
+ :value: None
+
+
+
+ .. py:attribute:: selection_cooldown_ms
+ :value: None
+
+
+
+ .. py:attribute:: viewing_cooldown_ms
+ :value: None
+
+
+
+ .. py:attribute:: max_lines
+ :value: None
+
+
+
+ .. py:attribute:: edit_cooldown_ms
+ :value: None
+
+
+
+ .. py:attribute:: scroll_debounce_ms
+ :value: None
+
+
+
+ .. py:attribute:: lsp_deadline
+ :value: None
+
+
+
+ .. py:attribute:: diagnostics_threshold
+ :value: None
+
+
+
+ .. py:attribute:: diagnostic_each_threshold
+ :value: None
+
+
+
+ .. py:attribute:: num_vs_code_suggestions
+ :value: None
+
+
+
+ .. py:attribute:: min_reindexing_interval
+ :value: None
+
+
+
+ .. py:attribute:: min_refresh_summary_interval
+ :value: None
+
+
+
+ .. py:attribute:: summary_batch_size
+ :value: None
+
+
+
+ .. py:attribute:: job_reorder_interval
+ :value: None
+
+
+
+ .. py:attribute:: stop_rapid_changes
+ :value: None
+
+
+
+ .. py:attribute:: delay_summary_batches
+ :value: None
+
+
+
+ .. py:attribute:: delay_summary_batches_rate_limit
+ :value: None
+
+
+
+ .. py:attribute:: max_symbols_fuzzy_match
+ :value: None
+
+
+
+ .. py:attribute:: fuzzy_symbol_match_threshold
+ :value: None
+
+
+
+ .. py:attribute:: symbols_cache_update_interval
+ :value: None
+
+
+
+ .. py:attribute:: symbols_storage_update_interval
+ :value: None
+
+
+
+ .. py:attribute:: edit_prediction_similarity_threshold
+ :value: None
+
+
+
+ .. py:attribute:: min_search_word_length
+ :value: None
+
+
+
+ .. py:attribute:: max_occurrences_per_word
+ :value: None
+
+
+
+ .. py:attribute:: max_words_content_matches
+ :value: None
+
+
+
+ .. py:attribute:: edit_prediction_enabled
+ :value: None
+
+
+
+ .. py:attribute:: snapshot_interval_ms
+ :value: None
+
+
+
+ .. py:attribute:: lines_for_snapshot
+ :value: None
+
+
+
+ .. py:attribute:: embedding_constants
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
.. py:class:: CpuGpuMemorySpecs(client, default=None, data=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -23797,7 +26171,7 @@ Package Contents
-.. py:class:: CustomChatInstructions(client, userInformationInstructions=None, responseInstructions=None, enableCodeExecution=None, enableImageGeneration=None, enableWebSearch=None, enablePlayground=None, experimentalFeatures=None)
+.. py:class:: CustomChatInstructions(client, userInformationInstructions=None, responseInstructions=None, enableCodeExecution=None, enableImageGeneration=None, enableWebSearch=None, enablePlayground=None, enableMemories=None, experimentalFeatures=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -23818,6 +26192,8 @@ Package Contents
:type enableWebSearch: bool
:param enablePlayground: Whether or not playground is enabled.
:type enablePlayground: bool
+ :param enableMemories: Whether or not memories are enabled.
+ :type enableMemories: bool
:param experimentalFeatures: Experimental features.
:type experimentalFeatures: dict
@@ -23852,6 +26228,11 @@ Package Contents
+ .. py:attribute:: enable_memories
+ :value: None
+
+
+
.. py:attribute:: experimental_features
:value: None
@@ -23872,6 +26253,60 @@ Package Contents
+.. py:class:: CustomDomain(client, status=None, message=None, expectedNameservers=None, currentNameservers=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Result of adding a custom domain to a hosted app
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param status: Whether the custom domain was added successfully
+ :type status: bool
+ :param message: The message from the custom domain
+ :type message: str
+ :param expectedNameservers: The expected nameservers for the custom domain
+ :type expectedNameservers: list
+ :param currentNameservers: The current nameservers for the custom domain
+ :type currentNameservers: list
+
+
+ .. py:attribute:: status
+ :value: None
+
+
+
+ .. py:attribute:: message
+ :value: None
+
+
+
+ .. py:attribute:: expected_nameservers
+ :value: None
+
+
+
+ .. py:attribute:: current_nameservers
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
.. py:class:: CustomLossFunction(client, notebookId=None, name=None, createdAt=None, lossFunctionName=None, lossFunctionType=None, codeSource={})
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -24138,6 +26573,67 @@ Package Contents
+.. py:class:: DaemonTaskConversation(client, deploymentConversationId=None, createdAt=None, updatedAt=None, deploymentConversationName=None, externalApplicationId=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Daemon Task Conversation
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param deploymentConversationId: The ID of the deployment conversation
+ :type deploymentConversationId: id
+ :param createdAt: The creation timestamp
+ :type createdAt: str
+ :param updatedAt: The last update timestamp
+ :type updatedAt: str
+ :param deploymentConversationName: The name of the conversation
+ :type deploymentConversationName: str
+ :param externalApplicationId: The external application ID
+ :type externalApplicationId: str
+
+
+ .. py:attribute:: deployment_conversation_id
+ :value: None
+
+
+
+ .. py:attribute:: created_at
+ :value: None
+
+
+
+ .. py:attribute:: updated_at
+ :value: None
+
+
+
+ .. py:attribute:: deployment_conversation_name
+ :value: None
+
+
+
+ .. py:attribute:: external_application_id
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
.. py:class:: DataConsistencyDuplication(client, totalCount=None, numDuplicates=None, sample={})
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -24517,6 +27013,30 @@ Package Contents
+ .. py:method:: query_datallm(query)
+
+ Runs a read-only query in the specified database connector. This API is specifically designed for DataLLM and enforces read-only access with user-specific access control.
+
+
+ :param query: The query to be run in the database connector (must be read-only).
+ :type query: str
+
+
+
+ .. py:method:: get_auth()
+
+ Get the authentication details for a given database connector.
+
+ :param database_connector_id: The unique ID associated with the database connector.
+ :type database_connector_id: str
+
+ :returns: The database connector with the authentication details.
+ :rtype: DatabaseConnector
+
+
+
.. py:class:: DatabaseConnectorColumn(client, name=None, externalDataType=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -24784,6 +27304,21 @@ Package Contents
+ .. py:method:: get_raw_data_from_realtime(check_permissions = False, start_time = None, end_time = None, column_filter = None)
+
+ Returns raw data from a realtime dataset. Only Microsoft Teams datasets are supported currently due to data size constraints in realtime datasets.
+
+ :param check_permissions: If True, checks user permissions using session email.
+ :type check_permissions: bool
+ :param start_time: Start time filter (inclusive) for created_date_time_t in ISO 8601 format (e.g. 2025-05-13T08:25:11Z or 2025-05-13T08:25:11+00:00).
+ :type start_time: str
+ :param end_time: End time filter (inclusive) for created_date_time_t in ISO 8601 format (e.g. 2025-05-13T08:25:11Z or 2025-05-13T08:25:11+00:00).
+ :type end_time: str
+ :param column_filter: Dictionary mapping column names to filter values. Only rows matching all column filters will be returned.
+ :type column_filter: dict
+
+
+
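+ A sketch of pulling a time window of raw data from a realtime (Microsoft Teams) dataset, assuming ``dataset`` is a ``Dataset`` instance retrieved elsewhere; the column filter is hypothetical:
+
+ .. code-block:: python
+
+    rows = dataset.get_raw_data_from_realtime(
+        check_permissions=True,
+        start_time="2025-05-13T00:00:00Z",
+        end_time="2025-05-13T23:59:59Z",
+        column_filter={"channel_name": "general"},  # hypothetical column/value
+    )
+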
.. py:method:: create_version_from_file_connector(location = None, file_format = None, csv_delimiter = None, merge_file_schemas = None, parsing_config = None, sql_query = None)
Creates a new version of the specified dataset.
@@ -25372,7 +27907,47 @@ Package Contents
-.. py:class:: Deployment(client, deploymentId=None, name=None, status=None, description=None, deployedAt=None, createdAt=None, projectId=None, modelId=None, modelVersion=None, featureGroupId=None, featureGroupVersion=None, callsPerSecond=None, autoDeploy=None, skipMetricsCheck=None, algoName=None, regions=None, error=None, batchStreamingUpdates=None, algorithm=None, pendingModelVersion=None, modelDeploymentConfig=None, predictionOperatorId=None, predictionOperatorVersion=None, pendingPredictionOperatorVersion=None, onlineFeatureGroupId=None, outputOnlineFeatureGroupId=None, realtimeMonitorId=None, runtimeConfigs=None, refreshSchedules={}, featureGroupExportConfig={}, defaultPredictionArguments={})
+.. py:class:: DefaultLlm(client, name=None, enum=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ A default LLM.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param name: The name of the LLM.
+ :type name: str
+ :param enum: The enum of the LLM.
+ :type enum: str
+
+
+ .. py:attribute:: name
+ :value: None
+
+
+
+ .. py:attribute:: enum
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: Deployment(client, deploymentId=None, name=None, status=None, description=None, deployedAt=None, createdAt=None, projectId=None, modelId=None, modelVersion=None, featureGroupId=None, featureGroupVersion=None, callsPerSecond=None, autoDeploy=None, skipMetricsCheck=None, algoName=None, regions=None, error=None, batchStreamingUpdates=None, algorithm=None, pendingModelVersion=None, modelDeploymentConfig=None, predictionOperatorId=None, predictionOperatorVersion=None, pendingPredictionOperatorVersion=None, onlineFeatureGroupId=None, outputOnlineFeatureGroupId=None, realtimeMonitorId=None, runtimeConfigs=None, isSystemCreated=None, refreshSchedules={}, featureGroupExportConfig={}, defaultPredictionArguments={})
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -25437,6 +28012,8 @@ Package Contents
:type realtimeMonitorId: id
:param runtimeConfigs: The runtime configurations of a deployment which is used by some of the usecases during prediction.
:type runtimeConfigs: dict
+ :param isSystemCreated: Whether the deployment is system created.
+ :type isSystemCreated: bool
:param refreshSchedules: A list of refresh schedules that indicate when the deployment will be updated to the latest model version.
:type refreshSchedules: RefreshSchedule
:param featureGroupExportConfig: The export config (file connector or database connector information) for feature group deployment exports.
@@ -25585,6 +28162,11 @@ Package Contents
+ .. py:attribute:: is_system_created
+ :value: None
+
+
+
.. py:attribute:: refresh_schedules
@@ -25864,7 +28446,7 @@ Package Contents
- .. py:method:: get_conversation_response(message, deployment_token, deployment_conversation_id = None, external_session_id = None, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, doc_infos = None)
+ .. py:method:: get_conversation_response(message, deployment_token, deployment_conversation_id = None, external_session_id = None, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, doc_infos = None, user_info = None, execute_usercode_tool = False)
Return a conversation response which continues the conversation based on the input message and deployment conversation id (if exists).
@@ -25892,6 +28474,8 @@ Package Contents
:type chat_config: dict
:param doc_infos: An optional list of documents use for the conversation. A keyword 'doc_id' is expected to be present in each document for retrieving contents from docstore.
:type doc_infos: list
+ :param execute_usercode_tool: If True, will return the tool output in the response.
+ :type execute_usercode_tool: bool
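+
+ A sketch of continuing a conversation with tool execution enabled, assuming ``deployment`` is a ``Deployment`` instance; the message and token are placeholders:
+
+ .. code-block:: python
+
+    response = deployment.get_conversation_response(
+        message="Summarize last week's support tickets",
+        deployment_token="DEPLOYMENT_TOKEN",
+        execute_usercode_tool=True,  # include the tool output in the response
+    )
+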
@@ -26054,7 +28638,7 @@ Package Contents
- .. py:method:: list_conversations(external_application_id = None, conversation_type = None, fetch_last_llm_info = False)
+ .. py:method:: list_conversations(external_application_id = None, conversation_type = None, fetch_last_llm_info = False, limit = None, search = None)
Lists all conversations for the given deployment and current user.
@@ -26064,6 +28648,10 @@ Package Contents
:type conversation_type: DeploymentConversationType
:param fetch_last_llm_info: If true, the LLM info for the most recent conversation will be fetched. Only applicable for system-created bots.
:type fetch_last_llm_info: bool
+ :param limit: The number of conversations to return. Defaults to 600.
+ :type limit: int
+ :param search: The search query to filter conversations by title.
+ :type search: str
:returns: The deployment conversations.
:rtype: list[DeploymentConversation]
@@ -26195,7 +28783,7 @@ Package Contents
-.. py:class:: DeploymentConversation(client, deploymentConversationId=None, name=None, deploymentId=None, createdAt=None, lastEventCreatedAt=None, hasHistory=None, externalSessionId=None, regenerateAttempt=None, externalApplicationId=None, unusedDocumentUploadIds=None, humanizeInstructions=None, conversationWarning=None, conversationType=None, metadata=None, llmDisplayName=None, llmBotIcon=None, searchSuggestions=None, chatllmTaskId=None, history={})
+.. py:class:: DeploymentConversation(client, deploymentConversationId=None, name=None, deploymentId=None, ownerUserId=None, ownerOrgId=None, createdAt=None, lastEventCreatedAt=None, hasHistory=None, externalSessionId=None, regenerateAttempt=None, externalApplicationId=None, unusedDocumentUploadIds=None, humanizeInstructions=None, conversationWarning=None, conversationType=None, metadata=None, llmDisplayName=None, llmBotIcon=None, searchSuggestions=None, chatllmTaskId=None, conversationStatus=None, computerStatus=None, filesystemStatus=None, totalEvents=None, contestNames=None, daemonTaskId=None, parentDeploymentConversationId=None, introMessage=None, previewInfo=None, latestContext=None, shareType=None, history={}, hostedArtifacts={})
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -26210,6 +28798,10 @@ Package Contents
:type name: str
:param deploymentId: The deployment id associated with the deployment conversation.
:type deploymentId: str
+ :param ownerUserId: The user id of the owner of the deployment conversation.
+ :type ownerUserId: str
+ :param ownerOrgId: The organization id of the owner of the deployment conversation.
+ :type ownerOrgId: str
:param createdAt: The timestamp at which the deployment conversation was created.
:type createdAt: str
:param lastEventCreatedAt: The timestamp at which the most recent corresponding deployment conversation event was created at.
@@ -26240,8 +28832,32 @@ Package Contents
:type searchSuggestions: list
:param chatllmTaskId: The chatllm task id associated with the deployment conversation.
:type chatllmTaskId: str
+ :param conversationStatus: The status of the deployment conversation (used for deep agent conversations).
+ :type conversationStatus: str
+ :param computerStatus: The status of the computer associated with the deployment conversation (used for deep agent conversations).
+ :type computerStatus: str
+ :param filesystemStatus: The status of the filesystem associated with the deployment conversation (used for deep agent conversations).
+ :type filesystemStatus: str
+ :param totalEvents: The total number of events in the deployment conversation.
+ :type totalEvents: int
+ :param contestNames: Names of contests that this deployment is a part of.
+ :type contestNames: list[str]
+ :param daemonTaskId: The daemon task id associated with the deployment conversation.
+ :type daemonTaskId: str
+ :param parentDeploymentConversationId: The parent deployment conversation id associated with the deployment conversation.
+ :type parentDeploymentConversationId: str
+ :param introMessage: The intro message for the deployment conversation.
+ :type introMessage: str
+ :param previewInfo: App preview info.
+ :type previewInfo: dict
+ :param latestContext: Dict containing a list of important segments for pagination in DeepAgent.
+ :type latestContext: dict
+ :param shareType: The sharing status of the deployment conversation (PUBLIC or PRIVATE).
+ :type shareType: str
:param history: The history of the deployment conversation.
:type history: DeploymentConversationEvent
+ :param hostedArtifacts: Artifacts that have been deployed by this conversation.
+ :type hostedArtifacts: HostedArtifact
.. py:attribute:: deployment_conversation_id
@@ -26259,6 +28875,16 @@ Package Contents
+ .. py:attribute:: owner_user_id
+ :value: None
+
+
+
+ .. py:attribute:: owner_org_id
+ :value: None
+
+
+
.. py:attribute:: created_at
:value: None
@@ -26334,9 +28960,67 @@ Package Contents
+ .. py:attribute:: conversation_status
+ :value: None
+
+
+
+ .. py:attribute:: computer_status
+ :value: None
+
+
+
+ .. py:attribute:: filesystem_status
+ :value: None
+
+
+
+ .. py:attribute:: total_events
+ :value: None
+
+
+
+ .. py:attribute:: contest_names
+ :value: None
+
+
+
+ .. py:attribute:: daemon_task_id
+ :value: None
+
+
+
+ .. py:attribute:: parent_deployment_conversation_id
+ :value: None
+
+
+
+ .. py:attribute:: intro_message
+ :value: None
+
+
+
+ .. py:attribute:: preview_info
+ :value: None
+
+
+
+ .. py:attribute:: latest_context
+ :value: None
+
+
+
+ .. py:attribute:: share_type
+ :value: None
+
+
+
.. py:attribute:: history
+ .. py:attribute:: hosted_artifacts
+
+
.. py:attribute:: deprecated_keys
@@ -26352,7 +29036,7 @@ Package Contents
- .. py:method:: get(external_session_id = None, deployment_id = None, filter_intermediate_conversation_events = True, get_unused_document_uploads = False)
+ .. py:method:: get(external_session_id = None, deployment_id = None, filter_intermediate_conversation_events = True, get_unused_document_uploads = False, start = None, limit = None, include_all_versions = False)
Gets a deployment conversation.
@@ -26364,12 +29048,27 @@ Package Contents
:type filter_intermediate_conversation_events: bool
:param get_unused_document_uploads: If true, unused document uploads will be returned. Default is false.
:type get_unused_document_uploads: bool
+ :param start: The start index within the conversation history.
+ :type start: int
+ :param limit: The maximum number of conversation entries to return.
+ :type limit: int
+ :param include_all_versions: If True, includes all versions of the last bot message for version switching functionality.
+ :type include_all_versions: bool
:returns: The deployment conversation.
:rtype: DeploymentConversation
+ .. py:method:: get_file(file_path)
+
+ Gets a deployment conversation file.
+
+ :param file_path: The path of the file to get.
+ :type file_path: str
+
+
+
.. py:method:: delete(deployment_id = None)
Delete a Deployment Conversation.
@@ -26448,7 +29147,7 @@ Package Contents
-.. py:class:: DeploymentConversationEvent(client, role=None, text=None, timestamp=None, messageIndex=None, regenerateAttempt=None, modelVersion=None, searchResults=None, isUseful=None, feedback=None, feedbackType=None, docInfos=None, keywordArguments=None, inputParams=None, attachments=None, responseVersion=None, agentWorkflowNodeId=None, nextAgentWorkflowNodeId=None, chatType=None, agentResponse=None, error=None, segments=None, streamedData=None, streamedSectionData=None, highlights=None, llmDisplayName=None, llmBotIcon=None, formResponse=None, routedLlm=None, computePointsUsed=None, computerFiles=None, toolUseRequest=None, verificationSummary=None)
+.. py:class:: DeploymentConversationEvent(client, role=None, text=None, timestamp=None, messageIndex=None, regenerateAttempt=None, modelVersion=None, searchResults=None, isUseful=None, feedback=None, feedbackType=None, docInfos=None, keywordArguments=None, inputParams=None, attachments=None, responseVersion=None, agentWorkflowNodeId=None, nextAgentWorkflowNodeId=None, chatType=None, agentResponse=None, error=None, segments=None, streamedData=None, streamedSectionData=None, highlights=None, llmDisplayName=None, deepagentMode=None, llmBotIcon=None, formResponse=None, routedLlm=None, computePointsUsed=None, computerFiles=None, toolUseRequest=None, toolUseResult=None, verificationSummary=None, attachedUserFileNames=None, oldFileContent=None, allVersions=None, selectedVersionIndex=None, totalVersions=None, isLastMessage=None, allowTaskAutomation=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -26507,6 +29206,8 @@ Package Contents
:type highlights: dict
:param llmDisplayName: The display name of the LLM model used to generate the response. Only used for system-created bots.
:type llmDisplayName: str
+ :param deepagentMode: The Deep Agent run mode (e.g. normal or high) associated with the message.
+ :type deepagentMode: str
:param llmBotIcon: The icon location of the LLM model used to generate the response. Only used for system-created bots.
:type llmBotIcon: str
:param formResponse: Contains form data response from the user when a Form Segment is given out by the bot.
@@ -26519,8 +29220,24 @@ Package Contents
:type computerFiles: list
:param toolUseRequest: The tool use request for the message.
:type toolUseRequest: dict
+ :param toolUseResult: The tool use response for the message.
+ :type toolUseResult: dict
:param verificationSummary: The summary of the verification process for the message.
:type verificationSummary: str
+ :param attachedUserFileNames: The list of files attached by the user on the message.
+ :type attachedUserFileNames: list
+ :param oldFileContent: The content of the file associated with an edit tool request before the file was edited
+ :type oldFileContent: str
+ :param allVersions: All versions of the last bot message for version switching functionality.
+ :type allVersions: list
+ :param selectedVersionIndex: The index of the currently selected version of the last bot message.
+ :type selectedVersionIndex: int
+ :param totalVersions: The total number of versions available for the last bot message.
+ :type totalVersions: int
+ :param isLastMessage: Whether this message is the last message in the conversation.
+ :type isLastMessage: bool
+ :param allowTaskAutomation: Whether to show the automate task button. Only set for the last message in the conversation.
+ :type allowTaskAutomation: bool
.. py:attribute:: role
@@ -26648,6 +29365,11 @@ Package Contents
+ .. py:attribute:: deepagent_mode
+ :value: None
+
+
+
.. py:attribute:: llm_bot_icon
:value: None
@@ -26678,11 +29400,51 @@ Package Contents
+ .. py:attribute:: tool_use_result
+ :value: None
+
+
+
.. py:attribute:: verification_summary
:value: None
+ .. py:attribute:: attached_user_file_names
+ :value: None
+
+
+
+ .. py:attribute:: old_file_content
+ :value: None
+
+
+
+ .. py:attribute:: all_versions
+ :value: None
+
+
+
+ .. py:attribute:: selected_version_index
+ :value: None
+
+
+
+ .. py:attribute:: total_versions
+ :value: None
+
+
+
+ .. py:attribute:: is_last_message
+ :value: None
+
+
+
+ .. py:attribute:: allow_task_automation
+ :value: None
+
+
+
.. py:attribute:: deprecated_keys
@@ -27146,7 +29908,7 @@ Package Contents
-.. py:class:: DocumentRetrieverLookupResult(client, document=None, score=None, properties=None, pages=None, boundingBoxes=None, documentSource=None, imageIds=None)
+.. py:class:: DocumentRetrieverLookupResult(client, document=None, score=None, properties=None, pages=None, boundingBoxes=None, documentSource=None, imageIds=None, metadata=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -27169,6 +29931,8 @@ Package Contents
:type documentSource: str
:param imageIds: List of Image IDs for all the pages.
:type imageIds: list
+ :param metadata: Metadata column values for the retrieved documents.
+ :type metadata: dict
.. py:attribute:: document
@@ -27206,6 +29970,11 @@ Package Contents
+ .. py:attribute:: metadata
+ :value: None
+
+
+
.. py:attribute:: deprecated_keys
@@ -27409,6 +30178,140 @@ Package Contents
+.. py:class:: DocumentRetrieverVersionLogs(client, logs=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Logs from document retriever version.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param logs: List of logs from document retriever version.
+ :type logs: list[str]
+
+
+ .. py:attribute:: logs
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: DomainPurchaseResult(client, domain=None, billedCredits=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Domain purchase result
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param domain: The domain name being purchased.
+ :type domain: str
+ :param billedCredits: The total credits used for the purchase.
+ :type billedCredits: int
+
+
+ .. py:attribute:: domain
+ :value: None
+
+
+
+ .. py:attribute:: billed_credits
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: DomainSearchResult(client, domain=None, registerCredits=None, registerYears=None, renewalCredits=None, renewalYears=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Domain search result
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param domain: name of the domain in a search result
+ :type domain: str
+ :param registerCredits: number of credits required for registration
+ :type registerCredits: int
+ :param registerYears: number of years the domain will get registered for
+ :type registerYears: int
+ :param renewalCredits: number of credits required for renewal
+ :type renewalCredits: int
+ :param renewalYears: number of years the domain will be renewed for
+ :type renewalYears: int
+
+
+ .. py:attribute:: domain
+ :value: None
+
+
+
+ .. py:attribute:: register_credits
+ :value: None
+
+
+
+ .. py:attribute:: register_years
+ :value: None
+
+
+
+ .. py:attribute:: renewal_credits
+ :value: None
+
+
+
+ .. py:attribute:: renewal_years
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
.. py:class:: DriftDistribution(client, trainColumn=None, predictedColumn=None, metrics=None, distribution={})
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -28292,6 +31195,46 @@ Package Contents
+.. py:class:: EditImageModels(client, models=None, default=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Edit image models
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param models: The models available for edit image.
+ :type models: list
+ :param default: The default model for edit image.
+ :type default: str
+
+
+ .. py:attribute:: models
+ :value: None
+
+
+
+ .. py:attribute:: default
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
.. py:class:: EmbeddingFeatureDriftDistribution(client, distance=None, jsDistance=None, wsDistance=None, ksStatistic=None, psi=None, csi=None, chiSquare=None, averageDrift={})
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -28372,6 +31315,53 @@ Package Contents
+.. py:class:: EntriAuthToken(client, token=None, applicationId=None, ttl=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Entri Auth Token
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param token: The authentication token for Entri
+ :type token: str
+ :param applicationId: The application ID from the Entri dashboard
+ :type applicationId: str
+ :param ttl: The duration in milliseconds for which the token is valid
+ :type ttl: int
+
+
+ .. py:attribute:: token
+ :value: None
+
+
+
+ .. py:attribute:: application_id
+ :value: None
+
+
+
+ .. py:attribute:: ttl
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
.. py:class:: ExecuteFeatureGroupOperation(client, featureGroupOperationRunId=None, status=None, error=None, query=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -28490,7 +31480,7 @@ Package Contents
-.. py:class:: ExternalApplication(client, name=None, externalApplicationId=None, deploymentId=None, description=None, logo=None, theme=None, userGroupIds=None, useCase=None, isAgent=None, status=None, deploymentConversationRetentionHours=None, managedUserService=None, predictionOverrides=None, isSystemCreated=None, isCustomizable=None, isDeprecated=None, isVisible=None)
+.. py:class:: ExternalApplication(client, name=None, externalApplicationId=None, deploymentId=None, description=None, logo=None, theme=None, userGroupIds=None, useCase=None, isAgent=None, status=None, deploymentConversationRetentionHours=None, managedUserService=None, predictionOverrides=None, isSystemCreated=None, isCustomizable=None, isDeprecated=None, isVisible=None, hasThinkingOption=None, hasFastMode=None, onlyImageGenEnabled=None, projectId=None, isCodellmChatmodeSupported=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -28533,6 +31523,16 @@ Package Contents
:type isDeprecated: bool
:param isVisible: Whether the external application should be shown in the dropdown.
:type isVisible: bool
+ :param hasThinkingOption: Whether to show the thinking option in the toolbar.
+ :type hasThinkingOption: bool
+ :param hasFastMode: Whether the external application has fast mode.
+ :type hasFastMode: bool
+ :param onlyImageGenEnabled: Whether the LLM only allows image generation.
+ :type onlyImageGenEnabled: bool
+ :param projectId: The project id associated with the external application.
+ :type projectId: str
+ :param isCodellmChatmodeSupported: Whether the external application supports CodeLLM chat mode.
+ :type isCodellmChatmodeSupported: bool
.. py:attribute:: name
@@ -28620,6 +31620,31 @@ Package Contents
+ .. py:attribute:: has_thinking_option
+ :value: None
+
+
+
+ .. py:attribute:: has_fast_mode
+ :value: None
+
+
+
+ .. py:attribute:: only_image_gen_enabled
+ :value: None
+
+
+
+ .. py:attribute:: project_id
+ :value: None
+
+
+
+ .. py:attribute:: is_codellm_chatmode_supported
+ :value: None
+
+
+
.. py:attribute:: deprecated_keys
@@ -28635,6 +31660,24 @@ Package Contents
+ .. py:method:: add_developers_to()
+
+ Adds a permission for the platform App User Group to access an External Application.
+
+ :param external_application_id: The ID of the External Application.
+ :type external_application_id: str
+
+
+
+ .. py:method:: remove_developers_from()
+
+ Removes a permission for the platform App User Group to access an External Application.
+
+ :param external_application_id: The ID of the External Application.
+ :type external_application_id: str
+
+
+
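+ A minimal usage sketch of the two methods above, assuming ``client`` is an authenticated ``ApiClient``; the describe helper and the ID below are placeholders, not confirmed API names:
+
+ .. code-block:: python
+
+     # Hypothetical fetch of an ExternalApplication instance.
+     app = client.describe_external_application("EXTERNAL_APPLICATION_ID")
+     app.add_developers_to()       # grant the platform App User Group access
+     app.remove_developers_from()  # revoke that access again
+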
.. py:method:: update(name = None, description = None, theme = None, deployment_id = None, deployment_conversation_retention_hours = None, reset_retention_policy = False)
Updates an External Application.
@@ -31942,7 +34985,7 @@ Package Contents
:type database_connector_id: str
:param object_name: Name of the database object to write to.
:type object_name: str
- :param write_mode: Enum string indicating whether to use INSERT or UPSERT.
+ :param write_mode: Enum string indicating whether to use INSERT, UPSERT, or REPLACE.
:type write_mode: str
:param database_feature_mapping: Key/value pair JSON object of "database connector column" -> "feature name" pairs.
:type database_feature_mapping: dict
@@ -32208,7 +35251,7 @@ Package Contents
:param client: An authenticated API Client instance
:type client: ApiClient
- :param featureMapping: The mapping of the feature. The possible values will be based on the project's use-case. See the (Use Case Documentation)[https://api.abacus.ai/app/help/useCases] for more details.
+ :param featureMapping: The mapping of the feature. The possible values will be based on the project's use-case. See the [Use Case Documentation](https://api.abacus.ai/app/help/developer-platform/useCases) for more details.
:type featureMapping: str
:param featureName: The unique name of the feature.
:type featureName: str
@@ -32286,40 +35329,7 @@ Package Contents
-.. py:class:: FeatureRecord(client, data=None)
-
- Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
-
-
- A feature record
-
- :param client: An authenticated API Client instance
- :type client: ApiClient
- :param data: the record's current data
- :type data: dict
-
-
- .. py:attribute:: data
- :value: None
-
-
-
- .. py:attribute:: deprecated_keys
-
-
- .. py:method:: __repr__()
-
-
- .. py:method:: to_dict()
-
- Get a dict representation of the parameters in this class
-
- :returns: The dict value representation of the class parameters
- :rtype: dict
-
-
-
-.. py:class:: FileConnector(client, bucket=None, verified=None, writePermission=None, authExpiresAt=None)
+.. py:class:: FileConnector(client, bucket=None, verified=None, writePermission=None, authExpiresAt=None, createdAt=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -32336,6 +35346,8 @@ Package Contents
:type writePermission: bool
:param authExpiresAt: The time when the file connector's auth expires, if applicable
:type authExpiresAt: str
+ :param createdAt: The timestamp at which the file connector was created
+ :type createdAt: str
.. py:attribute:: bucket
@@ -32358,6 +35370,11 @@ Package Contents
+ .. py:attribute:: created_at
+ :value: None
+
+
+
.. py:attribute:: deprecated_keys
@@ -32785,6 +35802,81 @@ Package Contents
+.. py:class:: FsEntry(client, name=None, type=None, path=None, size=None, modified=None, isFolderEmpty=None, children=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ File system entry.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param name: The name of the file/folder
+ :type name: str
+ :param type: The type of entry (file/folder)
+ :type type: str
+ :param path: The path of the entry
+ :type path: str
+ :param size: The size of the entry in bytes
+ :type size: int
+ :param modified: The last modified timestamp
+ :type modified: int
+ :param isFolderEmpty: Whether the folder is empty (only for folders)
+ :type isFolderEmpty: bool
+ :param children: List of child FSEntry objects (only for folders)
+ :type children: list
+
+
+ .. py:attribute:: name
+ :value: None
+
+
+
+ .. py:attribute:: type
+ :value: None
+
+
+
+ .. py:attribute:: path
+ :value: None
+
+
+
+ .. py:attribute:: size
+ :value: None
+
+
+
+ .. py:attribute:: modified
+ :value: None
+
+
+
+ .. py:attribute:: isFolderEmpty
+ :value: None
+
+
+
+ .. py:attribute:: children
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
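+ A small traversal sketch over the fields documented above, assuming ``children`` holds nested ``FsEntry`` objects for folders:
+
+ .. code-block:: python
+
+     def walk(entry, indent=0):
+         """Print every path in an FsEntry tree, with file sizes."""
+         label = f"{entry.path} ({entry.size} bytes)" if entry.type == "file" else entry.path
+         print(" " * indent + label)
+         for child in entry.children or []:
+             walk(child, indent + 2)
+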
.. py:class:: FunctionLogs(client, function=None, stats=None, stdout=None, stderr=None, algorithm=None, exception={})
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -33288,81 +36380,41 @@ Package Contents
-.. py:class:: HostedModelToken(client, createdAt=None, tag=None, trailingAuthToken=None, hostedModelTokenId=None)
+.. py:class:: HostedApp(client, hostedAppId=None, deploymentConversationId=None, name=None, createdAt=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
- A hosted model authentication token that is used to authenticate requests to an abacus hosted model
+ Hosted App
:param client: An authenticated API Client instance
:type client: ApiClient
- :param createdAt: When the token was created
+ :param hostedAppId: The ID of the hosted app
+ :type hostedAppId: id
+ :param deploymentConversationId: The ID of the deployment conversation
+ :type deploymentConversationId: id
+ :param name: The name of the hosted app
+ :type name: str
+ :param createdAt: The creation timestamp
:type createdAt: str
- :param tag: A user-friendly tag for the API key.
- :type tag: str
- :param trailingAuthToken: The last four characters of the un-encrypted auth token
- :type trailingAuthToken: str
- :param hostedModelTokenId: The unique identifier attached to this authenticaion token
- :type hostedModelTokenId: str
-
-
- .. py:attribute:: created_at
- :value: None
-
- .. py:attribute:: tag
- :value: None
-
-
-
- .. py:attribute:: trailing_auth_token
+ .. py:attribute:: hosted_app_id
:value: None
- .. py:attribute:: hosted_model_token_id
+ .. py:attribute:: deployment_conversation_id
:value: None
- .. py:attribute:: deprecated_keys
-
-
- .. py:method:: __repr__()
-
-
- .. py:method:: to_dict()
-
- Get a dict representation of the parameters in this class
-
- :returns: The dict value representation of the class parameters
- :rtype: dict
-
-
-
-.. py:class:: HumeVoice(client, name=None, gender=None)
-
- Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
-
-
- Hume Voice
-
- :param client: An authenticated API Client instance
- :type client: ApiClient
- :param name: The name of the voice.
- :type name: str
- :param gender: The gender of the voice.
- :type gender: str
-
-
.. py:attribute:: name
:value: None
- .. py:attribute:: gender
+ .. py:attribute:: created_at
:value: None
@@ -33382,7 +36434,727 @@ Package Contents
-.. py:class:: ImageGenSettings(client, model=None, settings=None)
+.. py:class:: HostedAppContainer(client, hostedAppContainerId=None, hostedAppId=None, deploymentConversationId=None, hostedAppVersion=None, name=None, userId=None, email=None, createdAt=None, updatedAt=None, containerImage=None, route=None, appConfig=None, isDev=None, isDeployable=None, isPreviewAvailable=None, lifecycle=None, status=None, deployedStatus=None, accessLevel=None, hostname=None, llmArtifactId=None, artifactType=None, deployedLlmArtifactId=None, hasDatabase=None, hasStorage=None, webAppProjectId=None, parentConversationId=None, projectMetadata=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Hosted app + Deep agent container information.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param hostedAppContainerId: The ID of the hosted app container
+ :type hostedAppContainerId: id
+ :param hostedAppId: The ID of the hosted app
+ :type hostedAppId: id
+ :param deploymentConversationId: The deployment conversation ID
+ :type deploymentConversationId: id
+ :param hostedAppVersion: The instance of the hosted app
+ :type hostedAppVersion: id
+ :param name: The name of the hosted app
+ :type name: str
+ :param userId: The ID of the creation user
+ :type userId: id
+ :param email: The email of the creation user
+ :type email: str
+ :param createdAt: Creation timestamp
+ :type createdAt: str
+ :param updatedAt: Last update timestamp
+ :type updatedAt: str
+ :param containerImage: Container image name
+ :type containerImage: str
+ :param route: Container route
+ :type route: str
+ :param appConfig: App configuration
+ :type appConfig: dict
+ :param isDev: Whether this is a dev container
+ :type isDev: bool
+ :param isDeployable: Whether this version can be deployed
+ :type isDeployable: bool
+ :param isPreviewAvailable: Whether the dev preview is available on the container
+ :type isPreviewAvailable: bool
+ :param lifecycle: Container lifecycle status (PENDING/DEPLOYING/ACTIVE/FAILED/STOPPED/DELETING)
+ :type lifecycle: str
+ :param status: Container status (RUNNING/STOPPED/DEPLOYING/FAILED)
+ :type status: str
+ :param deployedStatus: Deployment status (PENDING/ACTIVE/STOPPED/NOT_DEPLOYED)
+ :type deployedStatus: str
+ :param accessLevel: Access Level (PUBLIC/PRIVATE/DEDICATED)
+ :type accessLevel: str
+ :param hostname: Hostname of the deployed app
+ :type hostname: str
+ :param llmArtifactId: The ID of the LLM artifact
+ :type llmArtifactId: id
+ :param artifactType: The type of the artifact
+ :type artifactType: str
+ :param deployedLlmArtifactId: The ID of the deployed LLM artifact
+ :type deployedLlmArtifactId: id
+ :param hasDatabase: Whether the app has a database associated with it
+ :type hasDatabase: bool
+ :param hasStorage: Whether the app has cloud storage associated with it
+ :type hasStorage: bool
+ :param webAppProjectId: The ID of the web app project
+ :type webAppProjectId: id
+ :param parentConversationId: The ID of the parent conversation
+ :type parentConversationId: id
+ :param projectMetadata: The metadata of the web app project
+ :type projectMetadata: dict
+
+
+ .. py:attribute:: hosted_app_container_id
+ :value: None
+
+
+
+ .. py:attribute:: hosted_app_id
+ :value: None
+
+
+
+ .. py:attribute:: deployment_conversation_id
+ :value: None
+
+
+
+ .. py:attribute:: hosted_app_version
+ :value: None
+
+
+
+ .. py:attribute:: name
+ :value: None
+
+
+
+ .. py:attribute:: user_id
+ :value: None
+
+
+
+ .. py:attribute:: email
+ :value: None
+
+
+
+ .. py:attribute:: created_at
+ :value: None
+
+
+
+ .. py:attribute:: updated_at
+ :value: None
+
+
+
+ .. py:attribute:: container_image
+ :value: None
+
+
+
+ .. py:attribute:: route
+ :value: None
+
+
+
+ .. py:attribute:: app_config
+ :value: None
+
+
+
+ .. py:attribute:: is_dev
+ :value: None
+
+
+
+ .. py:attribute:: is_deployable
+ :value: None
+
+
+
+ .. py:attribute:: is_preview_available
+ :value: None
+
+
+
+ .. py:attribute:: lifecycle
+ :value: None
+
+
+
+ .. py:attribute:: status
+ :value: None
+
+
+
+ .. py:attribute:: deployed_status
+ :value: None
+
+
+
+ .. py:attribute:: access_level
+ :value: None
+
+
+
+ .. py:attribute:: hostname
+ :value: None
+
+
+
+ .. py:attribute:: llm_artifact_id
+ :value: None
+
+
+
+ .. py:attribute:: artifact_type
+ :value: None
+
+
+
+ .. py:attribute:: deployed_llm_artifact_id
+ :value: None
+
+
+
+ .. py:attribute:: has_database
+ :value: None
+
+
+
+ .. py:attribute:: has_storage
+ :value: None
+
+
+
+ .. py:attribute:: web_app_project_id
+ :value: None
+
+
+
+ .. py:attribute:: parent_conversation_id
+ :value: None
+
+
+
+ .. py:attribute:: project_metadata
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
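+ An illustrative check against the lifecycle/status enumerations above; treating exactly this combination as "serving" is an assumption:
+
+ .. code-block:: python
+
+     def is_serving(container):
+         """Return True if the container looks like it is serving traffic."""
+         return (
+             container.lifecycle == "ACTIVE"
+             and container.status == "RUNNING"
+             and container.deployed_status == "ACTIVE"
+         )
+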
+.. py:class:: HostedAppFileRead(client, content=None, start=None, end=None, retcode=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Result of reading file content from a hosted app container.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param content: The contents of the file or a portion of it.
+ :type content: str
+ :param start: If present, the starting position of the read.
+ :type start: int
+ :param end: If present, the last position in the file returned in this read.
+ :type end: int
+ :param retcode: If the read is associated with a log, the return code of the command.
+ :type retcode: int
+
+
+ .. py:attribute:: content
+ :value: None
+
+
+
+ .. py:attribute:: start
+ :value: None
+
+
+
+ .. py:attribute:: end
+ :value: None
+
+
+
+ .. py:attribute:: retcode
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
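+ A sketch of consuming partial reads; how the ``HostedAppFileRead`` objects are requested is not shown here:
+
+ .. code-block:: python
+
+     def assemble(reads):
+         """Stitch partial reads back together in order of their start offsets."""
+         ordered = sorted(reads, key=lambda r: r.start or 0)
+         return "".join(r.content for r in ordered if r.content)
+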
+.. py:class:: HostedArtifact(client, hostname=None, artifactType=None, llmArtifactId=None, lifecycle=None, externalApplicationId=None, deploymentConversationId=None, conversationSequenceNumber=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ A hosted artifact being served by the platform.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param hostname: The url at which the application is being hosted.
+ :type hostname: str
+ :param artifactType: The type of artifact being hosted.
+ :type artifactType: str
+ :param llmArtifactId: The artifact id being hosted.
+ :type llmArtifactId: str
+ :param lifecycle: The lifecycle of the artifact.
+ :type lifecycle: str
+ :param externalApplicationId: Agent that deployed this application.
+ :type externalApplicationId: str
+ :param deploymentConversationId: Conversation that created or deployed this artifact, null if not applicable.
+ :type deploymentConversationId: str
+ :param conversationSequenceNumber: Conversation event associated with this artifact, null if not applicable.
+ :type conversationSequenceNumber: number(integer)
+
+
+ .. py:attribute:: hostname
+ :value: None
+
+
+
+ .. py:attribute:: artifact_type
+ :value: None
+
+
+
+ .. py:attribute:: llm_artifact_id
+ :value: None
+
+
+
+ .. py:attribute:: lifecycle
+ :value: None
+
+
+
+ .. py:attribute:: external_application_id
+ :value: None
+
+
+
+ .. py:attribute:: deployment_conversation_id
+ :value: None
+
+
+
+ .. py:attribute:: conversation_sequence_number
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: HostedDatabase(client, hostedDatabaseId=None, displayName=None, createdAt=None, updatedAt=None, lifecycle=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Hosted Database
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param hostedDatabaseId: The ID of the hosted database
+ :type hostedDatabaseId: id
+ :param displayName: The name of the hosted database
+ :type displayName: str
+ :param createdAt: The creation timestamp
+ :type createdAt: str
+ :param updatedAt: The last update timestamp
+ :type updatedAt: str
+ :param lifecycle: The lifecycle of the hosted database
+ :type lifecycle: str
+
+
+ .. py:attribute:: hosted_database_id
+ :value: None
+
+
+
+ .. py:attribute:: display_name
+ :value: None
+
+
+
+ .. py:attribute:: created_at
+ :value: None
+
+
+
+ .. py:attribute:: updated_at
+ :value: None
+
+
+
+ .. py:attribute:: lifecycle
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: HostedDatabaseSnapshot(client, hostedDatabaseSnapshotId=None, srcHostedDatabaseId=None, createdAt=None, updatedAt=None, lifecycle=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Hosted Database Snapshot
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param hostedDatabaseSnapshotId: The ID of the hosted database snapshot
+ :type hostedDatabaseSnapshotId: id
+ :param srcHostedDatabaseId: The ID of the source hosted database
+ :type srcHostedDatabaseId: id
+ :param createdAt: The creation timestamp
+ :type createdAt: str
+ :param updatedAt: The last update timestamp
+ :type updatedAt: str
+ :param lifecycle: The lifecycle of the hosted database snapshot
+ :type lifecycle: str
+
+
+ .. py:attribute:: hosted_database_snapshot_id
+ :value: None
+
+
+
+ .. py:attribute:: src_hosted_database_id
+ :value: None
+
+
+
+ .. py:attribute:: created_at
+ :value: None
+
+
+
+ .. py:attribute:: updated_at
+ :value: None
+
+
+
+ .. py:attribute:: lifecycle
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: HostedModelToken(client, createdAt=None, tag=None, trailingAuthToken=None, hostedModelTokenId=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ A hosted model authentication token that is used to authenticate requests to an Abacus-hosted model
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param createdAt: When the token was created
+ :type createdAt: str
+ :param tag: A user-friendly tag for the API key.
+ :type tag: str
+ :param trailingAuthToken: The last four characters of the un-encrypted auth token
+ :type trailingAuthToken: str
+ :param hostedModelTokenId: The unique identifier attached to this authentication token
+ :type hostedModelTokenId: str
+
+
+ .. py:attribute:: created_at
+ :value: None
+
+
+
+ .. py:attribute:: tag
+ :value: None
+
+
+
+ .. py:attribute:: trailing_auth_token
+ :value: None
+
+
+
+ .. py:attribute:: hosted_model_token_id
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: HostnameInfo(client, isRootDomain=None, hasRootNameserversConfigured=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Hostname Info
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param isRootDomain: Whether the hostname is a root domain
+ :type isRootDomain: bool
+ :param hasRootNameserversConfigured: Whether the root domain has Abacus nameservers configured.
+ :type hasRootNameserversConfigured: bool
+
+
+ .. py:attribute:: is_root_domain
+ :value: None
+
+
+
+ .. py:attribute:: has_root_nameservers_configured
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: HumeVoice(client, name=None, gender=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Hume Voice
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param name: The name of the voice.
+ :type name: str
+ :param gender: The gender of the voice.
+ :type gender: str
+
+
+ .. py:attribute:: name
+ :value: None
+
+
+
+ .. py:attribute:: gender
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: ImageGenModel(client, displayName=None, type=None, valueType=None, optional=None, default=None, helptext=None, options={})
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Image generation model
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param displayName: The display name for the UI component.
+ :type displayName: str
+ :param type: The type of the UI component.
+ :type type: str
+ :param valueType: The data type of the values within the UI component.
+ :type valueType: str
+ :param optional: Whether the selection of a value is optional.
+ :type optional: bool
+ :param default: The default value for the image generation model.
+ :type default: str
+ :param helptext: The helptext for the UI component.
+ :type helptext: str
+ :param options: The options of models available for image generation.
+ :type options: ImageGenModelOptions
+
+
+ .. py:attribute:: display_name
+ :value: None
+
+
+
+ .. py:attribute:: type
+ :value: None
+
+
+
+ .. py:attribute:: value_type
+ :value: None
+
+
+
+ .. py:attribute:: optional
+ :value: None
+
+
+
+ .. py:attribute:: default
+ :value: None
+
+
+
+ .. py:attribute:: helptext
+ :value: None
+
+
+
+ .. py:attribute:: options
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: ImageGenModelOptions(client, keys=None, values=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Image generation model options
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param keys: The keys of the image generation model options represented as the enum values.
+ :type keys: list
+ :param values: The display names of the image generation model options.
+ :type values: list
+
+
+ .. py:attribute:: keys
+ :value: None
+
+
+
+ .. py:attribute:: values
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
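+ Since ``keys`` and ``values`` are parallel lists, a small helper can pair them into a lookup table (the example output is illustrative only):
+
+ .. code-block:: python
+
+     def options_to_dict(opts):
+         """Map each model enum value to its display name."""
+         return dict(zip(opts.keys or [], opts.values or []))
+
+     # e.g. {"SOME_MODEL_ENUM": "Some Model"}
+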
+.. py:class:: ImageGenSettings(client, settings=None, warnings=None, model={})
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -33391,22 +37163,27 @@ Package Contents
:param client: An authenticated API Client instance
:type client: ApiClient
- :param model: Dropdown for models available for image generation.
- :type model: dict
:param settings: The settings for each model.
:type settings: dict
+ :param warnings: The warnings for each model.
+ :type warnings: dict
+ :param model: Dropdown for models available for image generation.
+ :type model: ImageGenModel
- .. py:attribute:: model
+ .. py:attribute:: settings
:value: None
- .. py:attribute:: settings
+ .. py:attribute:: warnings
:value: None
+ .. py:attribute:: model
+
+
.. py:attribute:: deprecated_keys
@@ -33641,6 +37418,46 @@ Package Contents
+.. py:class:: LipSyncGenSettings(client, model=None, settings=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Lip sync generation settings
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param model: The model settings.
+ :type model: dict
+ :param settings: The settings for each model.
+ :type settings: dict
+
+
+ .. py:attribute:: model
+ :value: None
+
+
+
+ .. py:attribute:: settings
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
.. py:class:: LlmApp(client, llmAppId=None, name=None, description=None, projectId=None, deploymentId=None, createdAt=None, updatedAt=None, status=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -33723,6 +37540,130 @@ Package Contents
+.. py:class:: LlmArtifact(client, llmArtifactId=None, info=None, description=None, createdAt=None, webAppDeploymentId=None, deploymentStatus=None, isLatest=None, deploymentConversationId=None, webAppProjectId=None, hasDatabase=None, hasStorage=None, projectMetadata=None, hasDatabaseSnapshot=None, supportsPreviewRestore=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ LLM Artifact
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param llmArtifactId: The ID of the LLM artifact
+ :type llmArtifactId: id
+ :param info: The info of the LLM artifact
+ :type info: dict
+ :param description: The description of the LLM artifact
+ :type description: str
+ :param createdAt: The creation timestamp
+ :type createdAt: str
+ :param webAppDeploymentId: The ID of the associated web app deployment
+ :type webAppDeploymentId: id
+ :param deploymentStatus: The status of the associated web app deployment
+ :type deploymentStatus: str
+ :param isLatest: Whether it is the most recent version of the artifact
+ :type isLatest: bool
+ :param deploymentConversationId: The ID of the deployment conversation
+ :type deploymentConversationId: id
+ :param webAppProjectId: The ID of the web app project
+ :type webAppProjectId: id
+ :param hasDatabase: Whether the app associated with the artifact has a database
+ :type hasDatabase: bool
+ :param hasStorage: Whether the app associated with the artifact has a storage
+ :type hasStorage: bool
+ :param projectMetadata: The metadata of the web app project
+ :type projectMetadata: dict
+ :param hasDatabaseSnapshot: Whether the artifact database has a snapshot
+ :type hasDatabaseSnapshot: bool
+ :param supportsPreviewRestore: Whether the artifact supports preview restore
+ :type supportsPreviewRestore: bool
+
+
+ .. py:attribute:: llm_artifact_id
+ :value: None
+
+
+
+ .. py:attribute:: info
+ :value: None
+
+
+
+ .. py:attribute:: description
+ :value: None
+
+
+
+ .. py:attribute:: created_at
+ :value: None
+
+
+
+ .. py:attribute:: web_app_deployment_id
+ :value: None
+
+
+
+ .. py:attribute:: deployment_status
+ :value: None
+
+
+
+ .. py:attribute:: is_latest
+ :value: None
+
+
+
+ .. py:attribute:: deployment_conversation_id
+ :value: None
+
+
+
+ .. py:attribute:: web_app_project_id
+ :value: None
+
+
+
+ .. py:attribute:: has_database
+ :value: None
+
+
+
+ .. py:attribute:: has_storage
+ :value: None
+
+
+
+ .. py:attribute:: project_metadata
+ :value: None
+
+
+
+ .. py:attribute:: has_database_snapshot
+ :value: None
+
+
+
+ .. py:attribute:: supports_preview_restore
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
.. py:class:: LlmCodeBlock(client, language=None, code=None, start=None, end=None, valid=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -33824,39 +37765,138 @@ Package Contents
-.. py:class:: LlmExecutionResult(client, status=None, error=None, execution={}, preview={})
+.. py:class:: LlmExecutionResult(client, status=None, error=None, execution={}, preview={})
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Results of executing queries using LLM.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param status: The status of the execution.
+ :type status: str
+ :param error: The error message if the execution failed.
+ :type error: str
+ :param execution: Information on execution of the query.
+ :type execution: ExecuteFeatureGroupOperation
+ :param preview: Preview of executing queries using LLM.
+ :type preview: LlmExecutionPreview
+
+
+ .. py:attribute:: status
+ :value: None
+
+
+
+ .. py:attribute:: error
+ :value: None
+
+
+
+ .. py:attribute:: execution
+
+
+ .. py:attribute:: preview
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
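+ A sketch of inspecting a result using only the fields documented above; the ``"FAILED"`` literal is an assumption about the possible status values:
+
+ .. code-block:: python
+
+     def summarize(result):
+         """Return a one-line summary of an LlmExecutionResult."""
+         if result.status and result.status.upper() == "FAILED":
+             return f"execution failed: {result.error}"
+         # `execution` is an ExecuteFeatureGroupOperation, so it carries the query text.
+         return f"status={result.status}, query={getattr(result.execution, 'query', None)}"
+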
+.. py:class:: LlmGeneratedCode(client, sql=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Code generated by LLM.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param sql: SQL query generated by LLM.
+ :type sql: str
+
+
+ .. py:attribute:: sql
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: LlmInput(client, content=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
- Results of executing queries using LLM.
+ The result of encoding an object as input for a language model.
:param client: An authenticated API Client instance
:type client: ApiClient
- :param status: The status of the execution.
- :type status: str
- :param error: The error message if the execution failed.
- :type error: str
- :param execution: Information on execution of the query.
- :type execution: ExecuteFeatureGroupOperation
- :param preview: Preview of executing queries using LLM.
- :type preview: LlmExecutionPreview
+ :param content: Content of the response
+ :type content: str
- .. py:attribute:: status
+ .. py:attribute:: content
:value: None
- .. py:attribute:: error
- :value: None
+ .. py:attribute:: deprecated_keys
+ .. py:method:: __repr__()
- .. py:attribute:: execution
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: LlmParameters(client, parameters=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ The parameters of LLM for given inputs.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param parameters: The parameters of LLM for given inputs.
+ :type parameters: dict
+
+
+ .. py:attribute:: parameters
+ :value: None
- .. py:attribute:: preview
.. py:attribute:: deprecated_keys
@@ -33874,24 +37914,71 @@ Package Contents
-.. py:class:: LlmGeneratedCode(client, sql=None)
+.. py:class:: LlmResponse(client, content=None, tokens=None, stopReason=None, llmName=None, inputTokens=None, outputTokens=None, totalTokens=None, codeBlocks={})
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
- Code generated by LLM.
+ The response returned by LLM
:param client: An authenticated API Client instance
:type client: ApiClient
- :param sql: SQL query generated by LLM.
- :type sql: str
+ :param content: Full response from LLM.
+ :type content: str
+ :param tokens: The number of tokens in the response.
+ :type tokens: int
+ :param stopReason: The reason the response generation stopped.
+ :type stopReason: str
+ :param llmName: The name of the LLM model used to generate the response.
+ :type llmName: str
+ :param inputTokens: The number of input tokens used in the LLM call.
+ :type inputTokens: int
+ :param outputTokens: The number of output tokens generated in the LLM response.
+ :type outputTokens: int
+ :param totalTokens: The total number of tokens (input + output) used in the LLM interaction.
+ :type totalTokens: int
+ :param codeBlocks: A list of parsed code blocks from raw LLM Response
+ :type codeBlocks: LlmCodeBlock
- .. py:attribute:: sql
+ .. py:attribute:: content
+ :value: None
+
+
+
+ .. py:attribute:: tokens
+ :value: None
+
+
+
+ .. py:attribute:: stop_reason
+ :value: None
+
+
+
+ .. py:attribute:: llm_name
+ :value: None
+
+
+
+ .. py:attribute:: input_tokens
+ :value: None
+
+
+
+ .. py:attribute:: output_tokens
+ :value: None
+
+
+
+ .. py:attribute:: total_tokens
:value: None
+ .. py:attribute:: code_blocks
+
+
.. py:attribute:: deprecated_keys
@@ -33907,20 +37994,20 @@ Package Contents
-.. py:class:: LlmInput(client, content=None)
+.. py:class:: McpConfig(client, mcpConfig=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
- The result of encoding an object as input for a language model.
+ Model Context Protocol Config
:param client: An authenticated API Client instance
:type client: ApiClient
- :param content: Content of the response
- :type content: str
+ :param mcpConfig: The MCP configuration for the current user
+ :type mcpConfig: dict
- .. py:attribute:: content
+ .. py:attribute:: mcp_config
:value: None
@@ -33940,20 +38027,69 @@ Package Contents
-.. py:class:: LlmParameters(client, parameters=None)
+.. py:class:: McpServer(client, name=None, description=None, envVars=None, config=None, envVarInstructions=None, url=None, isActive=None, metadata=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
- The parameters of LLM for given inputs.
+ Model Context Protocol Server
:param client: An authenticated API Client instance
:type client: ApiClient
- :param parameters: The parameters of LLM for given inputs.
- :type parameters: dict
+ :param name: The name of the MCP server.
+ :type name: str
+ :param description: Description of what the MCP server does.
+ :type description: str
+ :param envVars: List of API keys or credentials required by the MCP server.
+ :type envVars: list
+ :param config: A JSON string containing the command and arguments for the MCP server.
+ :type config: str
+ :param envVarInstructions: Instructions for the user to get the environment variables.
+ :type envVarInstructions: str
+ :param url: The URL of the MCP server GitHub repository or webpage.
+ :type url: str
+ :param isActive: Whether the MCP server is active.
+ :type isActive: bool
+ :param metadata: Additional information about the MCP server, including github_stars, etc.
+ :type metadata: dict
- .. py:attribute:: parameters
+ .. py:attribute:: name
+ :value: None
+
+
+
+ .. py:attribute:: description
+ :value: None
+
+
+
+ .. py:attribute:: env_vars
+ :value: None
+
+
+
+ .. py:attribute:: config
+ :value: None
+
+
+
+ .. py:attribute:: env_var_instructions
+ :value: None
+
+
+
+ .. py:attribute:: url
+ :value: None
+
+
+
+ .. py:attribute:: is_active
+ :value: None
+
+
+
+ .. py:attribute:: metadata
:value: None
@@ -33973,69 +38109,139 @@ Package Contents
-.. py:class:: LlmResponse(client, content=None, tokens=None, stopReason=None, llmName=None, inputTokens=None, outputTokens=None, totalTokens=None, codeBlocks={})
+.. py:class:: McpServerConnection(client, mcpServerConnectionId=None, createdAt=None, updatedAt=None, name=None, config=None, description=None, transport=None, authType=None, externalConnectionId=None, inactive=None, tools=None, errorMsg=None, metadata=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
- The response returned by LLM
+ Model Context Protocol Server Connection
:param client: An authenticated API Client instance
:type client: ApiClient
- :param content: Full response from LLM.
- :type content: str
- :param tokens: The number of tokens in the response.
- :type tokens: int
- :param stopReason: The reason due to which the response generation stopped.
- :type stopReason: str
- :param llmName: The name of the LLM model used to generate the response.
- :type llmName: str
- :param inputTokens: The number of input tokens used in the LLM call.
- :type inputTokens: int
- :param outputTokens: The number of output tokens generated in the LLM response.
- :type outputTokens: int
- :param totalTokens: The total number of tokens (input + output) used in the LLM interaction.
- :type totalTokens: int
- :param codeBlocks: A list of parsed code blocks from raw LLM Response
- :type codeBlocks: LlmCodeBlock
+ :param mcpServerConnectionId: The ID of the MCP server connection.
+ :type mcpServerConnectionId: id
+ :param createdAt: The date and time the MCP server connection was created.
+ :type createdAt: str
+ :param updatedAt: The date and time the MCP server connection was updated.
+ :type updatedAt: str
+ :param name: The name of the MCP server.
+ :type name: str
+ :param config: A dictionary containing the config for the MCP server.
+ :type config: dict
+ :param description: Description of what the MCP server does.
+ :type description: str
+ :param transport: The transport type for the MCP server.
+ :type transport: str
+ :param authType: The auth type for the MCP server.
+ :type authType: str
+ :param externalConnectionId: The external connection ID for the MCP server.
+ :type externalConnectionId: id
+ :param inactive: Whether the MCP server is inactive.
+ :type inactive: bool
+ :param tools: The tools for the MCP server.
+ :type tools: list
+ :param errorMsg: The error message for the MCP server.
+ :type errorMsg: str
+ :param metadata: The metadata for the MCP server.
+ :type metadata: dict
- .. py:attribute:: content
+ .. py:attribute:: mcp_server_connection_id
:value: None
- .. py:attribute:: tokens
+ .. py:attribute:: created_at
:value: None
- .. py:attribute:: stop_reason
+ .. py:attribute:: updated_at
:value: None
- .. py:attribute:: llm_name
+ .. py:attribute:: name
:value: None
- .. py:attribute:: input_tokens
+ .. py:attribute:: config
:value: None
- .. py:attribute:: output_tokens
+ .. py:attribute:: description
:value: None
- .. py:attribute:: total_tokens
+ .. py:attribute:: transport
:value: None
- .. py:attribute:: code_blocks
+ .. py:attribute:: auth_type
+ :value: None
+
+
+
+ .. py:attribute:: external_connection_id
+ :value: None
+
+
+
+ .. py:attribute:: inactive
+ :value: None
+
+
+
+ .. py:attribute:: tools
+ :value: None
+
+
+
+ .. py:attribute:: error_msg
+ :value: None
+
+
+
+ .. py:attribute:: metadata
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
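+ A sketch that filters a list of connections down to the usable ones, using only the attributes documented above:
+
+ .. code-block:: python
+
+     def active_tools(connections):
+         """Map each healthy, active MCP server connection to its tools."""
+         return {
+             conn.name: (conn.tools or [])
+             for conn in connections
+             if not conn.inactive and not conn.error_msg
+         }
+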
+.. py:class:: McpServerQueryResult(client, output=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Result of a MCP server query
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param output: The execution logs as well as the final output of the MCP server query
+ :type output: dict
+
+
+ .. py:attribute:: output
+ :value: None
+
.. py:attribute:: deprecated_keys
@@ -34171,7 +38377,7 @@ Package Contents
-.. py:class:: Model(client, name=None, modelId=None, modelConfigType=None, modelPredictionConfig=None, createdAt=None, projectId=None, shared=None, sharedAt=None, trainFunctionName=None, predictFunctionName=None, predictManyFunctionName=None, initializeFunctionName=None, trainingInputTables=None, sourceCode=None, cpuSize=None, memory=None, trainingFeatureGroupIds=None, algorithmModelConfigs=None, trainingVectorStoreVersions=None, documentRetrievers=None, documentRetrieverIds=None, isPythonModel=None, defaultAlgorithm=None, customAlgorithmConfigs=None, restrictedAlgorithms=None, useGpu=None, notebookId=None, trainingRequired=None, location={}, refreshSchedules={}, codeSource={}, databaseConnector={}, dataLlmFeatureGroups={}, latestModelVersion={}, modelConfig={})
+.. py:class:: Model(client, name=None, modelId=None, modelConfigType=None, modelPredictionConfig=None, createdAt=None, projectId=None, trainFunctionName=None, predictFunctionName=None, predictManyFunctionName=None, initializeFunctionName=None, trainingInputTables=None, sourceCode=None, cpuSize=None, memory=None, trainingFeatureGroupIds=None, algorithmModelConfigs=None, trainingVectorStoreVersions=None, documentRetrievers=None, documentRetrieverIds=None, isPythonModel=None, defaultAlgorithm=None, customAlgorithmConfigs=None, restrictedAlgorithms=None, useGpu=None, notebookId=None, trainingRequired=None, location={}, refreshSchedules={}, codeSource={}, databaseConnector={}, dataLlmFeatureGroups={}, latestModelVersion={}, modelConfig={})
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -34192,10 +38398,6 @@ Package Contents
:type createdAt: str
:param projectId: The project this model belongs to.
:type projectId: str
- :param shared: If model is shared to the Abacus.AI model showcase.
- :type shared: bool
- :param sharedAt: The date and time at which the model was shared to the model showcase
- :type sharedAt: str
:param trainFunctionName: Name of the function found in the source code that will be executed to train the model. It is not executed when this function is run.
:type trainFunctionName: str
:param predictFunctionName: Name of the function found in the source code that will be executed run predictions through model. It is not executed when this function is run.
@@ -34282,16 +38484,6 @@ Package Contents
- .. py:attribute:: shared
- :value: None
-
-
-
- .. py:attribute:: shared_at
- :value: None
-
-
-
.. py:attribute:: train_function_name
:value: None
@@ -37588,7 +41780,7 @@ Package Contents
-.. py:class:: OrganizationExternalApplicationSettings(client, logo=None, theme=None, managedUserService=None, passwordsDisabled=None)
+.. py:class:: OrganizationExternalApplicationSettings(client, logo=None, theme=None, managedUserService=None, passwordsDisabled=None, externalServiceForRoles=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -37605,6 +41797,8 @@ Package Contents
:type managedUserService: str
:param passwordsDisabled: Whether or not passwords are disabled for this organization's domain.
:type passwordsDisabled: bool
+ :param externalServiceForRoles: The external service that is managing the user roles.
+ :type externalServiceForRoles: str
.. py:attribute:: logo
@@ -37627,6 +41821,11 @@ Package Contents
+ .. py:attribute:: external_service_for_roles
+ :value: None
+
+
+
.. py:attribute:: deprecated_keys
@@ -37835,7 +42034,7 @@ Package Contents
-.. py:class:: OrganizationSecret(client, secretKey=None, value=None, createdAt=None)
+.. py:class:: OrganizationSecret(client, secretKey=None, value=None, createdAt=None, metadata=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -37850,6 +42049,8 @@ Package Contents
:type value: str
:param createdAt: The date and time when the secret was created, in ISO-8601 format.
:type createdAt: str
+ :param metadata: Additional metadata for the secret (e.g., web_app_project_ids).
+ :type metadata: dict
.. py:attribute:: secret_key
@@ -37867,6 +42068,11 @@ Package Contents
+ .. py:attribute:: metadata
+ :value: None
+
+
+
.. py:attribute:: deprecated_keys
@@ -39926,7 +44132,7 @@ Package Contents
- .. py:method:: get_chat_response(deployment_token, deployment_id, messages, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None)
+ .. py:method:: get_chat_response(deployment_token, deployment_id, messages, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, user_info = None)
Return a chat response which continues the conversation based on the input messages and search results.
@@ -39982,7 +44188,7 @@ Package Contents
- .. py:method:: get_conversation_response(deployment_id, message, deployment_token, deployment_conversation_id = None, external_session_id = None, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, doc_infos = None)
+ .. py:method:: get_conversation_response(deployment_id, message, deployment_token, deployment_conversation_id = None, external_session_id = None, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, doc_infos = None, user_info = None, execute_usercode_tool = False)
Return a conversation response which continues the conversation based on the input message and deployment conversation id (if exists).
@@ -40012,6 +44218,8 @@ Package Contents
:type chat_config: dict
:param doc_infos: An optional list of documents use for the conversation. A keyword 'doc_id' is expected to be present in each document for retrieving contents from docstore.
:type doc_infos: list
+ :param execute_usercode_tool: If True, will return the tool output in the response.
+ :type execute_usercode_tool: bool
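+
+ A hedged sketch of the new ``execute_usercode_tool`` flag; ``client`` is assumed to be an authenticated client exposing this method, and the token/ID values are placeholders:
+
+ .. code-block:: python
+
+     response = client.get_conversation_response(
+         deployment_id="DEPLOYMENT_ID",
+         message="Summarize the latest sales numbers",
+         deployment_token="DEPLOYMENT_TOKEN",
+         execute_usercode_tool=True,  # ask for the tool output in the response
+     )
+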
@@ -40048,6 +44256,17 @@ Package Contents
+ .. py:method:: get_deep_agent_response(message, deployment_conversation_id = None)
+
+ Return a DeepAgent response with generated files if any, based on the input message.
+
+ :param message: The user's message/task for DeepAgent to complete
+ :type message: str
+ :param deployment_conversation_id: The unique identifier of a deployment conversation to continue. If not specified, a new one will be created.
+ :type deployment_conversation_id: str
+
+
+
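+ A hedged sketch; it assumes the method is exposed on an authenticated object named ``client`` here, and the conversation ID is a placeholder:
+
+ .. code-block:: python
+
+     # Start a new DeepAgent conversation, then continue an existing one.
+     first = client.get_deep_agent_response(message="Build a small landing page")
+     follow_up = client.get_deep_agent_response(
+         message="Make the headline shorter",
+         deployment_conversation_id="DEPLOYMENT_CONVERSATION_ID",
+     )
+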
.. py:method:: get_search_results(deployment_token, deployment_id, query_data, num = 15)
Return the most relevant search results to the search query from the uploaded documents.
@@ -40166,6 +44385,36 @@ Package Contents
+ .. py:method:: get_optimization_inputs_from_serialized(deployment_token, deployment_id, query_data = None)
+
+ Get assignments for the given query, using new inputs.
+
+ :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be safely embedded in an application or website.
+ :type deployment_token: str
+ :param deployment_id: The unique identifier of a deployment created under the project.
+ :type deployment_id: str
+ :param query_data: A dictionary mapping feature group names to serialized dataframes for the updated feature groups in the feature group tree, used to compute new top-level feature groups for the online solve.
+ :type query_data: dict
+
+
+
+ .. py:method:: get_assignments_online_with_new_serialized_inputs(deployment_token, deployment_id, query_data = None, solve_time_limit_seconds = None, optimality_gap_limit = None)
+
+ Get assignments for the given query, using new inputs.
+
+ :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be safely embedded in an application or website.
+ :type deployment_token: str
+ :param deployment_id: The unique identifier of a deployment created under the project.
+ :type deployment_id: str
+ :param query_data: A dictionary with 'assignment', 'constraint' and 'constraint_equations_df' entries (under these specific keys)
+ :type query_data: dict
+ :param solve_time_limit_seconds: Maximum time in seconds to spend solving the query.
+ :type solve_time_limit_seconds: float
+ :param optimality_gap_limit: The optimality gap within which a solution is accepted as valid (0 means only an optimal solution is accepted). It is computed as abs(best_solution_found - best_bound) / abs(best_solution_found).
+ :type optimality_gap_limit: float
+
+
+
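+ A hedged sketch of both serialized-input calls; ``client`` and the use of ``DataFrame.to_json()`` as the serialization are assumptions, since the docstrings only say the values are serialized dataframes:
+
+ .. code-block:: python
+
+     import pandas as pd
+
+     # Assumed serialization; the service's expected format may differ.
+     updated_fgs = {"demand_forecast": pd.DataFrame({"sku": ["a"], "demand": [10]}).to_json()}
+
+     inputs = client.get_optimization_inputs_from_serialized(
+         deployment_token="DEPLOYMENT_TOKEN",
+         deployment_id="DEPLOYMENT_ID",
+         query_data=updated_fgs,
+     )
+
+     assignments = client.get_assignments_online_with_new_serialized_inputs(
+         deployment_token="DEPLOYMENT_TOKEN",
+         deployment_id="DEPLOYMENT_ID",
+         query_data={"assignment": "...", "constraint": "...", "constraint_equations_df": "..."},
+         solve_time_limit_seconds=30.0,
+         optimality_gap_limit=0.0,
+     )
+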
.. py:method:: check_constraints(deployment_token, deployment_id, query_data)
Check for any constraints violated by the overrides.
@@ -40323,34 +44572,6 @@ Package Contents
- .. py:method:: generate_image(deployment_token, deployment_id, query_data)
-
- Generate an image from text prompt.
-
- :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model within an application or website.
- :type deployment_token: str
- :param deployment_id: A unique identifier to a deployment created under the project.
- :type deployment_id: str
- :param query_data: Specifies the text prompt. For example, {'prompt': 'a cat'}
- :type query_data: dict
-
-
-
- .. py:method:: execute_agent(deployment_token, deployment_id, arguments = None, keyword_arguments = None)
-
- Executes a deployed AI agent function using the arguments as keyword arguments to the agent execute function.
-
- :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
- :type deployment_token: str
- :param deployment_id: A unique string identifier for the deployment created under the project.
- :type deployment_id: str
- :param arguments: Positional arguments to the agent execute function.
- :type arguments: list
- :param keyword_arguments: A dictionary where each 'key' represents the paramter name and its corresponding 'value' represents the value of that parameter for the agent execute function.
- :type keyword_arguments: dict
-
-
-
.. py:method:: get_matrix_agent_schema(deployment_token, deployment_id, query, doc_infos = None, deployment_conversation_id = None, external_session_id = None)
Executes a deployed AI agent function using the arguments as keyword arguments to the agent execute function.
@@ -40370,31 +44591,6 @@ Package Contents
- .. py:method:: execute_conversation_agent(deployment_token, deployment_id, arguments = None, keyword_arguments = None, deployment_conversation_id = None, external_session_id = None, regenerate = False, doc_infos = None, agent_workflow_node_id = None)
-
- Executes a deployed AI agent function using the arguments as keyword arguments to the agent execute function.
-
- :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
- :type deployment_token: str
- :param deployment_id: A unique string identifier for the deployment created under the project.
- :type deployment_id: str
- :param arguments: Positional arguments to the agent execute function.
- :type arguments: list
- :param keyword_arguments: A dictionary where each 'key' represents the paramter name and its corresponding 'value' represents the value of that parameter for the agent execute function.
- :type keyword_arguments: dict
- :param deployment_conversation_id: A unique string identifier for the deployment conversation used for the conversation.
- :type deployment_conversation_id: str
- :param external_session_id: A unique string identifier for the session used for the conversation. If both deployment_conversation_id and external_session_id are not provided, a new session will be created.
- :type external_session_id: str
- :param regenerate: If True, will regenerate the response from the last query.
- :type regenerate: bool
- :param doc_infos: An optional list of documents use for the conversation. A keyword 'doc_id' is expected to be present in each document for retrieving contents from docstore.
- :type doc_infos: list
- :param agent_workflow_node_id: An optional agent workflow node id to trigger agent execution from an intermediate node.
- :type agent_workflow_node_id: str
-
-
-
.. py:method:: lookup_matches(deployment_token, deployment_id, data = None, filters = None, num = None, result_columns = None, max_words = None, num_retrieval_margin_words = None, max_words_per_chunk = None, score_multiplier_column = None, min_score = None, required_phrases = None, filter_clause = None, crowding_limits = None, include_text_search = False)
Lookup document retrievers and return the matching documents from the document retriever deployed with given query.
@@ -41083,6 +45279,156 @@ Package Contents
+.. py:class:: PresentationExportResult(client, filePath=None, webViewLink=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Export Presentation
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param filePath: The path to the exported presentation
+ :type filePath: str
+ :param webViewLink: The web view link to the exported presentation (if applicable)
+ :type webViewLink: str
+
+
+ .. py:attribute:: file_path
+ :value: None
+
+
+
+ .. py:attribute:: web_view_link
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: PrivateWebAppDeployment(client, hostname=None, llmArtifactId=None, hostedAppVersion=None, applicationType=None, lifecycle=None, deploymentConversationId=None, conversationName=None, userId=None, email=None, conversationType=None, source=None, conversationCreatedAt=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Private web app deployment list.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param hostname: The hostname of the web app deployment.
+ :type hostname: str
+ :param llmArtifactId: The ID of the LLM artifact.
+ :type llmArtifactId: id
+ :param hostedAppVersion: The version of the hosted app.
+ :type hostedAppVersion: id
+ :param applicationType: The type of application.
+ :type applicationType: str
+ :param lifecycle: The lifecycle of the web app deployment.
+ :type lifecycle: str
+ :param deploymentConversationId: The ID of the deployment conversation.
+ :type deploymentConversationId: id
+ :param conversationName: The name of the conversation.
+ :type conversationName: str
+ :param userId: The ID of the user who created the deployment.
+ :type userId: id
+ :param email: The email of the user who created the deployment.
+ :type email: str
+ :param conversationType: The type of conversation.
+ :type conversationType: str
+ :param source: The source of the conversation.
+ :type source: str
+ :param conversationCreatedAt: The timestamp when the conversation was created.
+ :type conversationCreatedAt: str
+
+
+ .. py:attribute:: hostname
+ :value: None
+
+
+
+ .. py:attribute:: llm_artifact_id
+ :value: None
+
+
+
+ .. py:attribute:: hosted_app_version
+ :value: None
+
+
+
+ .. py:attribute:: application_type
+ :value: None
+
+
+
+ .. py:attribute:: lifecycle
+ :value: None
+
+
+
+ .. py:attribute:: deployment_conversation_id
+ :value: None
+
+
+
+ .. py:attribute:: conversation_name
+ :value: None
+
+
+
+ .. py:attribute:: user_id
+ :value: None
+
+
+
+ .. py:attribute:: email
+ :value: None
+
+
+
+ .. py:attribute:: conversation_type
+ :value: None
+
+
+
+ .. py:attribute:: source
+ :value: None
+
+
+
+ .. py:attribute:: conversation_created_at
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
.. py:class:: ProblemType(client, problemType=None, requiredFeatureGroupType=None, optionalFeatureGroupTypes=None, useCasesSupportCustomAlgorithm=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -41311,6 +45657,17 @@ Package Contents
+ .. py:method:: add_scope_for_user(email, scope)
+
+ Add a user to a project.
+
+ :param email: The user's email.
+ :type email: str
+ :param scope: The list of project scopes.
+ :type scope: list
+
+
+
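+ A minimal sketch, assuming ``project`` is a ``Project`` instance obtained elsewhere; the email and scope values are placeholders, since valid scopes are not listed here:
+
+ .. code-block:: python
+
+     project.add_scope_for_user(email="dev@example.com", scope=["PLACEHOLDER_SCOPE"])
+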
.. py:method:: describe_feature_group(feature_group_id)
Describe a feature group associated with a project
@@ -41532,12 +45889,12 @@ Package Contents
- .. py:method:: list_model_monitors()
+ .. py:method:: list_model_monitors(limit = None)
Retrieves the list of model monitors in the specified project.
- :param project_id: Unique string identifier associated with the project.
- :type project_id: str
+ :param limit: Maximum number of model monitors to return. An internal limit applies if not set.
+ :type limit: int
:returns: A list of model monitors.
:rtype: list[ModelMonitor]
@@ -41779,12 +46136,12 @@ Package Contents
- .. py:method:: list_batch_predictions()
+ .. py:method:: list_batch_predictions(limit = None)
Retrieves a list of batch predictions in the project.
- :param project_id: Unique string identifier of the project.
- :type project_id: str
+ :param limit: Maximum number of batch predictions to return. An internal limit applies if not set.
+ :type limit: int
:returns: List of batch prediction jobs.
:rtype: list[BatchPrediction]
@@ -42493,7 +46850,7 @@ Package Contents
-.. py:class:: PythonFunction(client, notebookId=None, name=None, createdAt=None, functionVariableMappings=None, outputVariableMappings=None, functionName=None, pythonFunctionId=None, functionType=None, packageRequirements=None, description=None, examples=None, codeSource={})
+.. py:class:: PythonFunction(client, notebookId=None, name=None, createdAt=None, functionVariableMappings=None, outputVariableMappings=None, functionName=None, pythonFunctionId=None, functionType=None, packageRequirements=None, description=None, examples=None, connectors=None, configurations=None, codeSource={})
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -42524,6 +46881,10 @@ Package Contents
:type description: str
:param examples: Dictionary containing example use cases and anti-patterns. Includes 'positive' examples showing recommended usage and 'negative' examples showing cases to avoid.
:type examples: dict[str, list[str]]
+ :param connectors: Dictionary containing user-level and organization-level connectors
+ :type connectors: dict
+ :param configurations: Dictionary containing configurations for the Python function
+ :type configurations: dict
:param codeSource: Information about the source code of the Python function.
:type codeSource: CodeSource
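+ A brief sketch of how the new fields can be inspected; ``python_function`` is assumed to be a ``PythonFunction`` instance obtained through the client, and both fields may be ``None`` when unset:
+
+ .. code-block:: python
+
+    # Both fields are plain dictionaries when populated.
+    print(python_function.connectors)        # user-level and organization-level connectors
+    print(python_function.configurations)    # configurations for the Python function
+
+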
@@ -42583,6 +46944,16 @@ Package Contents
+ .. py:attribute:: connectors
+ :value: None
+
+
+
+ .. py:attribute:: configurations
+ :value: None
+
+
+
.. py:attribute:: code_source
@@ -43363,19 +47734,68 @@ Package Contents
-.. py:class:: RegenerateLlmExternalApplication(client, name=None, externalApplicationId=None)
+.. py:class:: RegenerateLlmExternalApplication(client, name=None, llmBotIcon=None, llmName=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ An external application that specifies an LLM the user can regenerate with in RouteLLM.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param name: The display name of the LLM (e.g., GPT-5).
+ :type name: str
+ :param llmBotIcon: The bot icon of the LLM.
+ :type llmBotIcon: str
+ :param llmName: The external name of the LLM (e.g., OPENAI_GPT5).
+ :type llmName: str
+
+
+ .. py:attribute:: name
+ :value: None
+
+
+
+ .. py:attribute:: llm_bot_icon
+ :value: None
+
+
+
+ .. py:attribute:: llm_name
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: RegenerateLlmOption(client, name=None, llmBotIcon=None, llmName=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
- An external application that specifies an LLM user can regenerate with in RouteLLM.
+ An option that specifies an LLM the user can regenerate with in RouteLLM.
:param client: An authenticated API Client instance
:type client: ApiClient
- :param name: The external name of the LLM.
+ :param name: The display name of the LLM (e.g., GPT-5).
:type name: str
- :param externalApplicationId: The unique identifier of the external application.
- :type externalApplicationId: str
+ :param llmBotIcon: The bot icon of the LLM.
+ :type llmBotIcon: str
+ :param llmName: The external name of the LLM (e.g., OPENAI_GPT5).
+ :type llmName: str
.. py:attribute:: name
@@ -43383,7 +47803,12 @@ Package Contents
- .. py:attribute:: external_application_id
+ .. py:attribute:: llm_bot_icon
+ :value: None
+
+
+
+ .. py:attribute:: llm_name
:value: None
@@ -43564,7 +47989,7 @@ Package Contents
:type client: ApiClient
:param name: The unique name of the feature.
:type name: str
- :param featureMapping: The mapping of the feature. The possible values will be based on the project's use-case. See the (Use Case Documentation)[https://api.abacus.ai/app/help/useCases] for more details.
+ :param featureMapping: The mapping of the feature. The possible values will be based on the project's use-case. See the (Use Case Documentation)[https://api.abacus.ai/app/help/developer-platform/useCases] for more details.
:type featureMapping: str
:param detectedFeatureMapping: Detected feature mapping for this feature
:type detectedFeatureMapping: str
@@ -43638,6 +48063,112 @@ Package Contents
+.. py:class:: SessionSummary(client, summary=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ A session summary
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param summary: The summary of the session.
+ :type summary: str
+
+
+ .. py:attribute:: summary
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: SessionTranscript(client, role=None, content=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ A session transcript
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param role: The role of the transcript.
+ :type role: str
+ :param content: The content of the transcript.
+ :type content: str
+
+
+ .. py:attribute:: role
+ :value: None
+
+
+
+ .. py:attribute:: content
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: SessionTranscripts(client, transcripts=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ A list of session transcripts
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param transcripts: A list of session transcripts.
+ :type transcripts: list[SessionTranscript]
+
+
+ .. py:attribute:: transcripts
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
.. py:class:: SftpKey(client, keyName=None, publicKey=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -43984,6 +48515,197 @@ Package Contents
+.. py:class:: StsGenSettings(client, model=None, settings=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ STS generation settings
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param model: The model settings.
+ :type model: dict
+ :param settings: The settings for each model.
+ :type settings: dict
+
+
+ .. py:attribute:: model
+ :value: None
+
+
+
+ .. py:attribute:: settings
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: SttGenModel(client, displayName=None, type=None, valueType=None, optional=None, default=None, helptext=None, options={})
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ STT generation model
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param displayName: The display name for the UI component.
+ :type displayName: str
+ :param type: The type of the UI component.
+ :type type: str
+ :param valueType: The data type of the values within the UI component.
+ :type valueType: str
+ :param optional: Whether the selection of a value is optional.
+ :type optional: bool
+ :param default: The default value for the STT generation model.
+ :type default: str
+ :param helptext: The helptext for the UI component.
+ :type helptext: str
+ :param options: The options of models available for STT generation.
+ :type options: SttGenModelOptions
+
+
+ .. py:attribute:: display_name
+ :value: None
+
+
+
+ .. py:attribute:: type
+ :value: None
+
+
+
+ .. py:attribute:: value_type
+ :value: None
+
+
+
+ .. py:attribute:: optional
+ :value: None
+
+
+
+ .. py:attribute:: default
+ :value: None
+
+
+
+ .. py:attribute:: helptext
+ :value: None
+
+
+
+ .. py:attribute:: options
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: SttGenModelOptions(client, keys=None, values=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ STT generation model options
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param keys: The keys of the STT generation model options, represented as the enum values.
+ :type keys: list
+ :param values: The display names of the STT generation model options.
+ :type values: list
+
+
+ .. py:attribute:: keys
+ :value: None
+
+
+
+ .. py:attribute:: values
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: SttGenSettings(client, settings=None, model={})
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ STT generation settings
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param settings: The settings for each model.
+ :type settings: dict
+ :param model: Dropdown for models available for STT generation.
+ :type model: SttGenModel
+
+
+ .. py:attribute:: settings
+ :value: None
+
+
+
+ .. py:attribute:: model
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
.. py:class:: TemplateNodeDetails(client, notebookCode=None, workflowGraphNode={})
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -44275,6 +48997,46 @@ Package Contents
+.. py:class:: TtsGenSettings(client, model=None, settings=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ TTS generation settings
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param model: The model settings.
+ :type model: dict
+ :param settings: The settings for each model.
+ :type settings: dict
+
+
+ .. py:attribute:: model
+ :value: None
+
+
+
+ .. py:attribute:: settings
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
.. py:class:: TwitterSearchResult(client, title=None, url=None, twitterName=None, twitterHandle=None, thumbnailUrl=None, thumbnailWidth=None, thumbnailHeight=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -44350,6 +49112,88 @@ Package Contents
+.. py:class:: UnifiedConnector(client, applicationConnectorId=None, databaseConnectorId=None, service=None, name=None, createdAt=None, status=None, auth=None, isUserLevel=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Lightweight unified connector filter that skips expensive auth transformations.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param applicationConnectorId: The unique ID of the application connector.
+ :type applicationConnectorId: str
+ :param databaseConnectorId: The unique ID of the database connector.
+ :type databaseConnectorId: str
+ :param service: The service this connection connects to.
+ :type service: str
+ :param name: A user-friendly name for the service.
+ :type name: str
+ :param createdAt: When the connection was created.
+ :type createdAt: str
+ :param status: The status of the connector.
+ :type status: str
+ :param auth: Non-secret connection information for this connector.
+ :type auth: dict
+ :param isUserLevel: Whether this is a user-level connector.
+ :type isUserLevel: bool
+
+
+ .. py:attribute:: application_connector_id
+ :value: None
+
+
+
+ .. py:attribute:: database_connector_id
+ :value: None
+
+
+
+ .. py:attribute:: service
+ :value: None
+
+
+
+ .. py:attribute:: name
+ :value: None
+
+
+
+ .. py:attribute:: created_at
+ :value: None
+
+
+
+ .. py:attribute:: status
+ :value: None
+
+
+
+ .. py:attribute:: auth
+ :value: None
+
+
+
+ .. py:attribute:: is_user_level
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
.. py:class:: Upload(client, uploadId=None, datasetUploadId=None, status=None, datasetId=None, datasetVersion=None, modelId=None, modelVersion=None, batchPredictionId=None, parts=None, createdAt=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -44511,7 +49355,7 @@ Package Contents
- .. py:method:: upload_file(file, threads=10, chunksize=1024 * 1024 * 10, wait_timeout=600)
+ .. py:method:: upload_file(file, threads=10, chunksize=None, wait_timeout=600)
Uploads the file in the specified chunk size using the specified number of workers.
@@ -44519,7 +49363,7 @@ Package Contents
:type file: IOBase
:param threads: The max number of workers to use while uploading the file
:type threads: int
- :param chunksize: The number of bytes to use for each chunk while uploading the file. Defaults to 10 MB
+ :param chunksize: The number of bytes to use for each chunk while uploading the file. If None, the chunk size is calculated dynamically based on the file size (defaults to 20 MB).
:type chunksize: int
:param wait_timeout: The max number of seconds to wait for the file parts to be joined on Abacus.AI. Defaults to 600.
:type wait_timeout: int
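+ A minimal sketch of an upload that relies on the new dynamic chunk sizing; ``upload`` is assumed to be an ``Upload`` instance and the file path is a placeholder:
+
+ .. code-block:: python
+
+    with open("large_dataset.csv", "rb") as f:  # placeholder path
+        # Leaving chunksize as None lets the client pick a chunk size based on the file size.
+        upload.upload_file(f, threads=10, chunksize=None, wait_timeout=600)
+
+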
@@ -44529,6 +49373,9 @@ Package Contents
+ .. py:method:: _calculate_chunk_size(file_size)
+
+
.. py:method:: _yield_upload_part(file, chunksize)
@@ -44719,48 +49566,354 @@ Package Contents
-.. py:class:: User(client, name=None, email=None, createdAt=None, status=None, organizationGroups={})
+.. py:class:: User(client, userId=None, name=None, email=None, createdAt=None, status=None, organizationGroups={})
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ An Abacus.AI User with a unique user_id.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param userId: The unique identifier of the user.
+ :type userId: id
+ :param name: The User's name.
+ :type name: str
+ :param email: The User's primary email address.
+ :type email: str
+ :param createdAt: The date and time when the user joined Abacus.AI.
+ :type createdAt: str
+ :param status: `ACTIVE` when the user has accepted an invite to join the organization, else `INVITED`.
+ :type status: str
+ :param organizationGroups: List of Organization Groups this user belongs to.
+ :type organizationGroups: OrganizationGroup
+
+
+ .. py:attribute:: user_id
+ :value: None
+
+
+
+ .. py:attribute:: name
+ :value: None
+
+
+
+ .. py:attribute:: email
+ :value: None
+
+
+
+ .. py:attribute:: created_at
+ :value: None
+
+
+
+ .. py:attribute:: status
+ :value: None
+
+
+
+ .. py:attribute:: organization_groups
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: UserException(client, type=None, value=None, traceback=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Exception information for errors in usercode.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param type: The type of exception
+ :type type: str
+ :param value: The value of the exception
+ :type value: str
+ :param traceback: The traceback of the exception
+ :type traceback: str
+
+
+ .. py:attribute:: type
+ :value: None
+
+
+
+ .. py:attribute:: value
+ :value: None
+
+
+
+ .. py:attribute:: traceback
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: UserGroupObjectPermission(client, userGroupId=None, userGroupName=None, objectId=None, permission=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ A user group object permission
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param userGroupId: The unique identifier of the user group.
+ :type userGroupId: str
+ :param userGroupName: The name of the user group.
+ :type userGroupName: str
+ :param objectId: The unique identifier of the object.
+ :type objectId: str
+ :param permission: The permission level (e.g., 'ALL').
+ :type permission: str
+
+
+ .. py:attribute:: user_group_id
+ :value: None
+
+
+
+ .. py:attribute:: user_group_name
+ :value: None
+
+
+
+ .. py:attribute:: object_id
+ :value: None
+
+
+
+ .. py:attribute:: permission
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: VideoGenCosts(client, modelCosts=None, expensiveModels=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ The most expensive price, in credits, for each video generation model.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param modelCosts: The costs of the video gen models in credits
+ :type modelCosts: dict
+ :param expensiveModels: The list of video gen models that are expensive
+ :type expensiveModels: list
+
+
+ .. py:attribute:: model_costs
+ :value: None
+
+
+
+ .. py:attribute:: expensive_models
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: VideoGenModel(client, displayName=None, type=None, valueType=None, optional=None, default=None, helptext=None, options={})
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Video generation model
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param displayName: The display name for the UI component.
+ :type displayName: str
+ :param type: The type of the UI component.
+ :type type: str
+ :param valueType: The data type of the values within the UI component.
+ :type valueType: str
+ :param optional: Whether the selection of a value is optional.
+ :type optional: bool
+ :param default: The default value for the video generation model.
+ :type default: str
+ :param helptext: The helptext for the UI component.
+ :type helptext: str
+ :param options: The options of models available for video generation.
+ :type options: VideoGenModelOptions
+
+
+ .. py:attribute:: display_name
+ :value: None
+
+
+
+ .. py:attribute:: type
+ :value: None
+
+
+
+ .. py:attribute:: value_type
+ :value: None
+
+
+
+ .. py:attribute:: optional
+ :value: None
+
+
+
+ .. py:attribute:: default
+ :value: None
+
+
+
+ .. py:attribute:: helptext
+ :value: None
+
+
+
+ .. py:attribute:: options
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: VideoGenModelOptions(client, keys=None, values=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
- An Abacus.AI User
+ Video generation model options
:param client: An authenticated API Client instance
:type client: ApiClient
- :param name: The User's name.
- :type name: str
- :param email: The User's primary email address.
- :type email: str
- :param createdAt: The date and time when the user joined Abacus.AI.
- :type createdAt: str
- :param status: `ACTIVE` when the user has accepted an invite to join the organization, else `INVITED`.
- :type status: str
- :param organizationGroups: List of Organization Groups this user belongs to.
- :type organizationGroups: OrganizationGroup
+ :param keys: The keys of the video generation model options represented as the enum values.
+ :type keys: list
+ :param values: The display names of the video generation model options.
+ :type values: list
- .. py:attribute:: name
+ .. py:attribute:: keys
:value: None
- .. py:attribute:: email
+ .. py:attribute:: values
:value: None
- .. py:attribute:: created_at
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: VideoGenSettings(client, settings=None, warnings=None, model={})
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Video generation settings
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param settings: The settings for each model.
+ :type settings: dict
+ :param warnings: The warnings for each model.
+ :type warnings: dict
+ :param model: Dropdown for models available for video generation.
+ :type model: VideoGenModel
+
+
+ .. py:attribute:: settings
:value: None
- .. py:attribute:: status
+ .. py:attribute:: warnings
:value: None
- .. py:attribute:: organization_groups
+ .. py:attribute:: model
.. py:attribute:: deprecated_keys
@@ -44778,34 +49931,48 @@ Package Contents
-.. py:class:: UserException(client, type=None, value=None, traceback=None)
+.. py:class:: VideoSearchResult(client, title=None, url=None, thumbnailUrl=None, motionThumbnailUrl=None, embedUrl=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
- Exception information for errors in usercode.
+ A single video search result.
:param client: An authenticated API Client instance
:type client: ApiClient
- :param type: The type of exception
- :type type: str
- :param value: The value of the exception
- :type value: str
- :param traceback: The traceback of the exception
- :type traceback: str
+ :param title: The title of the video.
+ :type title: str
+ :param url: The URL of the video.
+ :type url: str
+ :param thumbnailUrl: The URL of the thumbnail of the video.
+ :type thumbnailUrl: str
+ :param motionThumbnailUrl: The URL of the motion thumbnail of the video.
+ :type motionThumbnailUrl: str
+ :param embedUrl: The URL of the embed of the video.
+ :type embedUrl: str
- .. py:attribute:: type
+ .. py:attribute:: title
:value: None
- .. py:attribute:: value
+ .. py:attribute:: url
:value: None
- .. py:attribute:: traceback
+ .. py:attribute:: thumbnail_url
+ :value: None
+
+
+
+ .. py:attribute:: motion_thumbnail_url
+ :value: None
+
+
+
+ .. py:attribute:: embed_url
:value: None
@@ -44825,19 +49992,19 @@ Package Contents
-.. py:class:: VideoGenSettings(client, model=None, settings=None)
+.. py:class:: VoiceGenDetails(client, model=None, voice=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
- Video generation settings
+ Voice generation details
:param client: An authenticated API Client instance
:type client: ApiClient
- :param model: The model settings.
- :type model: dict
- :param settings: The settings for each model.
- :type settings: dict
+ :param model: The model used for voice generation.
+ :type model: str
+ :param voice: The voice details.
+ :type voice: dict
.. py:attribute:: model
@@ -44845,7 +50012,7 @@ Package Contents
- .. py:attribute:: settings
+ .. py:attribute:: voice
:value: None
@@ -44865,48 +50032,48 @@ Package Contents
-.. py:class:: VideoSearchResult(client, title=None, url=None, thumbnailUrl=None, motionThumbnailUrl=None, embedUrl=None)
+.. py:class:: WebAppConversation(client, deploymentConversationId=None, llmArtifactId=None, deploymentConversationName=None, externalApplicationId=None, createdAt=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
- A single video search result.
+ Web App Conversation
:param client: An authenticated API Client instance
:type client: ApiClient
- :param title: The title of the video.
- :type title: str
- :param url: The URL of the video.
- :type url: str
- :param thumbnailUrl: The URL of the thumbnail of the video.
- :type thumbnailUrl: str
- :param motionThumbnailUrl: The URL of the motion thumbnail of the video.
- :type motionThumbnailUrl: str
- :param embedUrl: The URL of the embed of the video.
- :type embedUrl: str
+ :param deploymentConversationId: The ID of the deployment conversation
+ :type deploymentConversationId: id
+ :param llmArtifactId: The ID of the LLM artifact
+ :type llmArtifactId: id
+ :param deploymentConversationName: The name of the conversation
+ :type deploymentConversationName: str
+ :param externalApplicationId: The external application ID
+ :type externalApplicationId: str
+ :param createdAt: The creation timestamp
+ :type createdAt: str
- .. py:attribute:: title
+ .. py:attribute:: deployment_conversation_id
:value: None
- .. py:attribute:: url
+ .. py:attribute:: llm_artifact_id
:value: None
- .. py:attribute:: thumbnail_url
+ .. py:attribute:: deployment_conversation_name
:value: None
- .. py:attribute:: motion_thumbnail_url
+ .. py:attribute:: external_application_id
:value: None
- .. py:attribute:: embed_url
+ .. py:attribute:: created_at
:value: None
@@ -44926,27 +50093,109 @@ Package Contents
-.. py:class:: VoiceGenDetails(client, model=None, voice=None)
+.. py:class:: WebAppDeploymentPermissionDict(client, deploymentPermissions=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
- Voice generation details
+ Web app deployment permission dict.
:param client: An authenticated API Client instance
:type client: ApiClient
- :param model: The model used for voice generation.
- :type model: str
- :param voice: The voice details.
- :type voice: dict
+ :param deploymentPermissions: Dictionary mapping each deployment ID to a list of (user_group_id, permission) tuples.
+ :type deploymentPermissions: dict
- .. py:attribute:: model
+ .. py:attribute:: deployment_permissions
:value: None
- .. py:attribute:: voice
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
+.. py:class:: WebAppDomain(client, webAppDomainId=None, hostname=None, domainType=None, lifecycle=None, nameservers=None, dnsRecords=None, metadata=None, isRootDomain=None, isDeployed=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Web App Domain
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param webAppDomainId: The ID of the web app domain
+ :type webAppDomainId: id
+ :param hostname: The hostname of the web app domain
+ :type hostname: str
+ :param domainType: The type of the web app domain
+ :type domainType: str
+ :param lifecycle: The lifecycle of the web app domain
+ :type lifecycle: str
+ :param nameservers: The nameservers of the web app domain
+ :type nameservers: list
+ :param dnsRecords: The DNS records of the web app domain
+ :type dnsRecords: list
+ :param metadata: The metadata of the web app domain
+ :type metadata: dict
+ :param isRootDomain: Whether the web app domain is a root domain
+ :type isRootDomain: bool
+ :param isDeployed: Whether the web app domain is deployed
+ :type isDeployed: bool
+
+
+ .. py:attribute:: web_app_domain_id
+ :value: None
+
+
+
+ .. py:attribute:: hostname
+ :value: None
+
+
+
+ .. py:attribute:: domain_type
+ :value: None
+
+
+
+ .. py:attribute:: lifecycle
+ :value: None
+
+
+
+ .. py:attribute:: nameservers
+ :value: None
+
+
+
+ .. py:attribute:: dns_records
+ :value: None
+
+
+
+ .. py:attribute:: metadata
+ :value: None
+
+
+
+ .. py:attribute:: is_root_domain
+ :value: None
+
+
+
+ .. py:attribute:: is_deployed
:value: None
@@ -45105,6 +50354,95 @@ Package Contents
+.. py:class:: WebServiceTriggerRun(client, endpoint=None, createdAt=None, payload=None, headers=None, method=None, responseStatus=None, responseBody=None, error=None, lifecycle=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ A web service trigger run
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param endpoint: The endpoint of the web service trigger run.
+ :type endpoint: str
+ :param createdAt: The creation time of the web service trigger run.
+ :type createdAt: str
+ :param payload: The payload of the web service trigger run.
+ :type payload: dict
+ :param headers: The headers of the web service trigger run.
+ :type headers: dict
+ :param method: The method of the web service trigger run.
+ :type method: str
+ :param responseStatus: The HTTP response status code.
+ :type responseStatus: int
+ :param responseBody: The HTTP response body.
+ :type responseBody: str
+ :param error: Error message if the request failed.
+ :type error: str
+ :param lifecycle: The lifecycle status of the run.
+ :type lifecycle: str
+
+
+ .. py:attribute:: endpoint
+ :value: None
+
+
+
+ .. py:attribute:: created_at
+ :value: None
+
+
+
+ .. py:attribute:: payload
+ :value: None
+
+
+
+ .. py:attribute:: headers
+ :value: None
+
+
+
+ .. py:attribute:: method
+ :value: None
+
+
+
+ .. py:attribute:: response_status
+ :value: None
+
+
+
+ .. py:attribute:: response_body
+ :value: None
+
+
+
+ .. py:attribute:: error
+ :value: None
+
+
+
+ .. py:attribute:: lifecycle
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
.. py:class:: Webhook(client, webhookId=None, deploymentId=None, endpoint=None, webhookEventType=None, payloadTemplate=None, createdAt=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -45216,7 +50554,7 @@ Package Contents
-.. py:class:: WorkflowGraphNodeDetails(client, packageRequirements=None, workflowGraphNode={})
+.. py:class:: WorkflowGraphNodeDetails(client, packageRequirements=None, connectors=None, workflowGraphNode={})
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -45227,6 +50565,8 @@ Package Contents
:type client: ApiClient
:param packageRequirements: A list of package requirements that the node source code will need.
:type packageRequirements: list[str]
+ :param connectors: A dictionary of connectors that the node source code will need.
+ :type connectors: dict
:param workflowGraphNode: The workflow graph node object.
:type workflowGraphNode: WorkflowGraphNode
@@ -45236,6 +50576,11 @@ Package Contents
+ .. py:attribute:: connectors
+ :value: None
+
+
+
.. py:attribute:: workflow_graph_node
@@ -45352,6 +50697,6 @@ Package Contents
.. py:data:: __version__
- :value: '1.4.31'
+ :value: '1.4.76'
diff --git a/docs/_sources/autoapi/abacusai/lip_sync_gen_settings/index.rst.txt b/docs/_sources/autoapi/abacusai/lip_sync_gen_settings/index.rst.txt
new file mode 100644
index 000000000..47ece6541
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/lip_sync_gen_settings/index.rst.txt
@@ -0,0 +1,57 @@
+abacusai.lip_sync_gen_settings
+==============================
+
+.. py:module:: abacusai.lip_sync_gen_settings
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.lip_sync_gen_settings.LipSyncGenSettings
+
+
+Module Contents
+---------------
+
+.. py:class:: LipSyncGenSettings(client, model=None, settings=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Lip sync generation settings
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param model: The model settings.
+ :type model: dict
+ :param settings: The settings for each model.
+ :type settings: dict
+
+
+ .. py:attribute:: model
+ :value: None
+
+
+
+ .. py:attribute:: settings
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/llm_artifact/index.rst.txt b/docs/_sources/autoapi/abacusai/llm_artifact/index.rst.txt
new file mode 100644
index 000000000..a4409f358
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/llm_artifact/index.rst.txt
@@ -0,0 +1,141 @@
+abacusai.llm_artifact
+=====================
+
+.. py:module:: abacusai.llm_artifact
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.llm_artifact.LlmArtifact
+
+
+Module Contents
+---------------
+
+.. py:class:: LlmArtifact(client, llmArtifactId=None, info=None, description=None, createdAt=None, webAppDeploymentId=None, deploymentStatus=None, isLatest=None, deploymentConversationId=None, webAppProjectId=None, hasDatabase=None, hasStorage=None, projectMetadata=None, hasDatabaseSnapshot=None, supportsPreviewRestore=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ LLM Artifact
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param llmArtifactId: The ID of the LLM artifact
+ :type llmArtifactId: id
+ :param info: The info of the LLM artifact
+ :type info: dict
+ :param description: The description of the LLM artifact
+ :type description: str
+ :param createdAt: The creation timestamp
+ :type createdAt: str
+ :param webAppDeploymentId: The ID of the associated web app deployment
+ :type webAppDeploymentId: id
+ :param deploymentStatus: The status of the associated web app deployment
+ :type deploymentStatus: str
+ :param isLatest: Whether it is the most recent version of the artifact
+ :type isLatest: bool
+ :param deploymentConversationId: The ID of the deployment conversation
+ :type deploymentConversationId: id
+ :param webAppProjectId: The ID of the web app project
+ :type webAppProjectId: id
+ :param hasDatabase: Whether the app associated with the artifact has a database
+ :type hasDatabase: bool
+ :param hasStorage: Whether the app associated with the artifact has storage
+ :type hasStorage: bool
+ :param projectMetadata: The metadata of the web app project
+ :type projectMetadata: dict
+ :param hasDatabaseSnapshot: Whether the artifact database has a snapshot
+ :type hasDatabaseSnapshot: bool
+ :param supportsPreviewRestore: Whether the artifact supports preview restore
+ :type supportsPreviewRestore: bool
+
+
+ .. py:attribute:: llm_artifact_id
+ :value: None
+
+
+
+ .. py:attribute:: info
+ :value: None
+
+
+
+ .. py:attribute:: description
+ :value: None
+
+
+
+ .. py:attribute:: created_at
+ :value: None
+
+
+
+ .. py:attribute:: web_app_deployment_id
+ :value: None
+
+
+
+ .. py:attribute:: deployment_status
+ :value: None
+
+
+
+ .. py:attribute:: is_latest
+ :value: None
+
+
+
+ .. py:attribute:: deployment_conversation_id
+ :value: None
+
+
+
+ .. py:attribute:: web_app_project_id
+ :value: None
+
+
+
+ .. py:attribute:: has_database
+ :value: None
+
+
+
+ .. py:attribute:: has_storage
+ :value: None
+
+
+
+ .. py:attribute:: project_metadata
+ :value: None
+
+
+
+ .. py:attribute:: has_database_snapshot
+ :value: None
+
+
+
+ .. py:attribute:: supports_preview_restore
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/llm_artifact_conversation/index.rst.txt b/docs/_sources/autoapi/abacusai/llm_artifact_conversation/index.rst.txt
new file mode 100644
index 000000000..07795732b
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/llm_artifact_conversation/index.rst.txt
@@ -0,0 +1,78 @@
+abacusai.llm_artifact_conversation
+==================================
+
+.. py:module:: abacusai.llm_artifact_conversation
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.llm_artifact_conversation.LlmArtifactConversation
+
+
+Module Contents
+---------------
+
+.. py:class:: LlmArtifactConversation(client, deploymentConversationId=None, llmArtifactId=None, deploymentConversationName=None, externalApplicationId=None, createdAt=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ LLM Artifact Conversation
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param deploymentConversationId: The ID of the deployment conversation
+ :type deploymentConversationId: id
+ :param llmArtifactId: The ID of the LLM artifact
+ :type llmArtifactId: id
+ :param deploymentConversationName: The name of the conversation
+ :type deploymentConversationName: str
+ :param externalApplicationId: The external application ID
+ :type externalApplicationId: str
+ :param createdAt: The creation timestamp
+ :type createdAt: str
+
+
+ .. py:attribute:: deployment_conversation_id
+ :value: None
+
+
+
+ .. py:attribute:: llm_artifact_id
+ :value: None
+
+
+
+ .. py:attribute:: deployment_conversation_name
+ :value: None
+
+
+
+ .. py:attribute:: external_application_id
+ :value: None
+
+
+
+ .. py:attribute:: created_at
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/mcp_config/index.rst.txt b/docs/_sources/autoapi/abacusai/mcp_config/index.rst.txt
new file mode 100644
index 000000000..ce77b2e29
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/mcp_config/index.rst.txt
@@ -0,0 +1,50 @@
+abacusai.mcp_config
+===================
+
+.. py:module:: abacusai.mcp_config
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.mcp_config.McpConfig
+
+
+Module Contents
+---------------
+
+.. py:class:: McpConfig(client, mcpConfig=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Model Context Protocol Config
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param mcpConfig: The MCP configuration for the current user
+ :type mcpConfig: dict
+
+
+ .. py:attribute:: mcp_config
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/mcp_server/index.rst.txt b/docs/_sources/autoapi/abacusai/mcp_server/index.rst.txt
new file mode 100644
index 000000000..81099712c
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/mcp_server/index.rst.txt
@@ -0,0 +1,99 @@
+abacusai.mcp_server
+===================
+
+.. py:module:: abacusai.mcp_server
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.mcp_server.McpServer
+
+
+Module Contents
+---------------
+
+.. py:class:: McpServer(client, name=None, description=None, envVars=None, config=None, envVarInstructions=None, url=None, isActive=None, metadata=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Model Context Protocol Server
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param name: The name of the MCP server.
+ :type name: str
+ :param description: Description of what the MCP server does.
+ :type description: str
+ :param envVars: List of API keys or credentials required by the MCP server.
+ :type envVars: list
+ :param config: A JSON string containing the command and arguments for the MCP server.
+ :type config: str
+ :param envVarInstructions: Instructions for the user to obtain the environment variables.
+ :type envVarInstructions: str
+ :param url: The URL of the MCP server's GitHub repository or webpage.
+ :type url: str
+ :param isActive: Whether the MCP server is active.
+ :type isActive: bool
+ :param metadata: Additional information about the MCP server, including github_stars, etc.
+ :type metadata: dict
+
+
+ .. py:attribute:: name
+ :value: None
+
+
+
+ .. py:attribute:: description
+ :value: None
+
+
+
+ .. py:attribute:: env_vars
+ :value: None
+
+
+
+ .. py:attribute:: config
+ :value: None
+
+
+
+ .. py:attribute:: env_var_instructions
+ :value: None
+
+
+
+ .. py:attribute:: url
+ :value: None
+
+
+
+ .. py:attribute:: is_active
+ :value: None
+
+
+
+ .. py:attribute:: metadata
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/mcp_server_connection/index.rst.txt b/docs/_sources/autoapi/abacusai/mcp_server_connection/index.rst.txt
new file mode 100644
index 000000000..00e2c5377
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/mcp_server_connection/index.rst.txt
@@ -0,0 +1,134 @@
+abacusai.mcp_server_connection
+==============================
+
+.. py:module:: abacusai.mcp_server_connection
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.mcp_server_connection.McpServerConnection
+
+
+Module Contents
+---------------
+
+.. py:class:: McpServerConnection(client, mcpServerConnectionId=None, createdAt=None, updatedAt=None, name=None, config=None, description=None, transport=None, authType=None, externalConnectionId=None, inactive=None, tools=None, errorMsg=None, metadata=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Model Context Protocol Server Connection
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param mcpServerConnectionId: The ID of the MCP server connection.
+ :type mcpServerConnectionId: id
+ :param createdAt: The date and time the MCP server connection was created.
+ :type createdAt: str
+ :param updatedAt: The date and time the MCP server connection was updated.
+ :type updatedAt: str
+ :param name: The name of the MCP server.
+ :type name: str
+ :param config: A dictionary containing the config for the MCP server.
+ :type config: dict
+ :param description: Description of what the MCP server does.
+ :type description: str
+ :param transport: The transport type for the MCP server.
+ :type transport: str
+ :param authType: The auth type for the MCP server.
+ :type authType: str
+ :param externalConnectionId: The external connection ID for the MCP server.
+ :type externalConnectionId: id
+ :param inactive: Whether the MCP server is inactive.
+ :type inactive: bool
+ :param tools: The tools for the MCP server.
+ :type tools: list
+ :param errorMsg: The error message for the MCP server.
+ :type errorMsg: str
+ :param metadata: The metadata for the MCP server.
+ :type metadata: dict
+
+
+ .. py:attribute:: mcp_server_connection_id
+ :value: None
+
+
+
+ .. py:attribute:: created_at
+ :value: None
+
+
+
+ .. py:attribute:: updated_at
+ :value: None
+
+
+
+ .. py:attribute:: name
+ :value: None
+
+
+
+ .. py:attribute:: config
+ :value: None
+
+
+
+ .. py:attribute:: description
+ :value: None
+
+
+
+ .. py:attribute:: transport
+ :value: None
+
+
+
+ .. py:attribute:: auth_type
+ :value: None
+
+
+
+ .. py:attribute:: external_connection_id
+ :value: None
+
+
+
+ .. py:attribute:: inactive
+ :value: None
+
+
+
+ .. py:attribute:: tools
+ :value: None
+
+
+
+ .. py:attribute:: error_msg
+ :value: None
+
+
+
+ .. py:attribute:: metadata
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/mcp_server_query_result/index.rst.txt b/docs/_sources/autoapi/abacusai/mcp_server_query_result/index.rst.txt
new file mode 100644
index 000000000..0b7179b34
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/mcp_server_query_result/index.rst.txt
@@ -0,0 +1,50 @@
+abacusai.mcp_server_query_result
+================================
+
+.. py:module:: abacusai.mcp_server_query_result
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.mcp_server_query_result.McpServerQueryResult
+
+
+Module Contents
+---------------
+
+.. py:class:: McpServerQueryResult(client, output=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Result of an MCP server query
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param output: The execution logs as well as the final output of the MCP server query
+ :type output: dict
+
+
+ .. py:attribute:: output
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/model/index.rst.txt b/docs/_sources/autoapi/abacusai/model/index.rst.txt
index d674094c3..bdd29d339 100644
--- a/docs/_sources/autoapi/abacusai/model/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/model/index.rst.txt
@@ -15,7 +15,7 @@ Classes
Module Contents
---------------
-.. py:class:: Model(client, name=None, modelId=None, modelConfigType=None, modelPredictionConfig=None, createdAt=None, projectId=None, shared=None, sharedAt=None, trainFunctionName=None, predictFunctionName=None, predictManyFunctionName=None, initializeFunctionName=None, trainingInputTables=None, sourceCode=None, cpuSize=None, memory=None, trainingFeatureGroupIds=None, algorithmModelConfigs=None, trainingVectorStoreVersions=None, documentRetrievers=None, documentRetrieverIds=None, isPythonModel=None, defaultAlgorithm=None, customAlgorithmConfigs=None, restrictedAlgorithms=None, useGpu=None, notebookId=None, trainingRequired=None, location={}, refreshSchedules={}, codeSource={}, databaseConnector={}, dataLlmFeatureGroups={}, latestModelVersion={}, modelConfig={})
+.. py:class:: Model(client, name=None, modelId=None, modelConfigType=None, modelPredictionConfig=None, createdAt=None, projectId=None, trainFunctionName=None, predictFunctionName=None, predictManyFunctionName=None, initializeFunctionName=None, trainingInputTables=None, sourceCode=None, cpuSize=None, memory=None, trainingFeatureGroupIds=None, algorithmModelConfigs=None, trainingVectorStoreVersions=None, documentRetrievers=None, documentRetrieverIds=None, isPythonModel=None, defaultAlgorithm=None, customAlgorithmConfigs=None, restrictedAlgorithms=None, useGpu=None, notebookId=None, trainingRequired=None, location={}, refreshSchedules={}, codeSource={}, databaseConnector={}, dataLlmFeatureGroups={}, latestModelVersion={}, modelConfig={})
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -36,10 +36,6 @@ Module Contents
:type createdAt: str
:param projectId: The project this model belongs to.
:type projectId: str
- :param shared: If model is shared to the Abacus.AI model showcase.
- :type shared: bool
- :param sharedAt: The date and time at which the model was shared to the model showcase
- :type sharedAt: str
:param trainFunctionName: Name of the function found in the source code that will be executed to train the model. It is not executed when this function is run.
:type trainFunctionName: str
:param predictFunctionName: Name of the function found in the source code that will be executed run predictions through model. It is not executed when this function is run.
@@ -126,16 +122,6 @@ Module Contents
- .. py:attribute:: shared
- :value: None
-
-
-
- .. py:attribute:: shared_at
- :value: None
-
-
-
.. py:attribute:: train_function_name
:value: None
diff --git a/docs/_sources/autoapi/abacusai/organization_external_application_settings/index.rst.txt b/docs/_sources/autoapi/abacusai/organization_external_application_settings/index.rst.txt
index a6048c000..b2c4dc95c 100644
--- a/docs/_sources/autoapi/abacusai/organization_external_application_settings/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/organization_external_application_settings/index.rst.txt
@@ -15,7 +15,7 @@ Classes
Module Contents
---------------
-.. py:class:: OrganizationExternalApplicationSettings(client, logo=None, theme=None, managedUserService=None, passwordsDisabled=None)
+.. py:class:: OrganizationExternalApplicationSettings(client, logo=None, theme=None, managedUserService=None, passwordsDisabled=None, externalServiceForRoles=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -32,6 +32,8 @@ Module Contents
:type managedUserService: str
:param passwordsDisabled: Whether or not passwords are disabled for this organization's domain.
:type passwordsDisabled: bool
+ :param externalServiceForRoles: The external service that is managing the user roles.
+ :type externalServiceForRoles: str
.. py:attribute:: logo
@@ -54,6 +56,11 @@ Module Contents
+ .. py:attribute:: external_service_for_roles
+ :value: None
+
+
+
.. py:attribute:: deprecated_keys
diff --git a/docs/_sources/autoapi/abacusai/organization_secret/index.rst.txt b/docs/_sources/autoapi/abacusai/organization_secret/index.rst.txt
index 1e1ee3768..33eeca1c8 100644
--- a/docs/_sources/autoapi/abacusai/organization_secret/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/organization_secret/index.rst.txt
@@ -15,7 +15,7 @@ Classes
Module Contents
---------------
-.. py:class:: OrganizationSecret(client, secretKey=None, value=None, createdAt=None)
+.. py:class:: OrganizationSecret(client, secretKey=None, value=None, createdAt=None, metadata=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -30,6 +30,8 @@ Module Contents
:type value: str
:param createdAt: The date and time when the secret was created, in ISO-8601 format.
:type createdAt: str
+ :param metadata: Additional metadata for the secret (e.g., web_app_project_ids).
+ :type metadata: dict
.. py:attribute:: secret_key
@@ -47,6 +49,11 @@ Module Contents
+ .. py:attribute:: metadata
+ :value: None
+
+
+
.. py:attribute:: deprecated_keys
diff --git a/docs/_sources/autoapi/abacusai/prediction_client/index.rst.txt b/docs/_sources/autoapi/abacusai/prediction_client/index.rst.txt
index 1b8408fb0..0c1acb317 100644
--- a/docs/_sources/autoapi/abacusai/prediction_client/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/prediction_client/index.rst.txt
@@ -458,7 +458,7 @@ Module Contents
- .. py:method:: get_chat_response(deployment_token, deployment_id, messages, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None)
+ .. py:method:: get_chat_response(deployment_token, deployment_id, messages, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, user_info = None)
Return a chat response which continues the conversation based on the input messages and search results.
@@ -514,7 +514,7 @@ Module Contents
- .. py:method:: get_conversation_response(deployment_id, message, deployment_token, deployment_conversation_id = None, external_session_id = None, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, doc_infos = None)
+ .. py:method:: get_conversation_response(deployment_id, message, deployment_token, deployment_conversation_id = None, external_session_id = None, llm_name = None, num_completion_tokens = None, system_message = None, temperature = 0.0, filter_key_values = None, search_score_cutoff = None, chat_config = None, doc_infos = None, user_info = None, execute_usercode_tool = False)
Return a conversation response which continues the conversation based on the input message and deployment conversation id (if exists).
@@ -544,6 +544,8 @@ Module Contents
:type chat_config: dict
:param doc_infos: An optional list of documents use for the conversation. A keyword 'doc_id' is expected to be present in each document for retrieving contents from docstore.
:type doc_infos: list
+ :param execute_usercode_tool: If True, the tool output will be returned in the response.
+ :type execute_usercode_tool: bool
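+ A hedged sketch of the new flag; ``prediction_client`` is assumed to be a ``PredictionClient`` instance and all identifiers are placeholders:
+
+ .. code-block:: python
+
+    response = prediction_client.get_conversation_response(
+        deployment_id="YOUR_DEPLOYMENT_ID",        # placeholder
+        message="Summarize the latest report",     # placeholder message
+        deployment_token="YOUR_DEPLOYMENT_TOKEN",  # placeholder
+        execute_usercode_tool=True,                # include the tool output in the response
+    )
+
+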
@@ -580,6 +582,17 @@ Module Contents
+ .. py:method:: get_deep_agent_response(message, deployment_conversation_id = None)
+
+ Return a DeepAgent response based on the input message, including any generated files.
+
+ :param message: The user's message/task for DeepAgent to complete
+ :type message: str
+ :param deployment_conversation_id: The unique identifier of a deployment conversation to continue. If not specified, a new one will be created.
+ :type deployment_conversation_id: str
+
+
+
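+ A minimal sketch; ``prediction_client`` is assumed to be a ``PredictionClient`` instance, the message is a placeholder, and omitting ``deployment_conversation_id`` starts a new conversation:
+
+ .. code-block:: python
+
+    result = prediction_client.get_deep_agent_response(
+        message="Create a CSV report of last week's signups",  # placeholder task
+    )
+
+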
.. py:method:: get_search_results(deployment_token, deployment_id, query_data, num = 15)
Return the most relevant search results to the search query from the uploaded documents.
@@ -698,6 +711,36 @@ Module Contents
+ .. py:method:: get_optimization_inputs_from_serialized(deployment_token, deployment_id, query_data = None)
+
+ Get assignments for the given query, with new inputs.
+
+ :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be safely embedded in an application or website.
+ :type deployment_token: str
+ :param deployment_id: The unique identifier of a deployment created under the project.
+ :type deployment_id: str
+ :param query_data: A dictionary mapping feature group names to serialized dataframes, corresponding to the updated FGs in the FG tree that should be used to compute new top-level FGs for the online solve.
+ :type query_data: dict
+
+
+
+ .. py:method:: get_assignments_online_with_new_serialized_inputs(deployment_token, deployment_id, query_data = None, solve_time_limit_seconds = None, optimality_gap_limit = None)
+
+ Get assignments for the given query, with new inputs.
+
+ :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it can be safely embedded in an application or website.
+ :type deployment_token: str
+ :param deployment_id: The unique identifier of a deployment created under the project.
+ :type deployment_id: str
+ :param query_data: A dictionary with the keys 'assignment', 'constraint', and 'constraint_equations_df'.
+ :type query_data: dict
+ :param solve_time_limit_seconds: Maximum time in seconds to spend solving the query.
+ :type solve_time_limit_seconds: float
+ :param optimality_gap_limit: The relative optimality gap within which a solution is accepted as valid (0 means only an optimal solution is accepted). It is computed as abs(best_solution_found - best_bound) / abs(best_solution_found).
+ :type optimality_gap_limit: float
+
+
+
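+ A rough sketch of how these endpoints might be called; ``prediction_client`` and all identifiers are placeholders, and serializing the dataframes as JSON strings is an assumption rather than a documented requirement:
+
+ .. code-block:: python
+
+    import pandas as pd
+
+    updated_fg = pd.DataFrame({"item": ["a", "b"], "demand": [10, 20]})  # illustrative data
+
+    # Assumption: each updated feature group is passed as a serialized (here JSON) dataframe keyed by name.
+    inputs = prediction_client.get_optimization_inputs_from_serialized(
+        deployment_token="YOUR_DEPLOYMENT_TOKEN",  # placeholder
+        deployment_id="YOUR_DEPLOYMENT_ID",        # placeholder
+        query_data={"updated_fg_name": updated_fg.to_json(orient="records")},
+    )
+
+    # Assumption: the assignment/constraint dataframes are serialized the same way under the documented keys.
+    assignments = prediction_client.get_assignments_online_with_new_serialized_inputs(
+        deployment_token="YOUR_DEPLOYMENT_TOKEN",
+        deployment_id="YOUR_DEPLOYMENT_ID",
+        query_data={
+            "assignment": updated_fg.to_json(orient="records"),              # placeholder dataframe
+            "constraint": updated_fg.to_json(orient="records"),              # placeholder dataframe
+            "constraint_equations_df": updated_fg.to_json(orient="records"),  # placeholder dataframe
+        },
+        solve_time_limit_seconds=30.0,
+        optimality_gap_limit=0.01,  # accept a solution within 1% of the best bound
+    )
+
+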
.. py:method:: check_constraints(deployment_token, deployment_id, query_data)
Check for any constraints violated by the overrides.
@@ -855,34 +898,6 @@ Module Contents
- .. py:method:: generate_image(deployment_token, deployment_id, query_data)
-
- Generate an image from text prompt.
-
- :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model within an application or website.
- :type deployment_token: str
- :param deployment_id: A unique identifier to a deployment created under the project.
- :type deployment_id: str
- :param query_data: Specifies the text prompt. For example, {'prompt': 'a cat'}
- :type query_data: dict
-
-
-
- .. py:method:: execute_agent(deployment_token, deployment_id, arguments = None, keyword_arguments = None)
-
- Executes a deployed AI agent function using the arguments as keyword arguments to the agent execute function.
-
- :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
- :type deployment_token: str
- :param deployment_id: A unique string identifier for the deployment created under the project.
- :type deployment_id: str
- :param arguments: Positional arguments to the agent execute function.
- :type arguments: list
- :param keyword_arguments: A dictionary where each 'key' represents the paramter name and its corresponding 'value' represents the value of that parameter for the agent execute function.
- :type keyword_arguments: dict
-
-
-
.. py:method:: get_matrix_agent_schema(deployment_token, deployment_id, query, doc_infos = None, deployment_conversation_id = None, external_session_id = None)
Executes a deployed AI agent function using the arguments as keyword arguments to the agent execute function.
@@ -902,31 +917,6 @@ Module Contents
- .. py:method:: execute_conversation_agent(deployment_token, deployment_id, arguments = None, keyword_arguments = None, deployment_conversation_id = None, external_session_id = None, regenerate = False, doc_infos = None, agent_workflow_node_id = None)
-
- Executes a deployed AI agent function using the arguments as keyword arguments to the agent execute function.
-
- :param deployment_token: The deployment token used to authenticate access to created deployments. This token is only authorized to predict on deployments in this project, so it is safe to embed this model inside of an application or website.
- :type deployment_token: str
- :param deployment_id: A unique string identifier for the deployment created under the project.
- :type deployment_id: str
- :param arguments: Positional arguments to the agent execute function.
- :type arguments: list
- :param keyword_arguments: A dictionary where each 'key' represents the paramter name and its corresponding 'value' represents the value of that parameter for the agent execute function.
- :type keyword_arguments: dict
- :param deployment_conversation_id: A unique string identifier for the deployment conversation used for the conversation.
- :type deployment_conversation_id: str
- :param external_session_id: A unique string identifier for the session used for the conversation. If both deployment_conversation_id and external_session_id are not provided, a new session will be created.
- :type external_session_id: str
- :param regenerate: If True, will regenerate the response from the last query.
- :type regenerate: bool
- :param doc_infos: An optional list of documents use for the conversation. A keyword 'doc_id' is expected to be present in each document for retrieving contents from docstore.
- :type doc_infos: list
- :param agent_workflow_node_id: An optional agent workflow node id to trigger agent execution from an intermediate node.
- :type agent_workflow_node_id: str
-
-
-
.. py:method:: lookup_matches(deployment_token, deployment_id, data = None, filters = None, num = None, result_columns = None, max_words = None, num_retrieval_margin_words = None, max_words_per_chunk = None, score_multiplier_column = None, min_score = None, required_phrases = None, filter_clause = None, crowding_limits = None, include_text_search = False)
Lookup document retrievers and return the matching documents from the document retriever deployed with given query.
diff --git a/docs/_sources/autoapi/abacusai/presentation_export_result/index.rst.txt b/docs/_sources/autoapi/abacusai/presentation_export_result/index.rst.txt
new file mode 100644
index 000000000..07952f64b
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/presentation_export_result/index.rst.txt
@@ -0,0 +1,57 @@
+abacusai.presentation_export_result
+===================================
+
+.. py:module:: abacusai.presentation_export_result
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.presentation_export_result.PresentationExportResult
+
+
+Module Contents
+---------------
+
+.. py:class:: PresentationExportResult(client, filePath=None, webViewLink=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Export Presentation
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param filePath: The path to the exported presentation
+ :type filePath: str
+ :param webViewLink: The web view link to the exported presentation (if applicable)
+ :type webViewLink: str
+
+
+ .. py:attribute:: file_path
+ :value: None
+
+
+
+ .. py:attribute:: web_view_link
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
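+ A minimal handling sketch (illustrative only); ``result`` stands for a
+ ``PresentationExportResult`` returned by whichever export call produced it.
+
+ .. code-block:: python
+
+    from abacusai.presentation_export_result import PresentationExportResult
+
+    def export_location(result: PresentationExportResult) -> str:
+        # Prefer the web view link when the export produced one, otherwise
+        # fall back to the exported file path.
+        return result.web_view_link or result.file_path or '<no export produced>'
+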
diff --git a/docs/_sources/autoapi/abacusai/private_web_app_deployment/index.rst.txt b/docs/_sources/autoapi/abacusai/private_web_app_deployment/index.rst.txt
new file mode 100644
index 000000000..0cfe97bb0
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/private_web_app_deployment/index.rst.txt
@@ -0,0 +1,127 @@
+abacusai.private_web_app_deployment
+===================================
+
+.. py:module:: abacusai.private_web_app_deployment
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.private_web_app_deployment.PrivateWebAppDeployment
+
+
+Module Contents
+---------------
+
+.. py:class:: PrivateWebAppDeployment(client, hostname=None, llmArtifactId=None, hostedAppVersion=None, applicationType=None, lifecycle=None, deploymentConversationId=None, conversationName=None, userId=None, email=None, conversationType=None, source=None, conversationCreatedAt=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Private web app deployment list.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param hostname: The hostname of the web app deployment.
+ :type hostname: str
+ :param llmArtifactId: The ID of the LLM artifact.
+ :type llmArtifactId: id
+ :param hostedAppVersion: The version of the hosted app.
+ :type hostedAppVersion: id
+ :param applicationType: The type of application.
+ :type applicationType: str
+ :param lifecycle: The lifecycle of the web app deployment.
+ :type lifecycle: str
+ :param deploymentConversationId: The ID of the deployment conversation.
+ :type deploymentConversationId: id
+ :param conversationName: The name of the conversation.
+ :type conversationName: str
+ :param userId: The ID of the user who created the deployment.
+ :type userId: id
+ :param email: The email of the user who created the deployment.
+ :type email: str
+ :param conversationType: The type of conversation.
+ :type conversationType: str
+ :param source: The source of the conversation.
+ :type source: str
+ :param conversationCreatedAt: The timestamp when the conversation was created.
+ :type conversationCreatedAt: str
+
+
+ .. py:attribute:: hostname
+ :value: None
+
+
+
+ .. py:attribute:: llm_artifact_id
+ :value: None
+
+
+
+ .. py:attribute:: hosted_app_version
+ :value: None
+
+
+
+ .. py:attribute:: application_type
+ :value: None
+
+
+
+ .. py:attribute:: lifecycle
+ :value: None
+
+
+
+ .. py:attribute:: deployment_conversation_id
+ :value: None
+
+
+
+ .. py:attribute:: conversation_name
+ :value: None
+
+
+
+ .. py:attribute:: user_id
+ :value: None
+
+
+
+ .. py:attribute:: email
+ :value: None
+
+
+
+ .. py:attribute:: conversation_type
+ :value: None
+
+
+
+ .. py:attribute:: source
+ :value: None
+
+
+
+ .. py:attribute:: conversation_created_at
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/project/index.rst.txt b/docs/_sources/autoapi/abacusai/project/index.rst.txt
index 6b371bef2..513a1859c 100644
--- a/docs/_sources/autoapi/abacusai/project/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/project/index.rst.txt
@@ -189,6 +189,17 @@ Module Contents
+ .. py:method:: add_scope_for_user(email, scope)
+
+ Add a user to a project with the given scopes.
+
+ :param email: The user's email.
+ :type email: str
+ :param scope: The list of project scopes.
+ :type scope: list
+
+
+
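+ A short usage sketch (illustrative only). It assumes a ``Project`` instance obtained
+ through the client; the scope values shown are placeholders, not a documented list.
+
+ .. code-block:: python
+
+    from abacusai import ApiClient
+
+    client = ApiClient(api_key='YOUR_API_KEY')
+    project = client.describe_project('PROJECT_ID')
+
+    # Grant the user the listed scopes on this project (values are placeholders).
+    project.add_scope_for_user(email='teammate@example.com', scope=['READ', 'WRITE'])
+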
.. py:method:: describe_feature_group(feature_group_id)
Describe a feature group associated with a project
@@ -410,12 +421,12 @@ Module Contents
- .. py:method:: list_model_monitors()
+ .. py:method:: list_model_monitors(limit = None)
Retrieves the list of model monitors in the specified project.
- :param project_id: Unique string identifier associated with the project.
- :type project_id: str
+ :param limit: Maximum number of model monitors to return. An internal limit is applied if not set.
+ :type limit: int
:returns: A list of model monitors.
:rtype: list[ModelMonitor]
@@ -657,12 +668,12 @@ Module Contents
- .. py:method:: list_batch_predictions()
+ .. py:method:: list_batch_predictions(limit = None)
Retrieves a list of batch predictions in the project.
- :param project_id: Unique string identifier of the project.
- :type project_id: str
+ :param limit: Maximum number of batch predictions to return. An internal limit is applied if not set.
+ :type limit: int
:returns: List of batch prediction jobs.
:rtype: list[BatchPrediction]
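
   A short usage sketch for the new ``limit`` parameters (illustrative only; it assumes a
   ``Project`` instance obtained through the client):

   .. code-block:: python

      from abacusai import ApiClient

      client = ApiClient(api_key='YOUR_API_KEY')
      project = client.describe_project('PROJECT_ID')

      # Cap both listings explicitly instead of relying on the internal default limit.
      recent_monitors = project.list_model_monitors(limit=5)
      recent_batch_predictions = project.list_batch_predictions(limit=10)
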
diff --git a/docs/_sources/autoapi/abacusai/python_function/index.rst.txt b/docs/_sources/autoapi/abacusai/python_function/index.rst.txt
index 6e535854a..908dfefc0 100644
--- a/docs/_sources/autoapi/abacusai/python_function/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/python_function/index.rst.txt
@@ -15,7 +15,7 @@ Classes
Module Contents
---------------
-.. py:class:: PythonFunction(client, notebookId=None, name=None, createdAt=None, functionVariableMappings=None, outputVariableMappings=None, functionName=None, pythonFunctionId=None, functionType=None, packageRequirements=None, description=None, examples=None, codeSource={})
+.. py:class:: PythonFunction(client, notebookId=None, name=None, createdAt=None, functionVariableMappings=None, outputVariableMappings=None, functionName=None, pythonFunctionId=None, functionType=None, packageRequirements=None, description=None, examples=None, connectors=None, configurations=None, codeSource={})
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -46,6 +46,10 @@ Module Contents
:type description: str
:param examples: Dictionary containing example use cases and anti-patterns. Includes 'positive' examples showing recommended usage and 'negative' examples showing cases to avoid.
:type examples: dict[str, list[str]]
+ :param connectors: Dictionary containing user-level and organization-level connectors
+ :type connectors: dict
+ :param configurations: Dictionary containing configurations for the Python function
+ :type configurations: dict
:param codeSource: Information about the source code of the Python function.
:type codeSource: CodeSource
@@ -105,6 +109,16 @@ Module Contents
+ .. py:attribute:: connectors
+ :value: None
+
+
+
+ .. py:attribute:: configurations
+ :value: None
+
+
+
.. py:attribute:: code_source
diff --git a/docs/_sources/autoapi/abacusai/regenerate_llm_external_application/index.rst.txt b/docs/_sources/autoapi/abacusai/regenerate_llm_external_application/index.rst.txt
index 8b7e47f86..663da6979 100644
--- a/docs/_sources/autoapi/abacusai/regenerate_llm_external_application/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/regenerate_llm_external_application/index.rst.txt
@@ -15,7 +15,7 @@ Classes
Module Contents
---------------
-.. py:class:: RegenerateLlmExternalApplication(client, name=None, externalApplicationId=None)
+.. py:class:: RegenerateLlmExternalApplication(client, name=None, llmBotIcon=None, llmName=None)
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -24,10 +24,12 @@ Module Contents
:param client: An authenticated API Client instance
:type client: ApiClient
- :param name: The external name of the LLM.
+ :param name: The display name of the LLM (e.g. GPT-5)
:type name: str
- :param externalApplicationId: The unique identifier of the external application.
- :type externalApplicationId: str
+ :param llmBotIcon: The bot icon of the LLM.
+ :type llmBotIcon: str
+ :param llmName: The external name of the LLM (e.g. OPENAI_GPT5)
+ :type llmName: str
.. py:attribute:: name
@@ -35,7 +37,12 @@ Module Contents
- .. py:attribute:: external_application_id
+ .. py:attribute:: llm_bot_icon
+ :value: None
+
+
+
+ .. py:attribute:: llm_name
:value: None
diff --git a/docs/_sources/autoapi/abacusai/regenerate_llm_option/index.rst.txt b/docs/_sources/autoapi/abacusai/regenerate_llm_option/index.rst.txt
new file mode 100644
index 000000000..6a745d319
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/regenerate_llm_option/index.rst.txt
@@ -0,0 +1,64 @@
+abacusai.regenerate_llm_option
+==============================
+
+.. py:module:: abacusai.regenerate_llm_option
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.regenerate_llm_option.RegenerateLlmOption
+
+
+Module Contents
+---------------
+
+.. py:class:: RegenerateLlmOption(client, name=None, llmBotIcon=None, llmName=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ An option that specifies an LLM user can regenerate with in RouteLLM.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param name: The display name of the LLM (e.g. GPT-5)
+ :type name: str
+ :param llmBotIcon: The bot icon of the LLM.
+ :type llmBotIcon: str
+ :param llmName: The external name of the LLM (e.g. OPENAI_GPT5)
+ :type llmName: str
+
+
+ .. py:attribute:: name
+ :value: None
+
+
+
+ .. py:attribute:: llm_bot_icon
+ :value: None
+
+
+
+ .. py:attribute:: llm_name
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/schema/index.rst.txt b/docs/_sources/autoapi/abacusai/schema/index.rst.txt
index d4188258a..7a0ea37c0 100644
--- a/docs/_sources/autoapi/abacusai/schema/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/schema/index.rst.txt
@@ -26,7 +26,7 @@ Module Contents
:type client: ApiClient
:param name: The unique name of the feature.
:type name: str
- :param featureMapping: The mapping of the feature. The possible values will be based on the project's use-case. See the (Use Case Documentation)[https://api.abacus.ai/app/help/useCases] for more details.
+ :param featureMapping: The mapping of the feature. The possible values will be based on the project's use-case. See the (Use Case Documentation)[https://api.abacus.ai/app/help/developer-platform/useCases] for more details.
:type featureMapping: str
:param detectedFeatureMapping: Detected feature mapping for this feature
:type detectedFeatureMapping: str
diff --git a/docs/_sources/autoapi/abacusai/session_summary/index.rst.txt b/docs/_sources/autoapi/abacusai/session_summary/index.rst.txt
new file mode 100644
index 000000000..7d75ea8e4
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/session_summary/index.rst.txt
@@ -0,0 +1,50 @@
+abacusai.session_summary
+========================
+
+.. py:module:: abacusai.session_summary
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.session_summary.SessionSummary
+
+
+Module Contents
+---------------
+
+.. py:class:: SessionSummary(client, summary=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ A session summary
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param summary: The summary of the session.
+ :type summary: str
+
+
+ .. py:attribute:: summary
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/session_transcript/index.rst.txt b/docs/_sources/autoapi/abacusai/session_transcript/index.rst.txt
new file mode 100644
index 000000000..3620c57d7
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/session_transcript/index.rst.txt
@@ -0,0 +1,57 @@
+abacusai.session_transcript
+===========================
+
+.. py:module:: abacusai.session_transcript
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.session_transcript.SessionTranscript
+
+
+Module Contents
+---------------
+
+.. py:class:: SessionTranscript(client, role=None, content=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ A session transcript
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param role: The role of the transcript.
+ :type role: str
+ :param content: The content of the transcript.
+ :type content: str
+
+
+ .. py:attribute:: role
+ :value: None
+
+
+
+ .. py:attribute:: content
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/session_transcripts/index.rst.txt b/docs/_sources/autoapi/abacusai/session_transcripts/index.rst.txt
new file mode 100644
index 000000000..222a6bc3a
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/session_transcripts/index.rst.txt
@@ -0,0 +1,50 @@
+abacusai.session_transcripts
+============================
+
+.. py:module:: abacusai.session_transcripts
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.session_transcripts.SessionTranscripts
+
+
+Module Contents
+---------------
+
+.. py:class:: SessionTranscripts(client, transcripts=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ A list of session transcripts
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param transcripts: A list of session transcripts.
+ :type transcripts: list[SessionTranscript]
+
+
+ .. py:attribute:: transcripts
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
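+ A minimal handling sketch (illustrative only), flattening the transcripts into a
+ plain chat log using the ``role`` and ``content`` fields documented for
+ ``SessionTranscript``.
+
+ .. code-block:: python
+
+    from abacusai.session_transcripts import SessionTranscripts
+
+    def format_transcripts(session: SessionTranscripts) -> str:
+        # Each entry carries a role and its content.
+        lines = [f"{t.role}: {t.content}" for t in (session.transcripts or [])]
+        return "\n".join(lines)
+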
diff --git a/docs/_sources/autoapi/abacusai/sts_gen_settings/index.rst.txt b/docs/_sources/autoapi/abacusai/sts_gen_settings/index.rst.txt
new file mode 100644
index 000000000..cc517cbfe
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/sts_gen_settings/index.rst.txt
@@ -0,0 +1,57 @@
+abacusai.sts_gen_settings
+=========================
+
+.. py:module:: abacusai.sts_gen_settings
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.sts_gen_settings.StsGenSettings
+
+
+Module Contents
+---------------
+
+.. py:class:: StsGenSettings(client, model=None, settings=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ STS generation settings
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param model: The model settings.
+ :type model: dict
+ :param settings: The settings for each model.
+ :type settings: dict
+
+
+ .. py:attribute:: model
+ :value: None
+
+
+
+ .. py:attribute:: settings
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/stt_gen_model/index.rst.txt b/docs/_sources/autoapi/abacusai/stt_gen_model/index.rst.txt
new file mode 100644
index 000000000..c57103c2e
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/stt_gen_model/index.rst.txt
@@ -0,0 +1,90 @@
+abacusai.stt_gen_model
+======================
+
+.. py:module:: abacusai.stt_gen_model
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.stt_gen_model.SttGenModel
+
+
+Module Contents
+---------------
+
+.. py:class:: SttGenModel(client, displayName=None, type=None, valueType=None, optional=None, default=None, helptext=None, options={})
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ STT generation model
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param displayName: The display name for the UI component.
+ :type displayName: str
+ :param type: The type of the UI component.
+ :type type: str
+ :param valueType: The data type of the values within the UI component.
+ :type valueType: str
+ :param optional: Whether the selection of a value is optional.
+ :type optional: bool
+ :param default: The default value for the STT generation model.
+ :type default: str
+ :param helptext: The helptext for the UI component.
+ :type helptext: str
+ :param options: The options of models available for STT generation.
+ :type options: SttGenModelOptions
+
+
+ .. py:attribute:: display_name
+ :value: None
+
+
+
+ .. py:attribute:: type
+ :value: None
+
+
+
+ .. py:attribute:: value_type
+ :value: None
+
+
+
+ .. py:attribute:: optional
+ :value: None
+
+
+
+ .. py:attribute:: default
+ :value: None
+
+
+
+ .. py:attribute:: helptext
+ :value: None
+
+
+
+ .. py:attribute:: options
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/stt_gen_model_options/index.rst.txt b/docs/_sources/autoapi/abacusai/stt_gen_model_options/index.rst.txt
new file mode 100644
index 000000000..0233a8d6f
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/stt_gen_model_options/index.rst.txt
@@ -0,0 +1,57 @@
+abacusai.stt_gen_model_options
+==============================
+
+.. py:module:: abacusai.stt_gen_model_options
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.stt_gen_model_options.SttGenModelOptions
+
+
+Module Contents
+---------------
+
+.. py:class:: SttGenModelOptions(client, keys=None, values=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ STT generation model options
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param keys: The keys of the STT generation model options represented as the enum values.
+ :type keys: list
+ :param values: The display names of the STT generation model options.
+ :type values: list
+
+
+ .. py:attribute:: keys
+ :value: None
+
+
+
+ .. py:attribute:: values
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/stt_gen_settings/index.rst.txt b/docs/_sources/autoapi/abacusai/stt_gen_settings/index.rst.txt
new file mode 100644
index 000000000..2b6030b01
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/stt_gen_settings/index.rst.txt
@@ -0,0 +1,55 @@
+abacusai.stt_gen_settings
+=========================
+
+.. py:module:: abacusai.stt_gen_settings
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.stt_gen_settings.SttGenSettings
+
+
+Module Contents
+---------------
+
+.. py:class:: SttGenSettings(client, settings=None, model={})
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ STT generation settings
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param settings: The settings for each model.
+ :type settings: dict
+ :param model: Dropdown for models available for STT generation.
+ :type model: SttGenModel
+
+
+ .. py:attribute:: settings
+ :value: None
+
+
+
+ .. py:attribute:: model
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/tts_gen_settings/index.rst.txt b/docs/_sources/autoapi/abacusai/tts_gen_settings/index.rst.txt
new file mode 100644
index 000000000..58761e340
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/tts_gen_settings/index.rst.txt
@@ -0,0 +1,57 @@
+abacusai.tts_gen_settings
+=========================
+
+.. py:module:: abacusai.tts_gen_settings
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.tts_gen_settings.TtsGenSettings
+
+
+Module Contents
+---------------
+
+.. py:class:: TtsGenSettings(client, model=None, settings=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ TTS generation settings
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param model: The model settings.
+ :type model: dict
+ :param settings: The settings for each model.
+ :type settings: dict
+
+
+ .. py:attribute:: model
+ :value: None
+
+
+
+ .. py:attribute:: settings
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/unified_connector/index.rst.txt b/docs/_sources/autoapi/abacusai/unified_connector/index.rst.txt
new file mode 100644
index 000000000..171c1bab3
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/unified_connector/index.rst.txt
@@ -0,0 +1,99 @@
+abacusai.unified_connector
+==========================
+
+.. py:module:: abacusai.unified_connector
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.unified_connector.UnifiedConnector
+
+
+Module Contents
+---------------
+
+.. py:class:: UnifiedConnector(client, applicationConnectorId=None, databaseConnectorId=None, service=None, name=None, createdAt=None, status=None, auth=None, isUserLevel=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Lightweight unified connector filter that skips expensive auth transformations.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param applicationConnectorId: The unique ID for the connection.
+ :type applicationConnectorId: str
+ :param databaseConnectorId: The unique ID for the connection.
+ :type databaseConnectorId: str
+ :param service: The service this connection connects to
+ :type service: str
+ :param name: A user-friendly name for the service
+ :type name: str
+ :param createdAt: When the connector was created
+ :type createdAt: str
+ :param status: The status of the Application Connector
+ :type status: str
+ :param auth: Non-secret connection information for this connector
+ :type auth: dict
+ :param isUserLevel: Whether this is a user-level connector
+ :type isUserLevel: bool
+
+
+ .. py:attribute:: application_connector_id
+ :value: None
+
+
+
+ .. py:attribute:: database_connector_id
+ :value: None
+
+
+
+ .. py:attribute:: service
+ :value: None
+
+
+
+ .. py:attribute:: name
+ :value: None
+
+
+
+ .. py:attribute:: created_at
+ :value: None
+
+
+
+ .. py:attribute:: status
+ :value: None
+
+
+
+ .. py:attribute:: auth
+ :value: None
+
+
+
+ .. py:attribute:: is_user_level
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/upload/index.rst.txt b/docs/_sources/autoapi/abacusai/upload/index.rst.txt
index b635332b1..632857943 100644
--- a/docs/_sources/autoapi/abacusai/upload/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/upload/index.rst.txt
@@ -176,7 +176,7 @@ Module Contents
- .. py:method:: upload_file(file, threads=10, chunksize=1024 * 1024 * 10, wait_timeout=600)
+ .. py:method:: upload_file(file, threads=10, chunksize=None, wait_timeout=600)
Uploads the file in the specified chunk size using the specified number of workers.
@@ -184,7 +184,7 @@ Module Contents
:type file: IOBase
:param threads: The max number of workers to use while uploading the file
:type threads: int
- :param chunksize: The number of bytes to use for each chunk while uploading the file. Defaults to 10 MB
+ :param chunksize: The number of bytes to use for each chunk while uploading the file. If None, it is calculated dynamically based on the file size (defaults to 20 MB).
:type chunksize: int
:param wait_timeout: The max number of seconds to wait for the file parts to be joined on Abacus.AI. Defaults to 600.
:type wait_timeout: int
@@ -194,6 +194,9 @@ Module Contents
+ .. py:method:: _calculate_chunk_size(file_size)
+
+
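+ The sizing logic of ``_calculate_chunk_size`` is not documented here; the sketch below
+ only illustrates the general idea of growing the chunk size with the file size, and
+ every threshold in it is an assumption, not the library's real values.
+
+ .. code-block:: python
+
+    def calculate_chunk_size(file_size: int,
+                             default_chunk: int = 20 * 1024 * 1024,
+                             max_parts: int = 1000) -> int:
+        # Start from the documented 20 MB default and enlarge the chunks only
+        # when the file would otherwise split into too many parts.
+        if file_size <= default_chunk * max_parts:
+            return default_chunk
+        return -(-file_size // max_parts)  # ceiling division
+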
.. py:method:: _yield_upload_part(file, chunksize)
diff --git a/docs/_sources/autoapi/abacusai/user/index.rst.txt b/docs/_sources/autoapi/abacusai/user/index.rst.txt
index d07e99ffe..8e36d76f2 100644
--- a/docs/_sources/autoapi/abacusai/user/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/user/index.rst.txt
@@ -15,15 +15,17 @@ Classes
Module Contents
---------------
-.. py:class:: User(client, name=None, email=None, createdAt=None, status=None, organizationGroups={})
+.. py:class:: User(client, userId=None, name=None, email=None, createdAt=None, status=None, organizationGroups={})
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
- An Abacus.AI User
+ An Abacus.AI User with a unique user_id.
:param client: An authenticated API Client instance
:type client: ApiClient
+ :param userId: The unique identifier of the user.
+ :type userId: id
:param name: The User's name.
:type name: str
:param email: The User's primary email address.
@@ -36,6 +38,11 @@ Module Contents
:type organizationGroups: OrganizationGroup
+ .. py:attribute:: user_id
+ :value: None
+
+
+
.. py:attribute:: name
:value: None
diff --git a/docs/_sources/autoapi/abacusai/user_group_object_permission/index.rst.txt b/docs/_sources/autoapi/abacusai/user_group_object_permission/index.rst.txt
new file mode 100644
index 000000000..8c499f9e7
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/user_group_object_permission/index.rst.txt
@@ -0,0 +1,71 @@
+abacusai.user_group_object_permission
+=====================================
+
+.. py:module:: abacusai.user_group_object_permission
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.user_group_object_permission.UserGroupObjectPermission
+
+
+Module Contents
+---------------
+
+.. py:class:: UserGroupObjectPermission(client, userGroupId=None, userGroupName=None, objectId=None, permission=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ A user group object permission
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param userGroupId: The unique identifier of the user group.
+ :type userGroupId: str
+ :param userGroupName: The name of the user group.
+ :type userGroupName: str
+ :param objectId: The unique identifier of the object.
+ :type objectId: str
+ :param permission: The permission level (e.g., 'ALL').
+ :type permission: str
+
+
+ .. py:attribute:: user_group_id
+ :value: None
+
+
+
+ .. py:attribute:: user_group_name
+ :value: None
+
+
+
+ .. py:attribute:: object_id
+ :value: None
+
+
+
+ .. py:attribute:: permission
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/video_gen_costs/index.rst.txt b/docs/_sources/autoapi/abacusai/video_gen_costs/index.rst.txt
new file mode 100644
index 000000000..955247356
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/video_gen_costs/index.rst.txt
@@ -0,0 +1,57 @@
+abacusai.video_gen_costs
+========================
+
+.. py:module:: abacusai.video_gen_costs
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.video_gen_costs.VideoGenCosts
+
+
+Module Contents
+---------------
+
+.. py:class:: VideoGenCosts(client, modelCosts=None, expensiveModels=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ The most expensive price, in credits, for each video generation model.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param modelCosts: The costs of the video gen models in credits
+ :type modelCosts: dict
+ :param expensiveModels: The list of video gen models that are expensive
+ :type expensiveModels: list
+
+
+ .. py:attribute:: model_costs
+ :value: None
+
+
+
+ .. py:attribute:: expensive_models
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/video_gen_model/index.rst.txt b/docs/_sources/autoapi/abacusai/video_gen_model/index.rst.txt
new file mode 100644
index 000000000..001ce9b01
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/video_gen_model/index.rst.txt
@@ -0,0 +1,90 @@
+abacusai.video_gen_model
+========================
+
+.. py:module:: abacusai.video_gen_model
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.video_gen_model.VideoGenModel
+
+
+Module Contents
+---------------
+
+.. py:class:: VideoGenModel(client, displayName=None, type=None, valueType=None, optional=None, default=None, helptext=None, options={})
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Video generation model
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param displayName: The display name for the UI component.
+ :type displayName: str
+ :param type: The type of the UI component.
+ :type type: str
+ :param valueType: The data type of the values within the UI component.
+ :type valueType: str
+ :param optional: Whether the selection of a value is optional.
+ :type optional: bool
+ :param default: The default value for the video generation model.
+ :type default: str
+ :param helptext: The helptext for the UI component.
+ :type helptext: str
+ :param options: The options of models available for video generation.
+ :type options: VideoGenModelOptions
+
+
+ .. py:attribute:: display_name
+ :value: None
+
+
+
+ .. py:attribute:: type
+ :value: None
+
+
+
+ .. py:attribute:: value_type
+ :value: None
+
+
+
+ .. py:attribute:: optional
+ :value: None
+
+
+
+ .. py:attribute:: default
+ :value: None
+
+
+
+ .. py:attribute:: helptext
+ :value: None
+
+
+
+ .. py:attribute:: options
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/video_gen_model_options/index.rst.txt b/docs/_sources/autoapi/abacusai/video_gen_model_options/index.rst.txt
new file mode 100644
index 000000000..f4f86c32b
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/video_gen_model_options/index.rst.txt
@@ -0,0 +1,57 @@
+abacusai.video_gen_model_options
+================================
+
+.. py:module:: abacusai.video_gen_model_options
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.video_gen_model_options.VideoGenModelOptions
+
+
+Module Contents
+---------------
+
+.. py:class:: VideoGenModelOptions(client, keys=None, values=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Video generation model options
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param keys: The keys of the video generation model options represented as the enum values.
+ :type keys: list
+ :param values: The display names of the video generation model options.
+ :type values: list
+
+
+ .. py:attribute:: keys
+ :value: None
+
+
+
+ .. py:attribute:: values
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/video_gen_settings/index.rst.txt b/docs/_sources/autoapi/abacusai/video_gen_settings/index.rst.txt
index 70d3a2d48..e739211da 100644
--- a/docs/_sources/autoapi/abacusai/video_gen_settings/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/video_gen_settings/index.rst.txt
@@ -15,7 +15,7 @@ Classes
Module Contents
---------------
-.. py:class:: VideoGenSettings(client, model=None, settings=None)
+.. py:class:: VideoGenSettings(client, settings=None, warnings=None, model={})
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -24,22 +24,27 @@ Module Contents
:param client: An authenticated API Client instance
:type client: ApiClient
- :param model: The model settings.
- :type model: dict
:param settings: The settings for each model.
:type settings: dict
+ :param warnings: The warnings for each model.
+ :type warnings: dict
+ :param model: Dropdown for models available for video generation.
+ :type model: VideoGenModel
- .. py:attribute:: model
+ .. py:attribute:: settings
:value: None
- .. py:attribute:: settings
+ .. py:attribute:: warnings
:value: None
+ .. py:attribute:: model
+
+
.. py:attribute:: deprecated_keys
diff --git a/docs/_sources/autoapi/abacusai/web_app_conversation/index.rst.txt b/docs/_sources/autoapi/abacusai/web_app_conversation/index.rst.txt
new file mode 100644
index 000000000..c04b3c227
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/web_app_conversation/index.rst.txt
@@ -0,0 +1,78 @@
+abacusai.web_app_conversation
+=============================
+
+.. py:module:: abacusai.web_app_conversation
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.web_app_conversation.WebAppConversation
+
+
+Module Contents
+---------------
+
+.. py:class:: WebAppConversation(client, deploymentConversationId=None, llmArtifactId=None, deploymentConversationName=None, externalApplicationId=None, createdAt=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Web App Conversation
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param deploymentConversationId: The ID of the deployment conversation
+ :type deploymentConversationId: id
+ :param llmArtifactId: The ID of the LLM artifact
+ :type llmArtifactId: id
+ :param deploymentConversationName: The name of the conversation
+ :type deploymentConversationName: str
+ :param externalApplicationId: The external application ID
+ :type externalApplicationId: str
+ :param createdAt: The creation timestamp
+ :type createdAt: str
+
+
+ .. py:attribute:: deployment_conversation_id
+ :value: None
+
+
+
+ .. py:attribute:: llm_artifact_id
+ :value: None
+
+
+
+ .. py:attribute:: deployment_conversation_name
+ :value: None
+
+
+
+ .. py:attribute:: external_application_id
+ :value: None
+
+
+
+ .. py:attribute:: created_at
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/web_app_deployment_permission_dict/index.rst.txt b/docs/_sources/autoapi/abacusai/web_app_deployment_permission_dict/index.rst.txt
new file mode 100644
index 000000000..b5182a5a0
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/web_app_deployment_permission_dict/index.rst.txt
@@ -0,0 +1,50 @@
+abacusai.web_app_deployment_permission_dict
+===========================================
+
+.. py:module:: abacusai.web_app_deployment_permission_dict
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.web_app_deployment_permission_dict.WebAppDeploymentPermissionDict
+
+
+Module Contents
+---------------
+
+.. py:class:: WebAppDeploymentPermissionDict(client, deploymentPermissions=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Web app deployment permission dict.
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param deploymentPermissions: Dictionary mapping each deployment ID to a list of (user_group_id, permission) tuples.
+ :type deploymentPermissions: dict
+
+
+ .. py:attribute:: deployment_permissions
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/web_app_domain/index.rst.txt b/docs/_sources/autoapi/abacusai/web_app_domain/index.rst.txt
new file mode 100644
index 000000000..7e670bef7
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/web_app_domain/index.rst.txt
@@ -0,0 +1,106 @@
+abacusai.web_app_domain
+=======================
+
+.. py:module:: abacusai.web_app_domain
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.web_app_domain.WebAppDomain
+
+
+Module Contents
+---------------
+
+.. py:class:: WebAppDomain(client, webAppDomainId=None, hostname=None, domainType=None, lifecycle=None, nameservers=None, dnsRecords=None, metadata=None, isRootDomain=None, isDeployed=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ Web App Domain
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param webAppDomainId: The ID of the web app domain
+ :type webAppDomainId: id
+ :param hostname: The hostname of the web app domain
+ :type hostname: str
+ :param domainType: The type of the web app domain
+ :type domainType: str
+ :param lifecycle: The lifecycle of the web app domain
+ :type lifecycle: str
+ :param nameservers: The nameservers of the web app domain
+ :type nameservers: list
+ :param dnsRecords: The DNS records of the web app domain
+ :type dnsRecords: list
+ :param metadata: The metadata of the web app domain
+ :type metadata: dict
+ :param isRootDomain: Whether the web app domain is a root domain
+ :type isRootDomain: bool
+ :param isDeployed: Whether the web app domain is deployed
+ :type isDeployed: bool
+
+
+ .. py:attribute:: web_app_domain_id
+ :value: None
+
+
+
+ .. py:attribute:: hostname
+ :value: None
+
+
+
+ .. py:attribute:: domain_type
+ :value: None
+
+
+
+ .. py:attribute:: lifecycle
+ :value: None
+
+
+
+ .. py:attribute:: nameservers
+ :value: None
+
+
+
+ .. py:attribute:: dns_records
+ :value: None
+
+
+
+ .. py:attribute:: metadata
+ :value: None
+
+
+
+ .. py:attribute:: is_root_domain
+ :value: None
+
+
+
+ .. py:attribute:: is_deployed
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/web_service_trigger_run/index.rst.txt b/docs/_sources/autoapi/abacusai/web_service_trigger_run/index.rst.txt
new file mode 100644
index 000000000..d1e1a37a0
--- /dev/null
+++ b/docs/_sources/autoapi/abacusai/web_service_trigger_run/index.rst.txt
@@ -0,0 +1,106 @@
+abacusai.web_service_trigger_run
+================================
+
+.. py:module:: abacusai.web_service_trigger_run
+
+
+Classes
+-------
+
+.. autoapisummary::
+
+ abacusai.web_service_trigger_run.WebServiceTriggerRun
+
+
+Module Contents
+---------------
+
+.. py:class:: WebServiceTriggerRun(client, endpoint=None, createdAt=None, payload=None, headers=None, method=None, responseStatus=None, responseBody=None, error=None, lifecycle=None)
+
+ Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
+
+
+ A web service trigger run
+
+ :param client: An authenticated API Client instance
+ :type client: ApiClient
+ :param endpoint: The endpoint of the web service trigger run.
+ :type endpoint: str
+ :param createdAt: The creation time of the web service trigger run.
+ :type createdAt: str
+ :param payload: The payload of the web service trigger run.
+ :type payload: dict
+ :param headers: The headers of the web service trigger run.
+ :type headers: dict
+ :param method: The method of the web service trigger run.
+ :type method: str
+ :param responseStatus: The HTTP response status code.
+ :type responseStatus: int
+ :param responseBody: The HTTP response body.
+ :type responseBody: str
+ :param error: Error message if the request failed.
+ :type error: str
+ :param lifecycle: The lifecycle status of the run.
+ :type lifecycle: str
+
+
+ .. py:attribute:: endpoint
+ :value: None
+
+
+
+ .. py:attribute:: created_at
+ :value: None
+
+
+
+ .. py:attribute:: payload
+ :value: None
+
+
+
+ .. py:attribute:: headers
+ :value: None
+
+
+
+ .. py:attribute:: method
+ :value: None
+
+
+
+ .. py:attribute:: response_status
+ :value: None
+
+
+
+ .. py:attribute:: response_body
+ :value: None
+
+
+
+ .. py:attribute:: error
+ :value: None
+
+
+
+ .. py:attribute:: lifecycle
+ :value: None
+
+
+
+ .. py:attribute:: deprecated_keys
+
+
+ .. py:method:: __repr__()
+
+
+ .. py:method:: to_dict()
+
+ Get a dict representation of the parameters in this class
+
+ :returns: The dict value representation of the class parameters
+ :rtype: dict
+
+
+
diff --git a/docs/_sources/autoapi/abacusai/workflow_graph_node_details/index.rst.txt b/docs/_sources/autoapi/abacusai/workflow_graph_node_details/index.rst.txt
index 39ac65a6e..f5011d784 100644
--- a/docs/_sources/autoapi/abacusai/workflow_graph_node_details/index.rst.txt
+++ b/docs/_sources/autoapi/abacusai/workflow_graph_node_details/index.rst.txt
@@ -15,7 +15,7 @@ Classes
Module Contents
---------------
-.. py:class:: WorkflowGraphNodeDetails(client, packageRequirements=None, workflowGraphNode={})
+.. py:class:: WorkflowGraphNodeDetails(client, packageRequirements=None, connectors=None, workflowGraphNode={})
Bases: :py:obj:`abacusai.return_class.AbstractApiClass`
@@ -26,6 +26,8 @@ Module Contents
:type client: ApiClient
:param packageRequirements: A list of package requirements that the node source code will need.
:type packageRequirements: list[str]
+ :param connectors: A dictionary of connectors that the node source code will need.
+ :type connectors: dict
:param workflowGraphNode: The workflow graph node object.
:type workflowGraphNode: WorkflowGraphNode
@@ -35,6 +37,11 @@ Module Contents
+ .. py:attribute:: connectors
+ :value: None
+
+
+
.. py:attribute:: workflow_graph_node
diff --git a/docs/autoapi/abacusai/abacus_api/index.html b/docs/autoapi/abacusai/abacus_api/index.html
index 09f3ba5ad..4df5da7de 100644
--- a/docs/autoapi/abacusai/abacus_api/index.html
+++ b/docs/autoapi/abacusai/abacus_api/index.html
@@ -73,6 +73,8 @@
abacusai.app_user_group_sign_in_token
abacusai.application_connector
abacusai.audio_gen_settings
+abacusai.audio_url_result
+abacusai.audit_log
abacusai.batch_prediction
abacusai.batch_prediction_version
abacusai.batch_prediction_version_logs
@@ -81,25 +83,38 @@
abacusai.chat_message
abacusai.chat_session
abacusai.chatllm_computer
+abacusai.chatllm_computer_status
+abacusai.chatllm_memory
+abacusai.chatllm_project
+abacusai.chatllm_project_permissions
abacusai.chatllm_referral_invite
abacusai.chatllm_task
abacusai.client
abacusai.code_agent_response
+abacusai.code_autocomplete_edit_prediction_response
abacusai.code_autocomplete_response
abacusai.code_bot
abacusai.code_edit
abacusai.code_edit_response
abacusai.code_edits
+abacusai.code_embeddings
+abacusai.code_llm_changed_files
abacusai.code_source
+abacusai.code_suggestion_validation_response
+abacusai.code_summary_response
+abacusai.codellm_embedding_constants
abacusai.compute_point_info
abacusai.concatenation_config
+abacusai.constants_autocomplete_response
abacusai.cpu_gpu_memory_specs
abacusai.cryptography
abacusai.custom_chat_instructions
+abacusai.custom_domain
abacusai.custom_loss_function
abacusai.custom_metric
abacusai.custom_metric_version
abacusai.custom_train_function_info
+abacusai.daemon_task_conversation
abacusai.data_consistency_duplication
abacusai.data_metrics
abacusai.data_prep_logs
@@ -113,6 +128,7 @@
abacusai.dataset_column
abacusai.dataset_version
abacusai.dataset_version_logs
+abacusai.default_llm
abacusai.deployment
abacusai.deployment_auth_token
abacusai.deployment_conversation
@@ -123,6 +139,9 @@
abacusai.document_retriever
abacusai.document_retriever_lookup_result
abacusai.document_retriever_version
+abacusai.document_retriever_version_logs
+abacusai.domain_purchase_result
+abacusai.domain_search_result
abacusai.drift_distribution
abacusai.drift_distributions
abacusai.eda
@@ -133,7 +152,9 @@
abacusai.eda_feature_collinearity
abacusai.eda_forecasting_analysis
abacusai.eda_version
+abacusai.edit_image_models
abacusai.embedding_feature_drift_distribution
+abacusai.entri_auth_token
abacusai.execute_feature_group_operation
abacusai.external_application
abacusai.external_invite
@@ -160,7 +181,6 @@
abacusai.feature_importance
abacusai.feature_mapping
abacusai.feature_performance_analysis
-abacusai.feature_record
abacusai.file_connector
abacusai.file_connector_instructions
abacusai.file_connector_verification
@@ -168,20 +188,32 @@
abacusai.forecasting_analysis_graph_data
abacusai.forecasting_monitor_item_analysis
abacusai.forecasting_monitor_summary
+abacusai.fs_entry
abacusai.function_logs
abacusai.generated_pit_feature_config_option
abacusai.global_context
abacusai.graph_dashboard
abacusai.holdout_analysis
abacusai.holdout_analysis_version
+abacusai.hosted_app
+abacusai.hosted_app_container
+abacusai.hosted_app_file_read
+abacusai.hosted_artifact
+abacusai.hosted_database
+abacusai.hosted_database_snapshot
abacusai.hosted_model_token
+abacusai.hostname_info
abacusai.hume_voice
+abacusai.image_gen_model
+abacusai.image_gen_model_options
abacusai.image_gen_settings
abacusai.indexing_config
abacusai.inferred_database_column_to_feature_mappings
abacusai.inferred_feature_mappings
abacusai.item_statistics
+abacusai.lip_sync_gen_settings
abacusai.llm_app
+abacusai.llm_artifact
abacusai.llm_code_block
abacusai.llm_execution_preview
abacusai.llm_execution_result
@@ -189,6 +221,10 @@
abacusai.llm_input
abacusai.llm_parameters
abacusai.llm_response
+abacusai.mcp_config
+abacusai.mcp_server
+abacusai.mcp_server_connection
+abacusai.mcp_server_query_result
abacusai.memory_options
abacusai.messaging_connector_response
abacusai.model
@@ -243,6 +279,8 @@
abacusai.prediction_log_record
abacusai.prediction_operator
abacusai.prediction_operator_version
+abacusai.presentation_export_result
+abacusai.private_web_app_deployment
abacusai.problem_type
abacusai.project
abacusai.project_config
@@ -259,33 +297,51 @@
abacusai.refresh_policy
abacusai.refresh_schedule
abacusai.regenerate_llm_external_application
+abacusai.regenerate_llm_option
abacusai.resolved_feature_group_template
abacusai.return_class
abacusai.routing_action
abacusai.schema
+abacusai.session_summary
+abacusai.session_transcript
+abacusai.session_transcripts
abacusai.sftp_key
abacusai.streaming_auth_token
abacusai.streaming_client
abacusai.streaming_connector
abacusai.streaming_row_count
abacusai.streaming_sample_code
+abacusai.sts_gen_settings
+abacusai.stt_gen_model
+abacusai.stt_gen_model_options
+abacusai.stt_gen_settings
abacusai.template_node_details
abacusai.test_point_predictions
abacusai.tone_details
abacusai.training_config_options
+abacusai.tts_gen_settings
abacusai.twitter_search_result
+abacusai.unified_connector
abacusai.upload
abacusai.upload_part
abacusai.use_case
abacusai.use_case_requirements
abacusai.user
abacusai.user_exception
+abacusai.user_group_object_permission
+abacusai.video_gen_costs
+abacusai.video_gen_model
+abacusai.video_gen_model_options
abacusai.video_gen_settings
abacusai.video_search_result
abacusai.voice_gen_details
+abacusai.web_app_conversation
+abacusai.web_app_deployment_permission_dict
+abacusai.web_app_domain
abacusai.web_page_response
abacusai.web_search_response
abacusai.web_search_result
+abacusai.web_service_trigger_run
abacusai.webhook
abacusai.workflow_graph_node_details
abacusai.workflow_node_template
@@ -348,9 +404,9 @@ Module Contents
-
-location: str = None
+location: str = None
-
-space_key: str = None
+space_key: str = None
-
-pull_attachments: bool = False
+pull_attachments: bool = False
+extract_bounding_boxes: bool = False
+
+
+
+-
+location_type: str = None
@@ -468,17 +540,17 @@ Module Contents
-
-csv_delimiter: str = None
+csv_delimiter: str = None
-
-merge_file_schemas: bool = False
+merge_file_schemas: bool = False
@@ -496,25 +568,25 @@ Module Contents
-
-location: str = None
+location: str = None
-
-start_timestamp: int = None
+start_timestamp: int = None
-
-end_timestamp: int = None
+end_timestamp: int = None
@@ -532,31 +604,37 @@ Module Contents
-
-location: str = None
+location: str = None
-
-csv_delimiter: str = None
+csv_delimiter: str = None
+extract_bounding_boxes: bool = False
-
-merge_file_schemas: bool = False
+merge_file_schemas: bool = False
+
+
+
+-
+location_type: str = None
@@ -574,31 +652,31 @@ Module Contents
-
-jql: str = None
+jql: str = None
-
-custom_fields: list = None
+custom_fields: list = None
+include_comments: bool = False
-
-include_watchers: bool = False
+include_watchers: bool = False
@@ -616,31 +694,31 @@ Module Contents
-
-location: str = None
+location: str = None
-
-csv_delimiter: str = None
+csv_delimiter: str = None
+extract_bounding_boxes: bool = False
-
-merge_file_schemas: bool = False
+merge_file_schemas: bool = False
@@ -658,31 +736,37 @@ Module Contents
-
-location: str = None
+location: str = None
-
-csv_delimiter: str = None
+csv_delimiter: str = None
+extract_bounding_boxes: bool = False
-
-merge_file_schemas: bool = False
+merge_file_schemas: bool = False
+
+
+
+-
+add_file_metadata: bool = False
@@ -699,12 +783,12 @@ Module Contents
-
-location: str = None
+location: str = None
@@ -722,26 +806,26 @@ Module Contents
-
-include_entire_conversation_history: bool = False
+include_entire_conversation_history: bool = False
-
-include_all_feedback: bool = False
+include_all_feedback: bool = False
-
-resolve_matching_documents: bool = False
+resolve_matching_documents: bool = False
@@ -759,25 +843,31 @@ Module Contents
-
-pull_chat_messages: bool = False
+pull_chat_messages: bool = False
-
-pull_channel_posts: bool = False
+pull_channel_posts: bool = False
-
-pull_transcripts: bool = False
+pull_transcripts: bool = False
+
+
+
+-
+max_days_to_lookback: int = 365
@@ -787,6 +877,48 @@ Module Contents
+
+
+-
+class abacusai.api_class.dataset_application_connector.AzureStorageDatasetConfig
+Bases: ApplicationConnectorDatasetConfig
+Dataset config for Azure Storage Application Connector
+
+- Parameters:
+-
+
+
+
+-
+location: str = None
+
+
+
+-
+merge_file_schemas: bool = False
+
+
+
+-
+__post_init__()
+
+
+
+
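+A minimal construction sketch (illustrative only; it sets just the fields listed above,
+and wiring the config into a dataset-creation call is omitted because that depends on
+the connector API):
+
+    from abacusai.api_class.dataset_application_connector import AzureStorageDatasetConfig
+
+    # Point the dataset at a container path; merge_file_schemas keeps its default (False).
+    config = AzureStorageDatasetConfig(location='my-container/exports/')
+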
-
class abacusai.api_class.dataset_application_connector.FreshserviceDatasetConfig
@@ -799,12 +931,52 @@ Module Contents
+
-
class abacusai.api_class.dataset_application_connector._ApplicationConnectorDatasetConfigFactory
Bases: abacusai.api_class.abstract._ApiClassFactory
-Helper class that provides a standard way to create an ABC using
-inheritance.
-
config_abstract_class
diff --git a/docs/autoapi/abacusai/api_class/deployment/index.html b/docs/autoapi/abacusai/api_class/deployment/index.html
index e622a1f49..7f758fcea 100644
--- a/docs/autoapi/abacusai/api_class/deployment/index.html
+++ b/docs/autoapi/abacusai/api_class/deployment/index.html
@@ -76,6 +76,8 @@
- abacusai.app_user_group_sign_in_token
- abacusai.application_connector
- abacusai.audio_gen_settings
+- abacusai.audio_url_result
+- abacusai.audit_log
- abacusai.batch_prediction
- abacusai.batch_prediction_version
- abacusai.batch_prediction_version_logs
@@ -84,25 +86,38 @@
- abacusai.chat_message
- abacusai.chat_session
- abacusai.chatllm_computer
+- abacusai.chatllm_computer_status
+- abacusai.chatllm_memory
+- abacusai.chatllm_project
+- abacusai.chatllm_project_permissions
- abacusai.chatllm_referral_invite
- abacusai.chatllm_task
- abacusai.client
- abacusai.code_agent_response
+- abacusai.code_autocomplete_edit_prediction_response
- abacusai.code_autocomplete_response
- abacusai.code_bot
- abacusai.code_edit
- abacusai.code_edit_response
- abacusai.code_edits
+- abacusai.code_embeddings
+- abacusai.code_llm_changed_files
- abacusai.code_source
+- abacusai.code_suggestion_validation_response
+- abacusai.code_summary_response
+- abacusai.codellm_embedding_constants
- abacusai.compute_point_info
- abacusai.concatenation_config
+- abacusai.constants_autocomplete_response
- abacusai.cpu_gpu_memory_specs
- abacusai.cryptography
- abacusai.custom_chat_instructions
+- abacusai.custom_domain
- abacusai.custom_loss_function
- abacusai.custom_metric
- abacusai.custom_metric_version
- abacusai.custom_train_function_info
+- abacusai.daemon_task_conversation
- abacusai.data_consistency_duplication
- abacusai.data_metrics
- abacusai.data_prep_logs
@@ -116,6 +131,7 @@
- abacusai.dataset_column
- abacusai.dataset_version
- abacusai.dataset_version_logs
+- abacusai.default_llm
- abacusai.deployment
- abacusai.deployment_auth_token
- abacusai.deployment_conversation
@@ -126,6 +142,9 @@
- abacusai.document_retriever
- abacusai.document_retriever_lookup_result
- abacusai.document_retriever_version
+- abacusai.document_retriever_version_logs
+- abacusai.domain_purchase_result
+- abacusai.domain_search_result
- abacusai.drift_distribution
- abacusai.drift_distributions
- abacusai.eda
@@ -136,7 +155,9 @@
- abacusai.eda_feature_collinearity
- abacusai.eda_forecasting_analysis
- abacusai.eda_version
+- abacusai.edit_image_models
- abacusai.embedding_feature_drift_distribution
+- abacusai.entri_auth_token
- abacusai.execute_feature_group_operation
- abacusai.external_application
- abacusai.external_invite
@@ -163,7 +184,6 @@
- abacusai.feature_importance
- abacusai.feature_mapping
- abacusai.feature_performance_analysis
-- abacusai.feature_record
- abacusai.file_connector
- abacusai.file_connector_instructions
- abacusai.file_connector_verification
@@ -171,20 +191,32 @@
- abacusai.forecasting_analysis_graph_data
- abacusai.forecasting_monitor_item_analysis
- abacusai.forecasting_monitor_summary
+- abacusai.fs_entry
- abacusai.function_logs
- abacusai.generated_pit_feature_config_option
- abacusai.global_context
- abacusai.graph_dashboard
- abacusai.holdout_analysis
- abacusai.holdout_analysis_version
+- abacusai.hosted_app
+- abacusai.hosted_app_container
+- abacusai.hosted_app_file_read
+- abacusai.hosted_artifact
+- abacusai.hosted_database
+- abacusai.hosted_database_snapshot
- abacusai.hosted_model_token
+- abacusai.hostname_info
- abacusai.hume_voice
+- abacusai.image_gen_model
+- abacusai.image_gen_model_options
- abacusai.image_gen_settings
- abacusai.indexing_config
- abacusai.inferred_database_column_to_feature_mappings
- abacusai.inferred_feature_mappings
- abacusai.item_statistics
+- abacusai.lip_sync_gen_settings
- abacusai.llm_app
+- abacusai.llm_artifact
- abacusai.llm_code_block
- abacusai.llm_execution_preview
- abacusai.llm_execution_result
@@ -192,6 +224,10 @@
- abacusai.llm_input
- abacusai.llm_parameters
- abacusai.llm_response
+- abacusai.mcp_config
+- abacusai.mcp_server
+- abacusai.mcp_server_connection
+- abacusai.mcp_server_query_result
- abacusai.memory_options
- abacusai.messaging_connector_response
- abacusai.model
@@ -246,6 +282,8 @@
- abacusai.prediction_log_record
- abacusai.prediction_operator
- abacusai.prediction_operator_version
+- abacusai.presentation_export_result
+- abacusai.private_web_app_deployment
- abacusai.problem_type
- abacusai.project
- abacusai.project_config
@@ -262,33 +300,51 @@
- abacusai.refresh_policy
- abacusai.refresh_schedule
- abacusai.regenerate_llm_external_application
+- abacusai.regenerate_llm_option
- abacusai.resolved_feature_group_template
- abacusai.return_class
- abacusai.routing_action
- abacusai.schema
+- abacusai.session_summary
+- abacusai.session_transcript
+- abacusai.session_transcripts
- abacusai.sftp_key
- abacusai.streaming_auth_token
- abacusai.streaming_client
- abacusai.streaming_connector
- abacusai.streaming_row_count
- abacusai.streaming_sample_code
+- abacusai.sts_gen_settings
+- abacusai.stt_gen_model
+- abacusai.stt_gen_model_options
+- abacusai.stt_gen_settings
- abacusai.template_node_details
- abacusai.test_point_predictions
- abacusai.tone_details
- abacusai.training_config_options
+- abacusai.tts_gen_settings
- abacusai.twitter_search_result
+- abacusai.unified_connector
- abacusai.upload
- abacusai.upload_part
- abacusai.use_case
- abacusai.use_case_requirements
- abacusai.user
- abacusai.user_exception
+- abacusai.user_group_object_permission
+- abacusai.video_gen_costs
+- abacusai.video_gen_model
+- abacusai.video_gen_model_options
- abacusai.video_gen_settings
- abacusai.video_search_result
- abacusai.voice_gen_details
+- abacusai.web_app_conversation
+- abacusai.web_app_deployment_permission_dict
+- abacusai.web_app_domain
- abacusai.web_page_response
- abacusai.web_search_response
- abacusai.web_search_result
+- abacusai.web_service_trigger_run
- abacusai.webhook
- abacusai.workflow_graph_node_details
- abacusai.workflow_node_template
@@ -363,7 +419,7 @@ Classes | Prediction arguments for the FEATURE_STORE problem type |
_PredictionArgumentsFactory
|
-Helper class that provides a standard way to create an ABC using |
+ |
@@ -377,12 +433,12 @@ Module Contents
-
-kwargs: dict
+kwargs: dict
@@ -405,25 +461,25 @@ Module Contents
-
-forced_assignments: dict = None
+forced_assignments: dict = None
-
-solve_time_limit_seconds: float = None
+solve_time_limit_seconds: float = None
-
-include_all_assignments: bool = None
+include_all_assignments: bool = None
@@ -441,25 +497,25 @@ Module Contents
-
-start_timestamp: str = None
+start_timestamp: str = None
-
-end_timestamp: str = None
+end_timestamp: str = None
-
-get_all_item_data: bool = None
+get_all_item_data: bool = None
@@ -477,43 +533,43 @@ Module Contents
-
-llm_name: str = None
+llm_name: str = None
-
-num_completion_tokens: int = None
+num_completion_tokens: int = None
-
-system_message: str = None
+system_message: str = None
-
-temperature: float = None
+temperature: float = None
-
-search_score_cutoff: float = None
+search_score_cutoff: float = None
-
-ignore_documents: bool = None
+ignore_documents: bool = None
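
Note on the hunk above: the attribute block (llm_name, num_completion_tokens, system_message, temperature, search_score_cutoff, ignore_documents) belongs to one of the prediction-argument dataclasses in abacusai.api_class.deployment. Assuming it is ChatLLMPredictionArguments, a hedged construction sketch with illustrative values would look like:

    from abacusai.api_class.deployment import ChatLLMPredictionArguments

    # Keyword names mirror the attributes listed in the hunk above; the llm_name value
    # and numeric settings are assumptions for illustration only.
    args = ChatLLMPredictionArguments(
        llm_name="OPENAI_GPT4O",
        num_completion_tokens=512,
        system_message="Answer strictly from the retrieved documents.",
        temperature=0.2,
        search_score_cutoff=0.5,
        ignore_documents=False,
    )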
@@ -531,19 +587,19 @@ Module Contents
-
-explain_predictions: bool = None
+explain_predictions: bool = None
-
-explainer_type: str = None
+explainer_type: str = None
@@ -561,37 +617,37 @@ Module Contents
-
-num_predictions: int = None
+num_predictions: int = None
-
-prediction_start: str = None
+prediction_start: str = None
-
-explain_predictions: bool = None
+explain_predictions: bool = None
-
-explainer_type: str = None
+explainer_type: str = None
-
-get_item_data: bool = None
+get_item_data: bool = None
@@ -609,37 +665,37 @@ Module Contents
-
-num_predictions: int = None
+num_predictions: int = None
-
-prediction_start: str = None
+prediction_start: str = None
-
-explain_predictions: bool = None
+explain_predictions: bool = None
-
-explainer_type: str = None
+explainer_type: str = None
-
-get_item_data: bool = None
+get_item_data: bool = None
@@ -657,43 +713,43 @@ Module Contents
-
-llm_name: str = None
+llm_name: str = None
-
-num_completion_tokens: int = None
+num_completion_tokens: int = None
-
-system_message: str = None
+system_message: str = None
-
-temperature: float = None
+temperature: float = None
-
-search_score_cutoff: float = None
+search_score_cutoff: float = None
-
-ignore_documents: bool = None
+ignore_documents: bool = None
@@ -710,12 +766,12 @@ Module Contents
-
-limit_results: int = None
+limit_results: int = None
@@ -729,8 +785,6 @@
-
-chunk_size: int = None
+chunk_size: int = None
-
-chunk_overlap_fraction: float = None
+chunk_overlap_fraction: float = None
@@ -391,37 +447,37 @@ Module Contents
-
-score_multiplier_column: str = None
+score_multiplier_column: str = None
-
-prune_vectors: bool = None
+prune_vectors: bool = None
-
-index_metadata_columns: bool = None
+index_metadata_columns: bool = None
-
-use_document_summary: bool = None
+use_document_summary: bool = None
-
-summary_instructions: str = None
+summary_instructions: str = None
-
-standalone_deployment: bool = False
+standalone_deployment: bool = False
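
Note on the hunks above: the chunking and retrieval attributes (chunk_size, chunk_overlap_fraction, score_multiplier_column, prune_vectors, index_metadata_columns, use_document_summary, summary_instructions, standalone_deployment) match the document-retriever/vector-store configuration dataclass. Assuming that class is abacusai.api_class.VectorStoreConfig, a rough sketch with placeholder values (only the field names appear in this diff):

    from abacusai.api_class import VectorStoreConfig

    # All values below are illustrative assumptions.
    retriever_config = VectorStoreConfig(
        chunk_size=512,
        chunk_overlap_fraction=0.1,
        score_multiplier_column="priority",
        prune_vectors=False,
        index_metadata_columns=True,
        use_document_summary=True,
        summary_instructions="Summarize each document in two sentences.",
        standalone_deployment=False,
    )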
diff --git a/docs/autoapi/abacusai/api_class/enums/index.html b/docs/autoapi/abacusai/api_class/enums/index.html
index fb82fe585..fa9288e23 100644
--- a/docs/autoapi/abacusai/api_class/enums/index.html
+++ b/docs/autoapi/abacusai/api_class/enums/index.html
@@ -76,6 +76,8 @@
abacusai.app_user_group_sign_in_token
abacusai.application_connector
abacusai.audio_gen_settings
+abacusai.audio_url_result
+abacusai.audit_log
abacusai.batch_prediction
abacusai.batch_prediction_version
abacusai.batch_prediction_version_logs
@@ -84,25 +86,38 @@
abacusai.chat_message
abacusai.chat_session
abacusai.chatllm_computer
+abacusai.chatllm_computer_status
+abacusai.chatllm_memory
+abacusai.chatllm_project
+abacusai.chatllm_project_permissions
abacusai.chatllm_referral_invite
abacusai.chatllm_task
abacusai.client
abacusai.code_agent_response
+abacusai.code_autocomplete_edit_prediction_response
abacusai.code_autocomplete_response
abacusai.code_bot
abacusai.code_edit
abacusai.code_edit_response
abacusai.code_edits
+abacusai.code_embeddings
+abacusai.code_llm_changed_files
abacusai.code_source
+abacusai.code_suggestion_validation_response
+abacusai.code_summary_response
+abacusai.codellm_embedding_constants
abacusai.compute_point_info
abacusai.concatenation_config
+abacusai.constants_autocomplete_response
abacusai.cpu_gpu_memory_specs
abacusai.cryptography
abacusai.custom_chat_instructions
+abacusai.custom_domain
abacusai.custom_loss_function
abacusai.custom_metric
abacusai.custom_metric_version
abacusai.custom_train_function_info
+abacusai.daemon_task_conversation
abacusai.data_consistency_duplication
abacusai.data_metrics
abacusai.data_prep_logs
@@ -116,6 +131,7 @@
abacusai.dataset_column
abacusai.dataset_version
abacusai.dataset_version_logs
+abacusai.default_llm
abacusai.deployment
abacusai.deployment_auth_token
abacusai.deployment_conversation
@@ -126,6 +142,9 @@
abacusai.document_retriever
abacusai.document_retriever_lookup_result
abacusai.document_retriever_version
+abacusai.document_retriever_version_logs
+abacusai.domain_purchase_result
+abacusai.domain_search_result
abacusai.drift_distribution
abacusai.drift_distributions
abacusai.eda
@@ -136,7 +155,9 @@
abacusai.eda_feature_collinearity
abacusai.eda_forecasting_analysis
abacusai.eda_version
+abacusai.edit_image_models
abacusai.embedding_feature_drift_distribution
+abacusai.entri_auth_token
abacusai.execute_feature_group_operation
abacusai.external_application
abacusai.external_invite
@@ -163,7 +184,6 @@
abacusai.feature_importance
abacusai.feature_mapping
abacusai.feature_performance_analysis
-abacusai.feature_record
abacusai.file_connector
abacusai.file_connector_instructions
abacusai.file_connector_verification
@@ -171,20 +191,32 @@
abacusai.forecasting_analysis_graph_data
abacusai.forecasting_monitor_item_analysis
abacusai.forecasting_monitor_summary
+abacusai.fs_entry
abacusai.function_logs
abacusai.generated_pit_feature_config_option
abacusai.global_context
abacusai.graph_dashboard
abacusai.holdout_analysis
abacusai.holdout_analysis_version
+abacusai.hosted_app
+abacusai.hosted_app_container
+abacusai.hosted_app_file_read
+abacusai.hosted_artifact
+abacusai.hosted_database
+abacusai.hosted_database_snapshot
abacusai.hosted_model_token
+abacusai.hostname_info
abacusai.hume_voice
+abacusai.image_gen_model
+abacusai.image_gen_model_options
abacusai.image_gen_settings
abacusai.indexing_config
abacusai.inferred_database_column_to_feature_mappings
abacusai.inferred_feature_mappings
abacusai.item_statistics
+abacusai.lip_sync_gen_settings
abacusai.llm_app
+abacusai.llm_artifact
abacusai.llm_code_block
abacusai.llm_execution_preview
abacusai.llm_execution_result
@@ -192,6 +224,10 @@
abacusai.llm_input
abacusai.llm_parameters
abacusai.llm_response
+abacusai.mcp_config
+abacusai.mcp_server
+abacusai.mcp_server_connection
+abacusai.mcp_server_query_result
abacusai.memory_options
abacusai.messaging_connector_response
abacusai.model
@@ -246,6 +282,8 @@
abacusai.prediction_log_record
abacusai.prediction_operator
abacusai.prediction_operator_version
+abacusai.presentation_export_result
+abacusai.private_web_app_deployment
abacusai.problem_type
abacusai.project
abacusai.project_config
@@ -262,33 +300,51 @@
abacusai.refresh_policy
abacusai.refresh_schedule
abacusai.regenerate_llm_external_application
+abacusai.regenerate_llm_option
abacusai.resolved_feature_group_template
abacusai.return_class
abacusai.routing_action
abacusai.schema
+abacusai.session_summary
+abacusai.session_transcript
+abacusai.session_transcripts
abacusai.sftp_key
abacusai.streaming_auth_token
abacusai.streaming_client
abacusai.streaming_connector
abacusai.streaming_row_count
abacusai.streaming_sample_code
+abacusai.sts_gen_settings
+abacusai.stt_gen_model
+abacusai.stt_gen_model_options
+abacusai.stt_gen_settings
abacusai.template_node_details
abacusai.test_point_predictions
abacusai.tone_details
abacusai.training_config_options
+abacusai.tts_gen_settings
abacusai.twitter_search_result
+abacusai.unified_connector
abacusai.upload
abacusai.upload_part
abacusai.use_case
abacusai.use_case_requirements
abacusai.user
abacusai.user_exception
+abacusai.user_group_object_permission
+abacusai.video_gen_costs
+abacusai.video_gen_model
+abacusai.video_gen_model_options
abacusai.video_gen_settings
abacusai.video_search_result
abacusai.voice_gen_details
+abacusai.web_app_conversation
+abacusai.web_app_deployment_permission_dict
+abacusai.web_app_domain
abacusai.web_page_response
abacusai.web_search_response
abacusai.web_search_result
+abacusai.web_service_trigger_run
abacusai.webhook
abacusai.workflow_graph_node_details
abacusai.workflow_node_template
@@ -533,6 +589,12 @@ Classes
@@ -377,19 +433,19 @@ Module Contents