tasks
__all__ = ['ActionsSubtask', 'OutputSchemaValidationSubtask', 'AssistantTask', 'AudioTranscriptionTask', 'BaseAudioGenerationTask', 'BaseImageGenerationTask', 'BaseTask', 'BaseSubtask', 'BaseTextInputTask', 'BranchTask', 'CodeExecutionTask', 'ExtractionTask', 'InpaintingImageGenerationTask', 'OutpaintingImageGenerationTask', 'PromptImageGenerationTask', 'PromptTask', 'RagTask', 'StructureRunTask', 'TextSummaryTask', 'TextToSpeechTask', 'ToolTask', 'ToolkitTask', 'VariationImageGenerationTask']
module-attribute
ActionsSubtask

Bases:
BaseSubtask[Union[ListArtifact, ErrorArtifact]]
Source Code in griptape/tasks/actions_subtask.py
```python
@define
class ActionsSubtask(BaseSubtask[Union[ListArtifact, ErrorArtifact]]):
    THOUGHT_PATTERN = r"(?s)^Thought:\s*(.*?)$"
    ACTIONS_PATTERN = r"(?s)Actions:[^\[]*(\[.*\])"
    ANSWER_PATTERN = r"(?s)^Answer:\s?([\s\S]*)$"
    RESPONSE_STOP_SEQUENCE = "<|Response|>"

    thought: Optional[str] = field(default=None, kw_only=True)
    actions: list[ToolAction] = field(factory=list, kw_only=True)
    output: Optional[BaseArtifact] = field(default=None, init=False)
    generate_assistant_subtask_template: Callable[[ActionsSubtask], str] = field(
        default=Factory(lambda self: self.default_generate_assistant_subtask_template, takes_self=True),
        kw_only=True,
    )
    generate_user_subtask_template: Callable[[ActionsSubtask], str] = field(
        default=Factory(lambda self: self.default_generate_user_subtask_template, takes_self=True),
        kw_only=True,
    )
    response_stop_sequence: str = field(default=RESPONSE_STOP_SEQUENCE, kw_only=True)
    _input: Union[str, list, tuple, BaseArtifact, Callable[[BaseTask], BaseArtifact]] = field(
        default=lambda task: task.full_context["args"][0] if task.full_context["args"] else TextArtifact(value=""),
        alias="input",
    )
    _memory: Optional[TaskMemory] = None
    _origin_task: Optional[BaseTask] = field(default=None, kw_only=True)

    @property
    def input(self) -> TextArtifact | AudioArtifact | ListArtifact:
        return self._process_task_input(self._input)

    @input.setter
    def input(self, value: str | list | tuple | BaseArtifact | Callable[[BaseTask], BaseArtifact]) -> None:
        self._input = value

    def attach_to(self, parent_task: BaseTask) -> None:
        super().attach_to(parent_task)
        self.structure = parent_task.structure
        task_input = self.input
        try:
            if isinstance(task_input, TextArtifact) and task_input.meta.get("is_react_prompt", False):
                self.__init_from_prompt(task_input.to_text())
            else:
                self.__init_from_artifact(task_input)

            # If StructuredOutputTool was used, treat the input to it as the output of the subtask.
            structured_outputs = [a for a in self.actions if isinstance(a.tool, StructuredOutputTool)]
            if structured_outputs:
                output_values = [JsonArtifact(a.input["values"]) for a in structured_outputs]
                if len(structured_outputs) > 1:
                    self.output = ListArtifact(output_values)
                else:
                    self.output = output_values[0]
        except Exception as e:
            logger.error("Subtask %s\nError parsing tool action: %s", self.origin_task.id, e)
            self.output = ErrorArtifact(f"ToolAction input parsing error: {e}", exception=e)

    def before_run(self) -> None:
        EventBus.publish_event(
            StartActionsSubtaskEvent(
                task_id=self.id,
                task_parent_ids=self.parent_ids,
                task_child_ids=self.child_ids,
                task_input=self.input,
                task_output=self.output,
                subtask_parent_task_id=self.origin_task.id,
                subtask_thought=self.thought,
                subtask_actions=self.actions_to_dicts(),
            ),
        )
        parts = [
            f"{self.__class__.__name__} {self.id}",
            *([f"\nThought: {self.thought}"] if self.thought else []),
            f"\nActions: {self.actions_to_json()}",
        ]
        logger.info("".join(parts))

    def try_run(self) -> ListArtifact | ErrorArtifact:
        try:
            if any(isinstance(a.output, ErrorArtifact) for a in self.actions):
                errors = [a.output.value for a in self.actions if isinstance(a.output, ErrorArtifact)]
                self.output = ErrorArtifact("\n\n".join(errors))
            else:
                results = self.run_actions(self.actions)

                actions_output = []
                for result in results:
                    tag, output = result
                    output.name = f"{tag} output"
                    actions_output.append(output)
                self.output = ListArtifact(actions_output)
        except Exception as e:
            logger.debug("Subtask %s\n%s", self.id, e)
            self.output = ErrorArtifact(str(e), exception=e)
        if self.output is not None:
            return self.output
        return ErrorArtifact("no tool output")

    def run_actions(self, actions: list[ToolAction]) -> list[tuple[str, BaseArtifact]]:
        with self.create_futures_executor() as futures_executor:
            return utils.execute_futures_list(
                [futures_executor.submit(with_contextvars(self.run_action), a) for a in actions]
            )

    def run_action(self, action: ToolAction) -> tuple[str, BaseArtifact]:
        if action.tool is not None:
            if action.path is not None:
                output = action.tool.run(getattr(action.tool, action.path), self, action)
            else:
                output = ErrorArtifact("action path not found")
        else:
            output = ErrorArtifact("action name not found")
        action.output = output

        return action.tag, output

    def after_run(self) -> None:
        response = self.output.to_text() if isinstance(self.output, BaseArtifact) else str(self.output)

        EventBus.publish_event(
            FinishActionsSubtaskEvent(
                task_id=self.id,
                task_parent_ids=self.parent_ids,
                task_child_ids=self.child_ids,
                task_input=self.input,
                task_output=self.output,
                subtask_parent_task_id=self.origin_task.id,
                subtask_thought=self.thought,
                subtask_actions=self.actions_to_dicts(),
            ),
        )
        logger.info("%s %s\nResponse: %s", self.__class__.__name__, self.id, response)

    def actions_to_dicts(self) -> list[dict]:
        json_list = []

        for action in self.actions:
            json_dict = {}

            if action.tag:
                json_dict["tag"] = action.tag
            if action.name:
                json_dict["name"] = action.name
            if action.path:
                json_dict["path"] = action.path
            if action.input:
                json_dict["input"] = action.input

            json_list.append(json_dict)

        return json_list

    def actions_to_json(self) -> str:
        return json.dumps(self.actions_to_dicts(), indent=2)

    def add_to_prompt_stack(self, stack: PromptStack) -> None:
        from griptape.tasks import PromptTask

        if isinstance(self.origin_task, PromptTask) and self.origin_task.prompt_driver.use_native_tools:
            action_calls = [
                ToolAction(name=action.name, path=action.path, tag=action.tag, input=action.input)
                for action in self.actions
            ]
            action_results = [
                ToolAction(
                    name=action.name,
                    path=action.path,
                    tag=action.tag,
                    output=action.output if action.output is not None else self.output,
                )
                for action in self.actions
            ]

            stack.add_assistant_message(
                ListArtifact(
                    [
                        *([TextArtifact(self.thought)] if self.thought else []),
                        *[ActionArtifact(a) for a in action_calls],
                    ],
                ),
            )
            stack.add_user_message(
                ListArtifact(
                    [
                        *[ActionArtifact(a) for a in action_results],
                        *([] if self.output else [TextArtifact("Please keep going")]),
                    ],
                ),
            )
        else:
            stack.add_assistant_message(self.generate_assistant_subtask_template(self))
            stack.add_user_message(self.generate_user_subtask_template(self))

    def default_generate_assistant_subtask_template(self, subtask: ActionsSubtask) -> str:
        return J2("tasks/prompt_task/assistant_actions_subtask.j2").render(
            stop_sequence=self.response_stop_sequence,
            subtask=subtask,
        )

    def default_generate_user_subtask_template(self, subtask: ActionsSubtask) -> str:
        return J2("tasks/prompt_task/user_actions_subtask.j2").render(
            stop_sequence=self.response_stop_sequence,
            subtask=subtask,
        )

    def _process_task_input(
        self,
        task_input: Union[str, tuple, list, BaseArtifact, Callable[[BaseTask], BaseArtifact]],
    ) -> Union[TextArtifact, AudioArtifact, ListArtifact]:
        if isinstance(task_input, (TextArtifact, AudioArtifact, ListArtifact)):
            return task_input
        if isinstance(task_input, ActionArtifact):
            return ListArtifact([task_input])
        if isinstance(task_input, Callable):
            return self._process_task_input(task_input(self))
        if isinstance(task_input, str):
            return self._process_task_input(TextArtifact(task_input))
        if isinstance(task_input, (list, tuple)):
            return ListArtifact([self._process_task_input(elem) for elem in task_input])
        raise ValueError(f"Invalid input type: {type(task_input)} ")

    def __init_from_prompt(self, value: str) -> None:
        thought_matches = re.findall(self.THOUGHT_PATTERN, value, re.MULTILINE)
        actions_matches = re.findall(self.ACTIONS_PATTERN, value, re.DOTALL)
        answer_matches = re.findall(self.ANSWER_PATTERN, value, re.MULTILINE)

        self.actions = self.__parse_actions(actions_matches)

        if thought_matches:
            self.thought = thought_matches[-1]

        if not self.actions and self.output is None:
            if answer_matches:
                # A direct answer is provided, set it as the output.
                self.output = TextArtifact(answer_matches[-1])
            else:
                # The LLM failed to follow the ReAct prompt, set the LLM's raw response as the output.
                self.output = TextArtifact(value)

    def __init_from_artifact(self, artifact: TextArtifact | AudioArtifact | ListArtifact) -> None:
        """Parses the input Artifact to extract either a final answer or thought and actions.

        When the input Artifact is a TextArtifact, it is assumed to be the final answer.
        When the input Artifact is a ListArtifact, it is assumed to contain both thought and actions.
        Text Artifacts are parsed as the thought, and ToolAction Artifacts parsed as the actions.

        Args:
            artifact: The input Artifacts.

        Returns:
            None
        """
        # When using native tools, we can assume that a TextArtifact or AudioArtifact is the LLM providing its final answer.
        if isinstance(artifact, (TextArtifact, AudioArtifact)):
            self.output = artifact
            return

        self.actions = [
            self.__process_action_object(artifact.value.to_dict())
            for artifact in artifact.value
            if isinstance(artifact, ActionArtifact)
        ]

        # When parsing from Artifacts we can't determine the thought unless there are also Actions
        if self.actions:
            thoughts = [artifact.value for artifact in artifact.value if isinstance(artifact, TextArtifact)]
            if thoughts:
                self.thought = thoughts[0]
        elif self.output is None:
            self.output = TextArtifact(artifact.to_text())

    def __parse_actions(self, actions_matches: list[str]) -> list[ToolAction]:
        if len(actions_matches) == 0:
            return []
        try:
            data = actions_matches[-1]
            actions_list: list[dict] = json.loads(data, strict=False)

            return [self.__process_action_object(action_object) for action_object in actions_list]
        except json.JSONDecodeError as e:
            logger.debug("Subtask %s\nInvalid actions JSON: %s", self.origin_task.id, e)

            self.output = ErrorArtifact(f"Actions JSON decoding error: {e}", exception=e)

            return []

    def __process_action_object(self, action_object: dict) -> ToolAction:
        # Load action tag; throw exception if the key is not present
        action_tag = action_object["tag"]

        # Load action name; throw exception if the key is not present
        action_name = action_object["name"]

        # Load action method; throw exception if the key is not present
        action_path = action_object["path"]

        # Load optional input value; don't throw exceptions if key is not present
        if "input" in action_object:
            # Some LLMs don't support nested parameters and therefore won't generate "values".
            # So we need to manually add it here.
            if "values" not in action_object["input"]:
                action_object["input"] = {"values": action_object["input"]}

            # The schema library has a bug, where something like `Or(str, None)` doesn't get
            # correctly translated into JSON schema. For some optional input fields LLMs sometimes
            # still provide null value, which trips up the validator. The temporary solution that
            # works is to strip all key-values where value is null.
            action_input = remove_null_values_in_dict_recursively(action_object["input"])
        else:
            action_input = {}

        # Load the action itself
        if isinstance(self.origin_task, ActionsSubtaskOriginMixin):
            tool = self.origin_task.find_tool(action_name)
        else:
            raise Exception("ActionSubtask must be attached to a Task that implements ActionSubtaskOriginMixin.")

        action = ToolAction(tag=action_tag, name=action_name, path=action_path, input=action_input, tool=tool)
        self.__validate_action(action)

        return action

    def __validate_action(self, action: ToolAction) -> None:
        if action.tool is None:
            return
        if action.path is None:
            raise Exception("ToolAction path not found.")
        activity = getattr(action.tool, action.path)
        if activity is None:
            raise Exception("Activity not found.")
        activity_schema = action.tool.activity_schema(activity)
        if activity_schema is None or action.input is None:
            return
        try:
            action.tool.validate_activity_schema(activity_schema, action.input)
        except ValueError as e:
            logger.debug("Subtask %s\nInvalid action JSON: %s", self.origin_task.id, e)
            action.output = ErrorArtifact(f"Activity input JSON validation error: {e}", exception=e)
```
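The three regex class attributes above define the ReAct-style response format that `__init_from_prompt` parses. The following sketch shows how they pick apart a conforming response; the response text, tool name, and activity path are purely illustrative:

```python
import json
import re

# Patterns copied verbatim from ActionsSubtask.
THOUGHT_PATTERN = r"(?s)^Thought:\s*(.*?)$"
ACTIONS_PATTERN = r"(?s)Actions:[^\[]*(\[.*\])"

# A hypothetical LLM response that follows the ReAct prompt format.
llm_response = (
    "Thought: I should look up the weather before answering.\n"
    'Actions: [{"tag": "call_1", "name": "WebSearchTool", "path": "search", '
    '"input": {"values": {"query": "weather in Paris"}}}]'
)

thoughts = re.findall(THOUGHT_PATTERN, llm_response, re.MULTILINE)
actions = json.loads(re.findall(ACTIONS_PATTERN, llm_response, re.DOTALL)[-1], strict=False)

print(thoughts[-1])        # I should look up the weather before answering.
print(actions[0]["name"])  # WebSearchTool
```

As in `__init_from_prompt`, only the last match of each pattern is used, and an `Answer:` block with no actions becomes the subtask's final output instead.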
ACTIONS_PATTERN = '(?s)Actions:[^\\[]*(\\[.*\\])'
class-attribute instance-attribute

ANSWER_PATTERN = '(?s)^Answer:\\s?([\\s\\S]*)$'
class-attribute instance-attribute

RESPONSE_STOP_SEQUENCE = '<|Response|>'
class-attribute instance-attribute

THOUGHT_PATTERN = '(?s)^Thought:\\s*(.*?)$'
class-attribute instance-attribute

_input = field(default=lambda task: task.full_context['args'][0] if task.full_context['args'] else TextArtifact(value=''), alias='input')
class-attribute instance-attribute

_memory = None
class-attribute instance-attribute

_origin_task = field(default=None, kw_only=True)
class-attribute instance-attribute

actions = field(factory=list, kw_only=True)
class-attribute instance-attribute

generate_assistant_subtask_template = field(default=Factory(lambda self: self.default_generate_assistant_subtask_template, takes_self=True), kw_only=True)
class-attribute instance-attribute

generate_user_subtask_template = field(default=Factory(lambda self: self.default_generate_user_subtask_template, takes_self=True), kw_only=True)
class-attribute instance-attribute

input
property writable

output = field(default=None, init=False)
class-attribute instance-attribute

response_stop_sequence = field(default=RESPONSE_STOP_SEQUENCE, kw_only=True)
class-attribute instance-attribute

thought = field(default=None, kw_only=True)
class-attribute instance-attribute
__init_from_artifact(artifact)
Parses the input Artifact to extract either a final answer or thought and actions.
When the input Artifact is a TextArtifact, it is assumed to be the final answer. When the input Artifact is a ListArtifact, it is assumed to contain both thought and actions. Text Artifacts are parsed as the thought, and ToolAction Artifacts are parsed as the actions.
Parameters
Name | Type | Description | Default |
---|---|---|---|
artifact | TextArtifact \| AudioArtifact \| ListArtifact | The input Artifacts. | required |
Returns
Type | Description |
---|---|
None | None |
Source Code in griptape/tasks/actions_subtask.py
```python
def __init_from_artifact(self, artifact: TextArtifact | AudioArtifact | ListArtifact) -> None:
    """Parses the input Artifact to extract either a final answer or thought and actions.

    When the input Artifact is a TextArtifact, it is assumed to be the final answer.
    When the input Artifact is a ListArtifact, it is assumed to contain both thought and actions.
    Text Artifacts are parsed as the thought, and ToolAction Artifacts parsed as the actions.

    Args:
        artifact: The input Artifacts.

    Returns:
        None
    """
    # When using native tools, we can assume that a TextArtifact or AudioArtifact is the LLM providing its final answer.
    if isinstance(artifact, (TextArtifact, AudioArtifact)):
        self.output = artifact
        return

    self.actions = [
        self.__process_action_object(artifact.value.to_dict())
        for artifact in artifact.value
        if isinstance(artifact, ActionArtifact)
    ]

    # When parsing from Artifacts we can't determine the thought unless there are also Actions
    if self.actions:
        thoughts = [artifact.value for artifact in artifact.value if isinstance(artifact, TextArtifact)]
        if thoughts:
            self.thought = thoughts[0]
    elif self.output is None:
        self.output = TextArtifact(artifact.to_text())
```
__init_from_prompt(value)
Source Code in griptape/tasks/actions_subtask.py
```python
def __init_from_prompt(self, value: str) -> None:
    thought_matches = re.findall(self.THOUGHT_PATTERN, value, re.MULTILINE)
    actions_matches = re.findall(self.ACTIONS_PATTERN, value, re.DOTALL)
    answer_matches = re.findall(self.ANSWER_PATTERN, value, re.MULTILINE)

    self.actions = self.__parse_actions(actions_matches)

    if thought_matches:
        self.thought = thought_matches[-1]

    if not self.actions and self.output is None:
        if answer_matches:
            # A direct answer is provided, set it as the output.
            self.output = TextArtifact(answer_matches[-1])
        else:
            # The LLM failed to follow the ReAct prompt, set the LLM's raw response as the output.
            self.output = TextArtifact(value)
```
__parse_actions(actions_matches)
Source Code in griptape/tasks/actions_subtask.py
```python
def __parse_actions(self, actions_matches: list[str]) -> list[ToolAction]:
    if len(actions_matches) == 0:
        return []
    try:
        data = actions_matches[-1]
        actions_list: list[dict] = json.loads(data, strict=False)

        return [self.__process_action_object(action_object) for action_object in actions_list]
    except json.JSONDecodeError as e:
        logger.debug("Subtask %s\nInvalid actions JSON: %s", self.origin_task.id, e)

        self.output = ErrorArtifact(f"Actions JSON decoding error: {e}", exception=e)

        return []
```
__process_action_object(action_object)
Source Code in griptape/tasks/actions_subtask.py
```python
def __process_action_object(self, action_object: dict) -> ToolAction:
    # Load action tag; throw exception if the key is not present
    action_tag = action_object["tag"]

    # Load action name; throw exception if the key is not present
    action_name = action_object["name"]

    # Load action method; throw exception if the key is not present
    action_path = action_object["path"]

    # Load optional input value; don't throw exceptions if key is not present
    if "input" in action_object:
        # Some LLMs don't support nested parameters and therefore won't generate "values".
        # So we need to manually add it here.
        if "values" not in action_object["input"]:
            action_object["input"] = {"values": action_object["input"]}

        # The schema library has a bug, where something like `Or(str, None)` doesn't get
        # correctly translated into JSON schema. For some optional input fields LLMs sometimes
        # still provide null value, which trips up the validator. The temporary solution that
        # works is to strip all key-values where value is null.
        action_input = remove_null_values_in_dict_recursively(action_object["input"])
    else:
        action_input = {}

    # Load the action itself
    if isinstance(self.origin_task, ActionsSubtaskOriginMixin):
        tool = self.origin_task.find_tool(action_name)
    else:
        raise Exception("ActionSubtask must be attached to a Task that implements ActionSubtaskOriginMixin.")

    action = ToolAction(tag=action_tag, name=action_name, path=action_path, input=action_input, tool=tool)
    self.__validate_action(action)

    return action
```
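Taken together, `__parse_actions` and `__process_action_object` expect the actions payload to be a JSON array of objects with `tag`, `name`, and `path` keys plus an optional `input`. A representative (hypothetical) payload:

```python
import json

# Tool name and activity path are illustrative; "input" may omit the "values"
# wrapper, in which case __process_action_object adds it automatically.
actions_json = """
[
  {
    "tag": "call_1",
    "name": "Calculator",
    "path": "calculate",
    "input": {"values": {"expression": "2 + 2"}}
  }
]
"""

actions_list = json.loads(actions_json, strict=False)
assert actions_list[0]["input"]["values"] == {"expression": "2 + 2"}
```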
__validate_action(action)
Source Code in griptape/tasks/actions_subtask.py
```python
def __validate_action(self, action: ToolAction) -> None:
    if action.tool is None:
        return
    if action.path is None:
        raise Exception("ToolAction path not found.")
    activity = getattr(action.tool, action.path)
    if activity is None:
        raise Exception("Activity not found.")
    activity_schema = action.tool.activity_schema(activity)
    if activity_schema is None or action.input is None:
        return
    try:
        action.tool.validate_activity_schema(activity_schema, action.input)
    except ValueError as e:
        logger.debug("Subtask %s\nInvalid action JSON: %s", self.origin_task.id, e)
        action.output = ErrorArtifact(f"Activity input JSON validation error: {e}", exception=e)
```
_process_task_input(task_input)
Source Code in griptape/tasks/actions_subtask.py
```python
def _process_task_input(
    self,
    task_input: Union[str, tuple, list, BaseArtifact, Callable[[BaseTask], BaseArtifact]],
) -> Union[TextArtifact, AudioArtifact, ListArtifact]:
    if isinstance(task_input, (TextArtifact, AudioArtifact, ListArtifact)):
        return task_input
    if isinstance(task_input, ActionArtifact):
        return ListArtifact([task_input])
    if isinstance(task_input, Callable):
        return self._process_task_input(task_input(self))
    if isinstance(task_input, str):
        return self._process_task_input(TextArtifact(task_input))
    if isinstance(task_input, (list, tuple)):
        return ListArtifact([self._process_task_input(elem) for elem in task_input])
    raise ValueError(f"Invalid input type: {type(task_input)} ")
```
actions_to_dicts()
Source Code in griptape/tasks/actions_subtask.py
```python
def actions_to_dicts(self) -> list[dict]:
    json_list = []

    for action in self.actions:
        json_dict = {}

        if action.tag:
            json_dict["tag"] = action.tag
        if action.name:
            json_dict["name"] = action.name
        if action.path:
            json_dict["path"] = action.path
        if action.input:
            json_dict["input"] = action.input

        json_list.append(json_dict)

    return json_list
```
actions_to_json()
Source Code in griptape/tasks/actions_subtask.py
```python
def actions_to_json(self) -> str:
    return json.dumps(self.actions_to_dicts(), indent=2)
```
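A sketch of the resulting JSON for a single action; the `ToolAction` import path and field values are assumptions for illustration:

```python
from griptape.common import ToolAction  # import path assumed
from griptape.tasks import ActionsSubtask

subtask = ActionsSubtask()
subtask.actions = [
    ToolAction(tag="call_1", name="Calculator", path="calculate", input={"values": {"expression": "2 + 2"}}),
]
print(subtask.actions_to_json())
# [
#   {
#     "tag": "call_1",
#     "name": "Calculator",
#     "path": "calculate",
#     "input": {
#       "values": {
#         "expression": "2 + 2"
#       }
#     }
#   }
# ]
```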
add_to_prompt_stack(stack)
Source Code in griptape/tasks/actions_subtask.py
```python
def add_to_prompt_stack(self, stack: PromptStack) -> None:
    from griptape.tasks import PromptTask

    if isinstance(self.origin_task, PromptTask) and self.origin_task.prompt_driver.use_native_tools:
        action_calls = [
            ToolAction(name=action.name, path=action.path, tag=action.tag, input=action.input)
            for action in self.actions
        ]
        action_results = [
            ToolAction(
                name=action.name,
                path=action.path,
                tag=action.tag,
                output=action.output if action.output is not None else self.output,
            )
            for action in self.actions
        ]

        stack.add_assistant_message(
            ListArtifact(
                [
                    *([TextArtifact(self.thought)] if self.thought else []),
                    *[ActionArtifact(a) for a in action_calls],
                ],
            ),
        )
        stack.add_user_message(
            ListArtifact(
                [
                    *[ActionArtifact(a) for a in action_results],
                    *([] if self.output else [TextArtifact("Please keep going")]),
                ],
            ),
        )
    else:
        stack.add_assistant_message(self.generate_assistant_subtask_template(self))
        stack.add_user_message(self.generate_user_subtask_template(self))
```
after_run()
Source Code in griptape/tasks/actions_subtask.py
```python
def after_run(self) -> None:
    response = self.output.to_text() if isinstance(self.output, BaseArtifact) else str(self.output)

    EventBus.publish_event(
        FinishActionsSubtaskEvent(
            task_id=self.id,
            task_parent_ids=self.parent_ids,
            task_child_ids=self.child_ids,
            task_input=self.input,
            task_output=self.output,
            subtask_parent_task_id=self.origin_task.id,
            subtask_thought=self.thought,
            subtask_actions=self.actions_to_dicts(),
        ),
    )
    logger.info("%s %s\nResponse: %s", self.__class__.__name__, self.id, response)
```
attach_to(parent_task)
Source Code in griptape/tasks/actions_subtask.py
```python
def attach_to(self, parent_task: BaseTask) -> None:
    super().attach_to(parent_task)
    self.structure = parent_task.structure
    task_input = self.input
    try:
        if isinstance(task_input, TextArtifact) and task_input.meta.get("is_react_prompt", False):
            self.__init_from_prompt(task_input.to_text())
        else:
            self.__init_from_artifact(task_input)

        # If StructuredOutputTool was used, treat the input to it as the output of the subtask.
        structured_outputs = [a for a in self.actions if isinstance(a.tool, StructuredOutputTool)]
        if structured_outputs:
            output_values = [JsonArtifact(a.input["values"]) for a in structured_outputs]
            if len(structured_outputs) > 1:
                self.output = ListArtifact(output_values)
            else:
                self.output = output_values[0]
    except Exception as e:
        logger.error("Subtask %s\nError parsing tool action: %s", self.origin_task.id, e)
        self.output = ErrorArtifact(f"ToolAction input parsing error: {e}", exception=e)
```
before_run()
Source Code in griptape/tasks/actions_subtask.py
```python
def before_run(self) -> None:
    EventBus.publish_event(
        StartActionsSubtaskEvent(
            task_id=self.id,
            task_parent_ids=self.parent_ids,
            task_child_ids=self.child_ids,
            task_input=self.input,
            task_output=self.output,
            subtask_parent_task_id=self.origin_task.id,
            subtask_thought=self.thought,
            subtask_actions=self.actions_to_dicts(),
        ),
    )
    parts = [
        f"{self.__class__.__name__} {self.id}",
        *([f"\nThought: {self.thought}"] if self.thought else []),
        f"\nActions: {self.actions_to_json()}",
    ]
    logger.info("".join(parts))
```
default_generate_assistant_subtask_template(subtask)
Source Code in griptape/tasks/actions_subtask.py
```python
def default_generate_assistant_subtask_template(self, subtask: ActionsSubtask) -> str:
    return J2("tasks/prompt_task/assistant_actions_subtask.j2").render(
        stop_sequence=self.response_stop_sequence,
        subtask=subtask,
    )
```
default_generate_user_subtask_template(subtask)
Source Code in griptape/tasks/actions_subtask.py
```python
def default_generate_user_subtask_template(self, subtask: ActionsSubtask) -> str:
    return J2("tasks/prompt_task/user_actions_subtask.j2").render(
        stop_sequence=self.response_stop_sequence,
        subtask=subtask,
    )
```
run_action(action)
Source Code in griptape/tasks/actions_subtask.py
```python
def run_action(self, action: ToolAction) -> tuple[str, BaseArtifact]:
    if action.tool is not None:
        if action.path is not None:
            output = action.tool.run(getattr(action.tool, action.path), self, action)
        else:
            output = ErrorArtifact("action path not found")
    else:
        output = ErrorArtifact("action name not found")
    action.output = output

    return action.tag, output
```
run_actions(actions)
Source Code in griptape/tasks/actions_subtask.py
```python
def run_actions(self, actions: list[ToolAction]) -> list[tuple[str, BaseArtifact]]:
    with self.create_futures_executor() as futures_executor:
        return utils.execute_futures_list(
            [futures_executor.submit(with_contextvars(self.run_action), a) for a in actions]
        )
```
try_run()
Source Code in griptape/tasks/actions_subtask.py
```python
def try_run(self) -> ListArtifact | ErrorArtifact:
    try:
        if any(isinstance(a.output, ErrorArtifact) for a in self.actions):
            errors = [a.output.value for a in self.actions if isinstance(a.output, ErrorArtifact)]
            self.output = ErrorArtifact("\n\n".join(errors))
        else:
            results = self.run_actions(self.actions)

            actions_output = []
            for result in results:
                tag, output = result
                output.name = f"{tag} output"
                actions_output.append(output)
            self.output = ListArtifact(actions_output)
    except Exception as e:
        logger.debug("Subtask %s\n%s", self.id, e)
        self.output = ErrorArtifact(str(e), exception=e)
    if self.output is not None:
        return self.output
    return ErrorArtifact("no tool output")
```
AssistantTask
Bases:
BaseTextInputTask[TextArtifact]
Attributes
Name | Type | Description |
---|---|---|
assistant_driver | BaseAssistantDriver | Driver to run the Assistant. |
Source Code in griptape/tasks/assistant_task.py
```python
@define
class AssistantTask(BaseTextInputTask[TextArtifact]):
    """Task to run an Assistant.

    Attributes:
        assistant_driver: Driver to run the Assistant.
    """

    assistant_driver: BaseAssistantDriver = field(kw_only=True)

    def try_run(self) -> TextArtifact:
        return self.assistant_driver.run(self.input)
```
assistant_driver = field(kw_only=True)
class-attribute instance-attribute
try_run()
Source Code in griptape/tasks/assistant_task.py
```python
def try_run(self) -> TextArtifact:
    return self.assistant_driver.run(self.input)
```
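A minimal usage sketch. The driver class and its `assistant_id` parameter are assumptions for illustration; any `BaseAssistantDriver` implementation works:

```python
from griptape.drivers import GriptapeCloudAssistantDriver  # driver choice assumed
from griptape.structures import Pipeline
from griptape.tasks import AssistantTask

task = AssistantTask(
    assistant_driver=GriptapeCloudAssistantDriver(assistant_id="my-assistant-id"),  # hypothetical id
)
Pipeline(tasks=[task]).run("Give me a summary of yesterday's meeting notes.")
```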
AudioTranscriptionTask
Bases:
BaseAudioInputTask[TextArtifact]
Source Code in griptape/tasks/audio_transcription_task.py
```python
@define
class AudioTranscriptionTask(BaseAudioInputTask[TextArtifact]):
    audio_transcription_driver: BaseAudioTranscriptionDriver = field(
        default=Factory(lambda: Defaults.drivers_config.audio_transcription_driver),
        kw_only=True,
    )

    def try_run(self) -> TextArtifact:
        return self.audio_transcription_driver.run(self.input)
```
audio_transcription_driver = field(default=Factory(lambda: Defaults.drivers_config.audio_transcription_driver), kw_only=True)
class-attribute instance-attribute
try_run()
Source Code in griptape/tasks/audio_transcription_task.py
```python
def try_run(self) -> TextArtifact:
    return self.audio_transcription_driver.run(self.input)
```
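A usage sketch, assuming `AudioLoader` for reading the file (the file name is illustrative):

```python
from griptape.loaders import AudioLoader  # import path assumed
from griptape.structures import Pipeline
from griptape.tasks import AudioTranscriptionTask

# With no explicit driver, the task falls back to
# Defaults.drivers_config.audio_transcription_driver.
task = AudioTranscriptionTask(input=AudioLoader().load("speech.wav"))  # hypothetical file
Pipeline(tasks=[task]).run()
```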
BaseAudioGenerationTask
Bases:
ArtifactFileOutputMixin, RuleMixin, BaseTask[AudioArtifact], ABC
Source Code in griptape/tasks/base_audio_generation_task.py
```python
@define
class BaseAudioGenerationTask(ArtifactFileOutputMixin, RuleMixin, BaseTask[AudioArtifact], ABC):
    def before_run(self) -> None:
        super().before_run()
        logger.info("%s %s\nInput: %s", self.__class__.__name__, self.id, self.input.to_text())

    def after_run(self) -> None:
        super().after_run()
        logger.info(
            "%s %s\nOutput: %s",
            self.__class__.__name__,
            self.id,
            self.output.to_text() if self.output is not None else "",
        )
```
after_run()
Source Code in griptape/tasks/base_audio_generation_task.py
```python
def after_run(self) -> None:
    super().after_run()
    logger.info(
        "%s %s\nOutput: %s",
        self.__class__.__name__,
        self.id,
        self.output.to_text() if self.output is not None else "",
    )
```
before_run()
Source Code in griptape/tasks/base_audio_generation_task.py
```python
def before_run(self) -> None:
    super().before_run()
    logger.info("%s %s\nInput: %s", self.__class__.__name__, self.id, self.input.to_text())
```
BaseImageGenerationTask
Bases:
ArtifactFileOutputMixin, RuleMixin, BaseTask[ImageArtifact], ABC
Attributes
Name | Type | Description |
---|---|---|
negative_rulesets | list[Ruleset] | List of negatively-weighted rulesets applied to the text prompt, if supported by the driver. |
negative_rules | list[Rule] | List of negatively-weighted rules applied to the text prompt, if supported by the driver. |
output_dir | Optional[str] | If provided, the generated image will be written to disk in output_dir. |
output_file | Optional[str] | If provided, the generated image will be written to disk as output_file. |
Source Code in griptape/tasks/base_image_generation_task.py
```python
@define
class BaseImageGenerationTask(ArtifactFileOutputMixin, RuleMixin, BaseTask[ImageArtifact], ABC):
    """Provides a base class for image generation-related tasks.

    Attributes:
        negative_rulesets: List of negatively-weighted rulesets applied to the text prompt, if supported by the driver.
        negative_rules: List of negatively-weighted rules applied to the text prompt, if supported by the driver.
        output_dir: If provided, the generated image will be written to disk in output_dir.
        output_file: If provided, the generated image will be written to disk as output_file.
    """

    DEFAULT_NEGATIVE_RULESET_NAME = "Negative Ruleset"

    image_generation_driver: BaseImageGenerationDriver = field(
        default=Factory(lambda: Defaults.drivers_config.image_generation_driver),
        kw_only=True,
    )
    _negative_rulesets: list[Ruleset] = field(factory=list, kw_only=True, alias="negative_rulesets")
    negative_rules: list[Rule] = field(factory=list, kw_only=True)

    @property
    def negative_rulesets(self) -> list[Ruleset]:
        negative_rulesets = self._negative_rulesets

        if self.negative_rules:
            negative_rulesets.append(Ruleset(name=self.DEFAULT_NEGATIVE_RULESET_NAME, rules=self.negative_rules))

        return negative_rulesets

    def _read_from_file(self, path: str) -> ImageArtifact:
        logger.info("Reading image from %s", os.path.abspath(path))
        return ImageLoader().load(Path(path))

    def _get_prompts(self, prompt: str) -> list[str]:
        return [prompt, *[rule.value for ruleset in self.rulesets for rule in ruleset.rules]]

    def _get_negative_prompts(self) -> list[str]:
        return [rule.value for ruleset in self.negative_rulesets for rule in ruleset.rules]
```
DEFAULT_NEGATIVE_RULESET_NAME = 'Negative Ruleset'
class-attribute instance-attribute

_negative_rulesets = field(factory=list, kw_only=True, alias='negative_rulesets')
class-attribute instance-attribute

image_generation_driver = field(default=Factory(lambda: Defaults.drivers_config.image_generation_driver), kw_only=True)
class-attribute instance-attribute

negative_rules = field(factory=list, kw_only=True)
class-attribute instance-attribute

negative_rulesets
property
_get_negative_prompts()
Source Code in griptape/tasks/base_image_generation_task.py
```python
def _get_negative_prompts(self) -> list[str]:
    return [rule.value for ruleset in self.negative_rulesets for rule in ruleset.rules]
```
_get_prompts(prompt)
Source Code in griptape/tasks/base_image_generation_task.py
```python
def _get_prompts(self, prompt: str) -> list[str]:
    return [prompt, *[rule.value for ruleset in self.rulesets for rule in ruleset.rules]]
```
_read_from_file(path)
Source Code in griptape/tasks/base_image_generation_task.py
```python
def _read_from_file(self, path: str) -> ImageArtifact:
    logger.info("Reading image from %s", os.path.abspath(path))
    return ImageLoader().load(Path(path))
```
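A sketch of how rules flatten into prompt lists, using `PromptImageGenerationTask` (a concrete subclass from this module) and the `negative_rules` convenience field; the rule text and the task's constructor signature are assumptions. `negative_rules` are wrapped into a ruleset named `DEFAULT_NEGATIVE_RULESET_NAME`, and `_get_negative_prompts` then collects every rule value:

```python
from griptape.rules import Rule  # import path assumed
from griptape.tasks import PromptImageGenerationTask

task = PromptImageGenerationTask(
    input="A watercolor painting of a lighthouse",
    negative_rules=[Rule("blurry"), Rule("text, watermarks")],
)
print(task._get_negative_prompts())  # ['blurry', 'text, watermarks']
```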
BaseSubtask
Bases:
BaseTask[T]
Source Code in griptape/tasks/base_subtask.py
```python
@define
class BaseSubtask(BaseTask[T]):
    @property
    def origin_task(self) -> BaseTask:
        if self._origin_task is not None:
            return self._origin_task
        raise Exception("ActionSubtask has no origin task.")

    @property
    def parents(self) -> list[BaseTask]:
        if isinstance(self.origin_task, ActionsSubtaskOriginMixin):
            return [self.origin_task.find_subtask(parent_id) for parent_id in self.parent_ids]
        raise Exception("ActionSubtask must be attached to a Task that implements ActionSubtaskOriginMixin.")

    @property
    def children(self) -> list[BaseTask]:
        if isinstance(self.origin_task, ActionsSubtaskOriginMixin):
            return [self.origin_task.find_subtask(child_id) for child_id in self.child_ids]
        raise Exception("ActionSubtask must be attached to a Task that implements ActionSubtaskOriginMixin.")

    def add_child(self, child: BaseTask) -> BaseTask:
        if child.id not in self.child_ids:
            self.child_ids.append(child.id)
        return child

    def add_parent(self, parent: BaseTask) -> BaseTask:
        if parent.id not in self.parent_ids:
            self.parent_ids.append(parent.id)
        return parent

    def attach_to(self, parent_task: BaseTask) -> None:
        self._origin_task = parent_task

    @abstractmethod
    def add_to_prompt_stack(self, stack: PromptStack) -> None: ...
```
children
property

origin_task
property

parents
property
add_child(child)
Source Code in griptape/tasks/base_subtask.py
```python
def add_child(self, child: BaseTask) -> BaseTask:
    if child.id not in self.child_ids:
        self.child_ids.append(child.id)
    return child
```
add_parent(parent)
Source Code in griptape/tasks/base_subtask.py
```python
def add_parent(self, parent: BaseTask) -> BaseTask:
    if parent.id not in self.parent_ids:
        self.parent_ids.append(parent.id)
    return parent
```
add_to_prompt_stack(stack)
abstractmethod
Source Code in griptape/tasks/base_subtask.py
```python
@abstractmethod
def add_to_prompt_stack(self, stack: PromptStack) -> None: ...
```
attach_to(parent_task)
Source Code in griptape/tasks/base_subtask.py
```python
def attach_to(self, parent_task: BaseTask) -> None:
    self._origin_task = parent_task
```
BaseTask
Bases:
FuturesExecutorMixin, SerializableMixin, RunnableMixin['BaseTask'], ABC, Generic[T]
Source Code in griptape/tasks/base_task.py
```python
@define
class BaseTask(FuturesExecutorMixin, SerializableMixin, RunnableMixin["BaseTask"], ABC, Generic[T]):
    class State(Enum):
        PENDING = 1
        RUNNING = 2
        FINISHED = 3
        SKIPPED = 4

    id: str = field(default=Factory(lambda: uuid.uuid4().hex), kw_only=True, metadata={"serializable": True})
    state: State = field(default=State.PENDING, kw_only=True, metadata={"serializable": True})
    parent_ids: list[str] = field(factory=list, kw_only=True, metadata={"serializable": True})
    child_ids: list[str] = field(factory=list, kw_only=True, metadata={"serializable": True})
    max_meta_memory_entries: Optional[int] = field(default=20, kw_only=True, metadata={"serializable": True})
    structure: Optional[Structure] = field(default=None, kw_only=True)
    output: Optional[T] = field(default=None, init=False)
    context: dict[str, Any] = field(factory=dict, kw_only=True, metadata={"serializable": True})
    _execution_args: tuple = field(factory=tuple, init=False)

    @property
    def execution_args(self) -> tuple:
        return self._execution_args

    def __rshift__(self, other: BaseTask | list[BaseTask]) -> BaseTask | list[BaseTask]:
        if isinstance(other, list):
            self.add_children(other)
        else:
            self.add_child(other)
        return other

    def __lshift__(self, other: BaseTask | list[BaseTask]) -> BaseTask | list[BaseTask]:
        if isinstance(other, list):
            self.add_parents(other)
        else:
            self.add_parent(other)
        return other

    def __attrs_post_init__(self) -> None:
        if self.structure is not None:
            self.structure.add_task(self)

    @property
    @abstractmethod
    def input(self) -> BaseArtifact: ...

    @property
    def parents(self) -> list[BaseTask]:
        if self.structure is not None:
            return [self.structure.find_task(parent_id) for parent_id in self.parent_ids]
        raise ValueError("Structure must be set to access parents")

    @property
    def children(self) -> list[BaseTask]:
        if self.structure is not None:
            return [self.structure.find_task(child_id) for child_id in self.child_ids]
        raise ValueError("Structure must be set to access children")

    @property
    def parent_outputs(self) -> dict[str, BaseArtifact]:
        return {parent.id: parent.output for parent in self.parents if parent.output}

    @property
    def parents_output_text(self) -> str:
        return "\n".join([parent.output.to_text() for parent in self.parents if parent.output])

    @property
    def meta_memories(self) -> list[BaseMetaEntry]:
        if self.structure is not None and self.structure.meta_memory:
            if self.max_meta_memory_entries:
                return self.structure.meta_memory.entries[: self.max_meta_memory_entries]
            return self.structure.meta_memory.entries
        return []

    def __str__(self) -> str:
        return str(self.output.value) if self.output is not None else ""

    def add_parents(self, parents: list[BaseTask]) -> None:
        for parent in parents:
            self.add_parent(parent)

    def add_parent(self, parent: BaseTask) -> BaseTask:
        if parent.id not in self.parent_ids:
            self.parent_ids.append(parent.id)
        if self.id not in parent.child_ids:
            parent.child_ids.append(self.id)
        if self.structure is not None and parent not in self.structure.tasks:
            self.structure.add_task(parent)
        return self

    def add_children(self, children: list[BaseTask]) -> None:
        for child in children:
            self.add_child(child)

    def add_child(self, child: BaseTask) -> BaseTask:
        if child.id not in self.child_ids:
            self.child_ids.append(child.id)
        if self.id not in child.parent_ids:
            child.parent_ids.append(self.id)
        if self.structure is not None and child not in self.structure.tasks:
            self.structure.add_task(child)
        return self

    def preprocess(self, structure: Structure) -> BaseTask:
        self.structure = structure
        return self

    def is_pending(self) -> bool:
        return self.state == BaseTask.State.PENDING

    def is_finished(self) -> bool:
        return self.state == BaseTask.State.FINISHED

    def is_running(self) -> bool:
        return self.state == BaseTask.State.RUNNING

    def is_skipped(self) -> bool:
        return self.state == BaseTask.State.SKIPPED

    def before_run(self) -> None:
        super().before_run()
        if self.structure is not None:
            EventBus.publish_event(
                StartTaskEvent(
                    task_id=self.id,
                    task_parent_ids=self.parent_ids,
                    task_child_ids=self.child_ids,
                    task_input=self.input,
                    task_output=self.output,
                ),
            )

    def run(self, *args) -> T:
        try:
            self._execution_args = args
            self.state = BaseTask.State.RUNNING
            self.before_run()
            self.output = self.try_run()
            self.after_run()
        except Exception as e:
            logger.exception("%s %s\n%s", self.__class__.__name__, self.id, e)
            self.output = cast("T", ErrorArtifact(str(e), exception=e))
        finally:
            self.state = BaseTask.State.FINISHED
        return self.output

    def after_run(self) -> None:
        super().after_run()
        if self.structure is not None:
            EventBus.publish_event(
                FinishTaskEvent(
                    task_id=self.id,
                    task_parent_ids=self.parent_ids,
                    task_child_ids=self.child_ids,
                    task_input=self.input,
                    task_output=self.output,
                ),
            )

    def can_run(self) -> bool:
        # If this Task has been skipped or is not pending, it should not run
        if self.is_skipped() or not self.is_pending():
            return False

        # If this Task has parents, and _all_ of them are skipped, it should not run
        if self.parents and all(parent.is_skipped() for parent in self.parents):
            self.state = BaseTask.State.SKIPPED
            return False

        # If _all_ this Task's unskipped parents are finished, it should run
        unskipped_parents = [parent for parent in self.parents if not parent.is_skipped()]
        return all(parent.is_finished() for parent in unskipped_parents)

    def reset(self) -> BaseTask:
        self.state = BaseTask.State.PENDING
        self.output = None
        self._execution_args = ()
        return self

    @abstractmethod
    def try_run(self) -> T: ...

    @property
    def full_context(self) -> dict[str, Any]:
        # Need to deep copy so that the serialized context doesn't contain non-serializable data
        context = deepcopy(self.context)
        if self.structure is None:
            context.update({"args": self._execution_args})
        else:
            context.update(self.structure.context(self))
        return context
```
_execution_args = field(factory=tuple, init=False)
class-attribute instance-attribute

child_ids = field(factory=list, kw_only=True, metadata={'serializable': True})
class-attribute instance-attribute

children
property

context = field(factory=dict, kw_only=True, metadata={'serializable': True})
class-attribute instance-attribute

execution_args
property

full_context
property

id = field(default=Factory(lambda: uuid.uuid4().hex), kw_only=True, metadata={'serializable': True})
class-attribute instance-attribute

input
abstractmethod property

max_meta_memory_entries = field(default=20, kw_only=True, metadata={'serializable': True})
class-attribute instance-attribute

meta_memories
property

output = field(default=None, init=False)
class-attribute instance-attribute

parent_ids = field(factory=list, kw_only=True, metadata={'serializable': True})
class-attribute instance-attribute

parent_outputs
property

parents
property

parents_output_text
property

state = field(default=State.PENDING, kw_only=True, metadata={'serializable': True})
class-attribute instance-attribute

structure = field(default=None, kw_only=True)
class-attribute instance-attribute
State
Bases:
Enum

Source Code in griptape/tasks/base_task.py
```python
class State(Enum):
    PENDING = 1
    RUNNING = 2
    FINISHED = 3
    SKIPPED = 4
```
FINISHED = 3
class-attribute instance-attribute

PENDING = 1
class-attribute instance-attribute

RUNNING = 2
class-attribute instance-attribute

SKIPPED = 4
class-attribute instance-attribute
__attrs_post_init__()
Source Code in griptape/tasks/base_task.py
```python
def __attrs_post_init__(self) -> None:
    if self.structure is not None:
        self.structure.add_task(self)
```
__lshift__(other)
Source Code in griptape/tasks/base_task.py
```python
def __lshift__(self, other: BaseTask | list[BaseTask]) -> BaseTask | list[BaseTask]:
    if isinstance(other, list):
        self.add_parents(other)
    else:
        self.add_parent(other)
    return other
```
__rshift__(other)
Source Code in griptape/tasks/base_task.py
```python
def __rshift__(self, other: BaseTask | list[BaseTask]) -> BaseTask | list[BaseTask]:
    if isinstance(other, list):
        self.add_children(other)
    else:
        self.add_child(other)
    return other
```
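Because `__rshift__` and `__lshift__` delegate to `add_child`/`add_parent` and return `other`, task graphs can be declared with shift operators that chain left to right. A sketch (task ids, prompts, and the availability of `parents_output_text` in the template context are assumptions):

```python
from griptape.structures import Workflow
from griptape.tasks import PromptTask

research = PromptTask("Research {{ args[0] }}", id="research")
summarize = PromptTask("Summarize: {{ parents_output_text }}", id="summarize")

# Wires summarize as a child of research; equivalent to research.add_child(summarize).
research >> summarize

Workflow(tasks=[research, summarize]).run("solar panels")
```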
__str__()
Source Code in griptape/tasks/base_task.py
```python
def __str__(self) -> str:
    return str(self.output.value) if self.output is not None else ""
```
add_child(child)
Source Code in griptape/tasks/base_task.py
```python
def add_child(self, child: BaseTask) -> BaseTask:
    if child.id not in self.child_ids:
        self.child_ids.append(child.id)
    if self.id not in child.parent_ids:
        child.parent_ids.append(self.id)
    if self.structure is not None and child not in self.structure.tasks:
        self.structure.add_task(child)
    return self
```
add_children(children)
Source Code in griptape/tasks/base_task.py
```python
def add_children(self, children: list[BaseTask]) -> None:
    for child in children:
        self.add_child(child)
```
add_parent(parent)
Source Code in griptape/tasks/base_task.py
```python
def add_parent(self, parent: BaseTask) -> BaseTask:
    if parent.id not in self.parent_ids:
        self.parent_ids.append(parent.id)
    if self.id not in parent.child_ids:
        parent.child_ids.append(self.id)
    if self.structure is not None and parent not in self.structure.tasks:
        self.structure.add_task(parent)
    return self
```
add_parents(parents)
Source Code in griptape/tasks/base_task.py
```python
def add_parents(self, parents: list[BaseTask]) -> None:
    for parent in parents:
        self.add_parent(parent)
```
after_run()
Source Code in griptape/tasks/base_task.py
```python
def after_run(self) -> None:
    super().after_run()
    if self.structure is not None:
        EventBus.publish_event(
            FinishTaskEvent(
                task_id=self.id,
                task_parent_ids=self.parent_ids,
                task_child_ids=self.child_ids,
                task_input=self.input,
                task_output=self.output,
            ),
        )
```
before_run()
Source Code in griptape/tasks/base_task.py
```python
def before_run(self) -> None:
    super().before_run()
    if self.structure is not None:
        EventBus.publish_event(
            StartTaskEvent(
                task_id=self.id,
                task_parent_ids=self.parent_ids,
                task_child_ids=self.child_ids,
                task_input=self.input,
                task_output=self.output,
            ),
        )
```
can_run()
Source Code in griptape/tasks/base_task.py
```python
def can_run(self) -> bool:
    # If this Task has been skipped or is not pending, it should not run
    if self.is_skipped() or not self.is_pending():
        return False

    # If this Task has parents, and _all_ of them are skipped, it should not run
    if self.parents and all(parent.is_skipped() for parent in self.parents):
        self.state = BaseTask.State.SKIPPED
        return False

    # If _all_ this Task's unskipped parents are finished, it should run
    unskipped_parents = [parent for parent in self.parents if not parent.is_skipped()]
    return all(parent.is_finished() for parent in unskipped_parents)
```
is_finished()
Source Code in griptape/tasks/base_task.py
```python
def is_finished(self) -> bool:
    return self.state == BaseTask.State.FINISHED
```
is_pending()
Source Code in griptape/tasks/base_task.py
```python
def is_pending(self) -> bool:
    return self.state == BaseTask.State.PENDING
```
is_running()
Source Code in griptape/tasks/base_task.py
```python
def is_running(self) -> bool:
    return self.state == BaseTask.State.RUNNING
```
is_skipped()
Source Code in griptape/tasks/base_task.py
```python
def is_skipped(self) -> bool:
    return self.state == BaseTask.State.SKIPPED
```
preprocess(structure)
Source Code in griptape/tasks/base_task.py
```python
def preprocess(self, structure: Structure) -> BaseTask:
    self.structure = structure
    return self
```
reset()
Source Code in griptape/tasks/base_task.py
```python
def reset(self) -> BaseTask:
    self.state = BaseTask.State.PENDING
    self.output = None
    self._execution_args = ()
    return self
```
run(*args)
Source Code in griptape/tasks/base_task.py
```python
def run(self, *args) -> T:
    try:
        self._execution_args = args
        self.state = BaseTask.State.RUNNING
        self.before_run()
        self.output = self.try_run()
        self.after_run()
    except Exception as e:
        logger.exception("%s %s\n%s", self.__class__.__name__, self.id, e)
        self.output = cast("T", ErrorArtifact(str(e), exception=e))
    finally:
        self.state = BaseTask.State.FINISHED
    return self.output
```
try_run()
abstractmethod
Source Code in griptape/tasks/base_task.py
```python
@abstractmethod
def try_run(self) -> T: ...
```
BaseTextInputTask
Bases:
RuleMixin, BaseTask[T], ABC
Source Code in griptape/tasks/base_text_input_task.py
```python
@define
class BaseTextInputTask(RuleMixin, BaseTask[T], ABC):
    DEFAULT_INPUT_TEMPLATE = "{{ args[0] }}"

    _input: Union[str, TextArtifact, Callable[[BaseTask], TextArtifact]] = field(
        default=DEFAULT_INPUT_TEMPLATE,
        alias="input",
    )

    @property
    def input(self) -> TextArtifact:
        if isinstance(self._input, TextArtifact):
            return self._input
        if isinstance(self._input, Callable):
            return self._input(self)
        return TextArtifact(J2().render_from_string(self._input, **self.full_context))

    @input.setter
    def input(self, value: str | TextArtifact | Callable[[BaseTask], TextArtifact]) -> None:
        self._input = value

    def before_run(self) -> None:
        super().before_run()
        logger.info("%s %s\nInput: %s", self.__class__.__name__, self.id, self.input.to_text())

    def after_run(self) -> None:
        super().after_run()
        logger.info(
            "%s %s\nOutput: %s",
            self.__class__.__name__,
            self.id,
            self.output.to_text() if self.output is not None else "",
        )
```
DEFAULT_INPUT_TEMPLATE = '{{ args[0] }}'
class-attribute instance-attribute

_input = field(default=DEFAULT_INPUT_TEMPLATE, alias='input')
class-attribute instance-attribute

input
property writable
after_run()
Source Code in griptape/tasks/base_text_input_task.py
```python
def after_run(self) -> None:
    super().after_run()
    logger.info(
        "%s %s\nOutput: %s",
        self.__class__.__name__,
        self.id,
        self.output.to_text() if self.output is not None else "",
    )
```
before_run()
Source Code in griptape/tasks/base_text_input_task.py
```python
def before_run(self) -> None:
    super().before_run()
    logger.info("%s %s\nInput: %s", self.__class__.__name__, self.id, self.input.to_text())
```
BranchTask
Bases:
CodeExecutionTask[Union[InfoArtifact, ListArtifact[InfoArtifact]]]
Source Code in griptape/tasks/branch_task.py
```python
@define
class BranchTask(CodeExecutionTask[Union[InfoArtifact, ListArtifact[InfoArtifact]]]):
    on_run: Callable[[BranchTask], Union[InfoArtifact, ListArtifact[InfoArtifact]]] = field(kw_only=True)

    def try_run(self) -> InfoArtifact | ListArtifact[InfoArtifact]:
        result = self.on_run(self)

        if isinstance(result, ListArtifact):
            branch_task_ids = {artifact.value for artifact in result}
        else:
            branch_task_ids = {result.value}

        if not all(branch_task_id in self.child_ids for branch_task_id in branch_task_ids):
            raise ValueError(f"Branch task returned invalid child task id {branch_task_ids}")

        if self.structure is not None:
            children_to_skip = [child for child in self.children if child.id not in branch_task_ids]
            for child in children_to_skip:
                child.state = BaseTask.State.SKIPPED

        return result
```
on_run = field(kw_only=True)
class-attribute instance-attribute
try_run()
Source Code in griptape/tasks/branch_task.py
```python
def try_run(self) -> InfoArtifact | ListArtifact[InfoArtifact]:
    result = self.on_run(self)

    if isinstance(result, ListArtifact):
        branch_task_ids = {artifact.value for artifact in result}
    else:
        branch_task_ids = {result.value}

    if not all(branch_task_id in self.child_ids for branch_task_id in branch_task_ids):
        raise ValueError(f"Branch task returned invalid child task id {branch_task_ids}")

    if self.structure is not None:
        children_to_skip = [child for child in self.children if child.id not in branch_task_ids]
        for child in children_to_skip:
            child.state = BaseTask.State.SKIPPED

    return result
```
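A sketch of a two-way branch. `on_run` must return an `InfoArtifact` (or a `ListArtifact` of them) whose values are ids of this task's children; unselected children are marked `SKIPPED`, and `BaseTask.can_run` then propagates the skip to descendants whose parents were all skipped. The ids and the routing predicate are illustrative:

```python
from griptape.artifacts import InfoArtifact
from griptape.structures import Workflow
from griptape.tasks import BranchTask, PromptTask

def choose(task: BranchTask) -> InfoArtifact:
    # Route on the structure's run argument (hypothetical predicate).
    return InfoArtifact("happy" if "good" in task.full_context["args"][0] else "sad")

branch = BranchTask(on_run=choose, id="branch")
happy = PromptTask("Write a cheerful reply", id="happy")
sad = PromptTask("Write a consoling reply", id="sad")
branch.add_children([happy, sad])

Workflow(tasks=[branch, happy, sad]).run("good news")
```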
CodeExecutionTask
Bases:
BaseTask[T]
Source Code in griptape/tasks/code_execution_task.py
```python
@define
class CodeExecutionTask(BaseTask[T]):
    DEFAULT_INPUT_TEMPLATE = "{{ args[0] }}"

    _input: Union[str, TextArtifact, Callable[[BaseTask], TextArtifact]] = field(
        default=DEFAULT_INPUT_TEMPLATE,
        alias="input",
    )
    on_run: Callable[[CodeExecutionTask[T]], T] = field(kw_only=True)

    @property
    def input(self) -> TextArtifact:
        if isinstance(self._input, TextArtifact):
            return self._input
        if callable(self._input):
            return self._input(self)
        return TextArtifact(J2().render_from_string(self._input, **self.full_context))

    def try_run(self) -> T:
        return self.on_run(self)
```
DEFAULT_INPUT_TEMPLATE = '{{ args[0] }}'
class-attribute instance-attribute

_input = field(default=DEFAULT_INPUT_TEMPLATE, alias='input')
class-attribute instance-attribute

input
property

on_run = field(kw_only=True)
class-attribute instance-attribute
try_run()
Source Code in griptape/tasks/code_execution_task.py
```python
def try_run(self) -> T:
    return self.on_run(self)
```
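A minimal usage sketch; `on_run` receives the task itself and must return the artifact type the task is parameterized over (the string-reversal logic is illustrative):

```python
from griptape.artifacts import TextArtifact
from griptape.structures import Pipeline
from griptape.tasks import CodeExecutionTask

def reverse_input(task: CodeExecutionTask) -> TextArtifact:
    # task.input renders the default "{{ args[0] }}" template against the run args.
    return TextArtifact(task.input.value[::-1])

pipeline = Pipeline(tasks=[CodeExecutionTask(on_run=reverse_input)])
pipeline.run("hello")  # the task's output artifact value is "olleh"
```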
ExtractionTask
Bases:
BaseTextInputTask[ListArtifact]
Source Code in griptape/tasks/extraction_task.py
```python
@define
class ExtractionTask(BaseTextInputTask[ListArtifact]):
    extraction_engine: BaseExtractionEngine = field(kw_only=True)
    args: dict = field(kw_only=True, factory=dict)

    def try_run(self) -> ListArtifact:
        return self.extraction_engine.extract_artifacts(ListArtifact([self.input]), rulesets=self.rulesets, **self.args)
```
args = field(kw_only=True, factory=dict)
class-attribute instance-attribute

extraction_engine = field(kw_only=True)
class-attribute instance-attribute
try_run()
Source Code in griptape/tasks/extraction_task.py
```python
def try_run(self) -> ListArtifact:
    return self.extraction_engine.extract_artifacts(ListArtifact([self.input]), rulesets=self.rulesets, **self.args)
```
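A usage sketch with a CSV extraction engine; the engine class and its `column_names` parameter are assumptions for illustration, and any `BaseExtractionEngine` fits:

```python
from griptape.engines import CsvExtractionEngine  # import path and parameters assumed
from griptape.structures import Pipeline
from griptape.tasks import ExtractionTask

task = ExtractionTask(
    extraction_engine=CsvExtractionEngine(column_names=["name", "age"]),
)
Pipeline(tasks=[task]).run("Alice is 30 and Bob is 25.")
```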
InpaintingImageGenerationTask
Bases:
BaseImageGenerationTask
Attributes
Name | Type | Description |
---|---|---|
image_generation_driver | BaseImageGenerationDriver | The driver used to generate the image. |
negative_rulesets | list[Ruleset] | List of negatively-weighted rulesets applied to the text prompt, if supported by the driver. |
negative_rules | list[Rule] | List of negatively-weighted rules applied to the text prompt, if supported by the driver. |
output_dir | Optional[str] | If provided, the generated image will be written to disk in output_dir. |
output_file | Optional[str] | If provided, the generated image will be written to disk as output_file. |
Source Code in griptape/tasks/inpainting_image_generation_task.py
```python
@define
class InpaintingImageGenerationTask(BaseImageGenerationTask):
    """A task that modifies a select region within an image using a mask.

    Accepts a text prompt, image, and mask as input in one of the following formats:
    - tuple of (template string, ImageArtifact, ImageArtifact)
    - tuple of (TextArtifact, ImageArtifact, ImageArtifact)
    - Callable that returns a tuple of (TextArtifact, ImageArtifact, ImageArtifact).

    Attributes:
        image_generation_driver: The driver used to generate the image.
        negative_rulesets: List of negatively-weighted rulesets applied to the text prompt, if supported by the driver.
        negative_rules: List of negatively-weighted rules applied to the text prompt, if supported by the driver.
        output_dir: If provided, the generated image will be written to disk in output_dir.
        output_file: If provided, the generated image will be written to disk as output_file.
    """

    _input: Union[
        tuple[Union[str, TextArtifact], ImageArtifact, ImageArtifact], Callable[[BaseTask], ListArtifact], ListArtifact
    ] = field(default=None, alias="input")

    @property
    def input(self) -> ListArtifact:
        if isinstance(self._input, ListArtifact):
            return self._input
        if isinstance(self._input, tuple):
            if isinstance(self._input[0], TextArtifact):
                input_text = self._input[0]
            else:
                input_text = TextArtifact(J2().render_from_string(self._input[0], **self.full_context))
            return ListArtifact([input_text, self._input[1], self._input[2]])
        if isinstance(self._input, Callable):
            return self._input(self)
        raise ValueError("Input must be a tuple of (text, image, mask) or a callable that returns such a tuple.")

    @input.setter
    def input(
        self,
        value: tuple[str | TextArtifact, ImageArtifact, ImageArtifact] | Callable[[BaseTask], ListArtifact],
    ) -> None:
        self._input = value

    def try_run(self) -> ImageArtifact:
        prompt_artifact = self.input[0]

        image_artifact = self.input[1]
        if not isinstance(image_artifact, ImageArtifact):
            raise ValueError("Image must be an ImageArtifact.")

        mask_artifact = self.input[2]
        if not isinstance(mask_artifact, ImageArtifact):
            raise ValueError("Mask must be an ImageArtifact.")

        output_image_artifact = self.image_generation_driver.run_image_inpainting(
            prompts=self._get_prompts(prompt_artifact.to_text()),
            negative_prompts=self._get_negative_prompts(),
            image=image_artifact,
            mask=mask_artifact,
        )

        if self.output_dir or self.output_file:
            self._write_to_file(output_image_artifact)

        return output_image_artifact
```
_input = field(default=None, alias='input')
class-attribute instance-attribute

input
property writable
try_run()
Source Code in griptape/tasks/inpainting_image_generation_task.py
```python
def try_run(self) -> ImageArtifact:
    prompt_artifact = self.input[0]

    image_artifact = self.input[1]
    if not isinstance(image_artifact, ImageArtifact):
        raise ValueError("Image must be an ImageArtifact.")

    mask_artifact = self.input[2]
    if not isinstance(mask_artifact, ImageArtifact):
        raise ValueError("Mask must be an ImageArtifact.")

    output_image_artifact = self.image_generation_driver.run_image_inpainting(
        prompts=self._get_prompts(prompt_artifact.to_text()),
        negative_prompts=self._get_negative_prompts(),
        image=image_artifact,
        mask=mask_artifact,
    )

    if self.output_dir or self.output_file:
        self._write_to_file(output_image_artifact)

    return output_image_artifact
```
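A usage sketch of the (template string, image, mask) input form described in the class docstring; the loader import and file names are assumptions, and the default image generation driver is used:

```python
from griptape.loaders import ImageLoader  # import path assumed
from griptape.structures import Pipeline
from griptape.tasks import InpaintingImageGenerationTask

task = InpaintingImageGenerationTask(
    input=(
        "Replace the masked region with {{ args[0] }}",  # rendered against run args
        ImageLoader().load("photo.png"),                 # hypothetical image file
        ImageLoader().load("region_mask.png"),           # hypothetical mask file
    ),
    output_file="inpainted.png",
)
Pipeline(tasks=[task]).run("a field of sunflowers")
```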
OutpaintingImageGenerationTask
Bases:
BaseImageGenerationTask
Attributes
Name | Type | Description |
---|---|---|
image_generation_driver | BaseImageGenerationDriver | The engine used to generate the image. |
negative_rulesets | list[Ruleset] | List of negatively-weighted rulesets applied to the text prompt, if supported by the driver. |
negative_rules | list[Rule] | List of negatively-weighted rules applied to the text prompt, if supported by the driver. |
output_dir | Optional[str] | If provided, the generated image will be written to disk in output_dir. |
output_file | Optional[str] | If provided, the generated image will be written to disk as output_file. |
Source Code in griptape/tasks/outpainting_image_generation_task.py
```python
@define
class OutpaintingImageGenerationTask(BaseImageGenerationTask):
    """A task that modifies an image outside the bounds of a mask.

    Accepts a text prompt, image, and mask as input in one of the following formats:
    - tuple of (template string, ImageArtifact, ImageArtifact)
    - tuple of (TextArtifact, ImageArtifact, ImageArtifact)
    - Callable that returns a tuple of (TextArtifact, ImageArtifact, ImageArtifact).

    Attributes:
        image_generation_driver: The engine used to generate the image.
        negative_rulesets: List of negatively-weighted rulesets applied to the text prompt, if supported by the driver.
        negative_rules: List of negatively-weighted rules applied to the text prompt, if supported by the driver.
        output_dir: If provided, the generated image will be written to disk in output_dir.
        output_file: If provided, the generated image will be written to disk as output_file.
    """

    _input: Union[
        tuple[Union[str, TextArtifact], ImageArtifact, ImageArtifact], Callable[[BaseTask], ListArtifact], ListArtifact
    ] = field(default=None, alias="input")

    @property
    def input(self) -> ListArtifact:
        if isinstance(self._input, ListArtifact):
            return self._input
        if isinstance(self._input, tuple):
            if isinstance(self._input[0], TextArtifact):
                input_text = self._input[0]
            else:
                input_text = TextArtifact(J2().render_from_string(self._input[0], **self.full_context))
            return ListArtifact([input_text, self._input[1], self._input[2]])
        if isinstance(self._input, Callable):
            return self._input(self)
        raise ValueError("Input must be a tuple of (text, image, mask) or a callable that returns such a tuple.")

    @input.setter
    def input(
        self,
        value: tuple[str | TextArtifact, ImageArtifact, ImageArtifact] | Callable[[BaseTask], ListArtifact],
    ) -> None:
        self._input = value

    def try_run(self) -> ImageArtifact:
        prompt_artifact = self.input[0]

        image_artifact = self.input[1]
        if not isinstance(image_artifact, ImageArtifact):
            raise ValueError("Image must be an ImageArtifact.")

        mask_artifact = self.input[2]
        if not isinstance(mask_artifact, ImageArtifact):
            raise ValueError("Mask must be an ImageArtifact.")

        output_image_artifact = self.image_generation_driver.run_image_outpainting(
            prompts=self._get_prompts(prompt_artifact.to_text()),
            negative_prompts=self._get_negative_prompts(),
            image=image_artifact,
            mask=mask_artifact,
        )

        if self.output_dir or self.output_file:
            self._write_to_file(output_image_artifact)

        return output_image_artifact
```
_input = field(default=None, alias='input')
class-attribute instance-attribute

input
property writable
try_run()
Source Code in griptape/tasks/outpainting_image_generation_task.py
def try_run(self) -> ImageArtifact:
    prompt_artifact = self.input[0]

    image_artifact = self.input[1]
    if not isinstance(image_artifact, ImageArtifact):
        raise ValueError("Image must be an ImageArtifact.")

    mask_artifact = self.input[2]
    if not isinstance(mask_artifact, ImageArtifact):
        raise ValueError("Mask must be an ImageArtifact.")

    output_image_artifact = self.image_generation_driver.run_image_outpainting(
        prompts=self._get_prompts(prompt_artifact.to_text()),
        negative_prompts=self._get_negative_prompts(),
        image=image_artifact,
        mask=mask_artifact,
    )

    if self.output_dir or self.output_file:
        self._write_to_file(output_image_artifact)

    return output_image_artifact
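For orientation, a minimal usage sketch follows; the same three-part input shape applies to InpaintingImageGenerationTask, which regenerates inside the mask rather than outside it. The file names are illustrative assumptions, and the driver falls back to Defaults.drivers_config.image_generation_driver when none is supplied:

from griptape.loaders import ImageLoader
from griptape.tasks import OutpaintingImageGenerationTask

# Hypothetical input files; ImageLoader().load() returns an ImageArtifact.
image = ImageLoader().load("photo.png")
mask = ImageLoader().load("mask.png")

task = OutpaintingImageGenerationTask(
    input=("Extend the scene beyond the masked region", image, mask),
    output_file="outpainted.png",  # persisted via _write_to_file(), per try_run() above
)
image_artifact = task.run()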
OutputSchemaValidationSubtask
Bases:
BaseSubtask
Source Code in griptape/tasks/output_schema_validation_subtask.py
@define
class OutputSchemaValidationSubtask(BaseSubtask):
    _input: BaseArtifact = field(alias="input")
    output_schema: Union[Schema, type[BaseModel]] = field(kw_only=True)
    structured_output_strategy: StructuredOutputStrategy = field(
        default="rule", kw_only=True, metadata={"serializable": True}
    )
    generate_assistant_subtask_template: Callable[[OutputSchemaValidationSubtask], str] = field(
        default=Factory(lambda self: self.default_generate_assistant_subtask_template, takes_self=True),
        kw_only=True,
    )
    generate_user_subtask_template: Callable[[OutputSchemaValidationSubtask], str] = field(
        default=Factory(lambda self: self.default_generate_user_subtask_template, takes_self=True),
        kw_only=True,
    )
    _validation_errors: str | None = field(default=None, init=False)

    @property
    def input(self) -> BaseArtifact:
        return self._input

    @input.setter
    def input(self, value: BaseArtifact) -> None:
        self._input = value

    @property
    def validation_errors(self) -> str | None:
        return self._validation_errors

    def attach_to(self, parent_task: BaseTask) -> None:
        super().attach_to(parent_task)
        try:
            # With `native` or `rule` strategies, the output will be a json string that can be parsed.
            # With the `tool` strategy, the output will already be a `JsonArtifact`.
            if self.structured_output_strategy in ("native", "rule"):
                value_to_validate = (
                    self.input.value if isinstance(self.input.value, str) else json.dumps(self.input.value)
                )
                if isinstance(self.output_schema, Schema):
                    self.output_schema.validate(json.loads(value_to_validate))
                    self.output = JsonArtifact(self.input.value)
                else:
                    model = TypeAdapter(self.output_schema).validate_json(value_to_validate)
                    self.output = ModelArtifact(model)
            else:
                self.output = self.input
        except SchemaError as e:
            self._validation_errors = str(e)
        except ValidationError as e:
            self._validation_errors = str(e.errors())

    def before_run(self) -> None:
        logger.info("%s Validating: %s", self.__class__.__name__, self.input.value)

    def try_run(self) -> BaseArtifact:
        if self._validation_errors is None:
            return self._input
        return ErrorArtifact(
            value=f"Validation error: {self._validation_errors}",
        )

    def after_run(self) -> None:
        if self._validation_errors is None:
            logger.info("%s Validation successful", self.__class__.__name__)
        else:
            logger.error("%s Validation error: %s", self.__class__.__name__, self._validation_errors)

    def add_to_prompt_stack(self, stack: PromptStack) -> None:
        if self.output is None:
            return

        stack.add_assistant_message(self.generate_assistant_subtask_template(self))
        stack.add_user_message(self.generate_user_subtask_template(self))

    def default_generate_assistant_subtask_template(self, subtask: OutputSchemaValidationSubtask) -> str:
        return J2("tasks/prompt_task/assistant_output_schema_validation_subtask.j2").render(
            subtask=subtask,
        )

    def default_generate_user_subtask_template(self, subtask: OutputSchemaValidationSubtask) -> str:
        return J2("tasks/prompt_task/user_output_schema_validation_subtask.j2").render(
            subtask=subtask,
        )
_input = field(alias='input')
class-attribute instance-attribute

_validation_errors = field(default=None, init=False)
class-attribute instance-attribute

generate_assistant_subtask_template = field(default=Factory(lambda self: self.default_generate_assistant_subtask_template, takes_self=True), kw_only=True)
class-attribute instance-attribute

generate_user_subtask_template = field(default=Factory(lambda self: self.default_generate_user_subtask_template, takes_self=True), kw_only=True)
class-attribute instance-attribute

input
property writable

output_schema = field(kw_only=True)
class-attribute instance-attribute

structured_output_strategy = field(default='rule', kw_only=True, metadata={'serializable': True})
class-attribute instance-attribute

validation_errors
property
add_to_prompt_stack(stack)
Source Code in griptape/tasks/output_schema_validation_subtask.py
def add_to_prompt_stack(self, stack: PromptStack) -> None:
    if self.output is None:
        return

    stack.add_assistant_message(self.generate_assistant_subtask_template(self))
    stack.add_user_message(self.generate_user_subtask_template(self))
after_run()
Source Code in griptape/tasks/output_schema_validation_subtask.py
def after_run(self) -> None:
    if self._validation_errors is None:
        logger.info("%s Validation successful", self.__class__.__name__)
    else:
        logger.error("%s Validation error: %s", self.__class__.__name__, self._validation_errors)
attach_to(parent_task)
Source Code in griptape/tasks/output_schema_validation_subtask.py
def attach_to(self, parent_task: BaseTask) -> None:
    super().attach_to(parent_task)
    try:
        # With `native` or `rule` strategies, the output will be a json string that can be parsed.
        # With the `tool` strategy, the output will already be a `JsonArtifact`.
        if self.structured_output_strategy in ("native", "rule"):
            value_to_validate = (
                self.input.value if isinstance(self.input.value, str) else json.dumps(self.input.value)
            )
            if isinstance(self.output_schema, Schema):
                self.output_schema.validate(json.loads(value_to_validate))
                self.output = JsonArtifact(self.input.value)
            else:
                model = TypeAdapter(self.output_schema).validate_json(value_to_validate)
                self.output = ModelArtifact(model)
        else:
            self.output = self.input
    except SchemaError as e:
        self._validation_errors = str(e)
    except ValidationError as e:
        self._validation_errors = str(e.errors())
before_run()
Source Code in griptape/tasks/output_schema_validation_subtask.py
def before_run(self) -> None:
    logger.info("%s Validating: %s", self.__class__.__name__, self.input.value)
default_generate_assistant_subtask_template(subtask)
Source Code in griptape/tasks/output_schema_validation_subtask.py
def default_generate_assistant_subtask_template(self, subtask: OutputSchemaValidationSubtask) -> str:
    return J2("tasks/prompt_task/assistant_output_schema_validation_subtask.j2").render(
        subtask=subtask,
    )
default_generate_user_subtask_template(subtask)
Source Code in griptape/tasks/output_schema_validation_subtask.py
def default_generate_user_subtask_template(self, subtask: OutputSchemaValidationSubtask) -> str:
    return J2("tasks/prompt_task/user_output_schema_validation_subtask.j2").render(
        subtask=subtask,
    )
try_run()
Source Code in griptape/tasks/output_schema_validation_subtask.py
def try_run(self) -> BaseArtifact:
    if self._validation_errors is None:
        return self._input
    return ErrorArtifact(
        value=f"Validation error: {self._validation_errors}",
    )
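In practice this subtask is rarely constructed by hand: PromptTask creates it for you when output_schema is set (see default_run_output_schema_validation_subtasks under PromptTask below). A minimal sketch, assuming a pydantic model as the schema:

from pydantic import BaseModel

from griptape.tasks import PromptTask


class Movie(BaseModel):
    title: str
    year: int


# On success the task output is a ModelArtifact wrapping a validated Movie;
# on validation failure the subtask returns an ErrorArtifact and the task
# re-prompts, up to max_subtasks attempts.
task = PromptTask("Name a movie released in 1994.", output_schema=Movie)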
PromptImageGenerationTask
Bases:
BaseImageGenerationTask
Attributes
Name | Type | Description |
---|---|---|
image_generation_driver | BaseImageGenerationDriver | The engine used to generate the image. |
negative_rulesets | list[Ruleset] | List of negatively-weighted rulesets applied to the text prompt, if supported by the driver. |
negative_rules | list[Rule] | List of negatively-weighted rules applied to the text prompt, if supported by the driver. |
output_dir | Optional[str] | If provided, the generated image will be written to disk in output_dir. |
output_file | Optional[str] | If provided, the generated image will be written to disk as output_file. |
Source Code in griptape/tasks/prompt_image_generation_task.py
@define
class PromptImageGenerationTask(BaseImageGenerationTask):
    """Used to generate an image from a text prompt.

    Accepts prompt as input in one of the following formats:
    - template string
    - TextArtifact
    - Callable that returns a TextArtifact.

    Attributes:
        image_generation_driver: The engine used to generate the image.
        negative_rulesets: List of negatively-weighted rulesets applied to the text prompt, if supported by the driver.
        negative_rules: List of negatively-weighted rules applied to the text prompt, if supported by the driver.
        output_dir: If provided, the generated image will be written to disk in output_dir.
        output_file: If provided, the generated image will be written to disk as output_file.
    """

    DEFAULT_INPUT_TEMPLATE = "{{ args[0] }}"

    _input: Union[str, TextArtifact, Callable[[BaseTask], TextArtifact]] = field(
        default=DEFAULT_INPUT_TEMPLATE, alias="input"
    )

    @property
    def input(self) -> TextArtifact:
        if isinstance(self._input, TextArtifact):
            return self._input
        if isinstance(self._input, Callable):
            return self._input(self)
        return TextArtifact(J2().render_from_string(self._input, **self.full_context))

    @input.setter
    def input(self, value: TextArtifact) -> None:
        self._input = value

    def try_run(self) -> ImageArtifact:
        image_artifact = self.image_generation_driver.run_text_to_image(
            prompts=self._get_prompts(self.input.to_text()),
            negative_prompts=self._get_negative_prompts(),
        )

        if self.output_dir or self.output_file:
            self._write_to_file(image_artifact)

        return image_artifact
DEFAULT_INPUT_TEMPLATE = '{{ args[0] }}'
class-attribute instance-attribute

_input = field(default=DEFAULT_INPUT_TEMPLATE, alias='input')
class-attribute instance-attribute

input
property writable
try_run()
Source Code in griptape/tasks/prompt_image_generation_task.py
def try_run(self) -> ImageArtifact:
    image_artifact = self.image_generation_driver.run_text_to_image(
        prompts=self._get_prompts(self.input.to_text()),
        negative_prompts=self._get_negative_prompts(),
    )

    if self.output_dir or self.output_file:
        self._write_to_file(image_artifact)

    return image_artifact
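A minimal usage sketch; the prompt and output file name are illustrative, and the driver defaults to Defaults.drivers_config.image_generation_driver unless set explicitly:

from griptape.tasks import PromptImageGenerationTask

# The input can be a literal string, a Jinja template, a TextArtifact,
# or a callable, per the property above.
task = PromptImageGenerationTask(
    input="a lighthouse at dusk",
    output_file="lighthouse.png",
)
image_artifact = task.run()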
PromptTask
Bases:
BaseTask[Union[TextArtifact, AudioArtifact, GenericArtifact, JsonArtifact, ListArtifact, ErrorArtifact]]
, RuleMixin
, ActionsSubtaskOriginMixin
Source Code in griptape/tasks/prompt_task.py
@define
class PromptTask(
    BaseTask[Union[TextArtifact, AudioArtifact, GenericArtifact, JsonArtifact, ListArtifact, ErrorArtifact]],
    RuleMixin,
    ActionsSubtaskOriginMixin,
):
    DEFAULT_MAX_STEPS = 20
    # Stop sequence for chain-of-thought in the framework. Using this "token-like" string to make it more unique,
    # so that it doesn't trigger on accident.
    RESPONSE_STOP_SEQUENCE = "<|Response|>"

    prompt_driver: BasePromptDriver = field(
        default=Factory(lambda: Defaults.drivers_config.prompt_driver), kw_only=True, metadata={"serializable": True}
    )
    output_schema: Optional[Union[Schema, type[BaseModel]]] = field(default=None, kw_only=True)
    generate_system_template: Callable[[PromptTask], str] = field(
        default=Factory(lambda self: self.default_generate_system_template, takes_self=True),
        kw_only=True,
    )
    _conversation_memory: Union[Optional[BaseConversationMemory], NothingType] = field(
        default=Factory(lambda: NOTHING), kw_only=True, alias="conversation_memory"
    )
    _input: Union[str, list, tuple, BaseArtifact, Callable[[BaseTask], BaseArtifact]] = field(
        default=lambda task: task.full_context["args"][0] if task.full_context["args"] else TextArtifact(value=""),
        alias="input",
    )
    tools: list[BaseTool] = field(factory=list, kw_only=True, metadata={"serializable": True})
    max_subtasks: int = field(default=DEFAULT_MAX_STEPS, kw_only=True, metadata={"serializable": True})
    task_memory: Optional[TaskMemory] = field(default=None, kw_only=True)
    subtasks: list[BaseSubtask] = field(factory=list)
    generate_assistant_subtask_template: Callable[[ActionsSubtask], str] = field(
        default=Factory(lambda self: self.default_generate_assistant_subtask_template, takes_self=True),
        kw_only=True,
    )
    generate_user_subtask_template: Callable[[ActionsSubtask], str] = field(
        default=Factory(lambda self: self.default_generate_user_subtask_template, takes_self=True),
        kw_only=True,
    )
    response_stop_sequence: str = field(default=RESPONSE_STOP_SEQUENCE, kw_only=True)
    reflect_on_tool_use: bool = field(default=True, kw_only=True)
    subtask_runners: list[Callable[[BaseArtifact], BaseArtifact]] = field(
        default=Factory(
            lambda self: [self.default_run_actions_subtasks, self.default_run_output_schema_validation_subtasks],
            takes_self=True,
        ),
        kw_only=True,
    )

    @property
    def rulesets(self) -> list:
        default_rules = self.rules
        rulesets = self._rulesets.copy()

        if self.structure is not None:
            if self.structure._rulesets:
                rulesets = self.structure._rulesets + self._rulesets
            if self.structure.rules:
                default_rules = self.structure.rules + self.rules

        if default_rules:
            rulesets.append(Ruleset(name=self.DEFAULT_RULESET_NAME, rules=default_rules))

        return rulesets

    @property
    def input(self) -> BaseArtifact:
        return self._process_task_input(self._input)

    @input.setter
    def input(self, value: str | list | tuple | BaseArtifact | Callable[[BaseTask], BaseArtifact]) -> None:
        self._input = value

    @property
    def conversation_memory(self) -> Optional[BaseConversationMemory]:
        if self._conversation_memory is NOTHING:
            if self.structure is None:
                return None
            return self.structure.conversation_memory
        return self._conversation_memory

    @conversation_memory.setter
    def conversation_memory(self, value: Optional[BaseConversationMemory]) -> None:
        self._conversation_memory = value

    @property
    def prompt_stack(self) -> PromptStack:
        stack = PromptStack(tools=self.tools, output_schema=self.output_schema)
        memory = self.conversation_memory

        system_template = self.generate_system_template(self)
        if system_template:
            stack.add_system_message(system_template)

        stack.add_user_message(self.input)

        if self.output:
            stack.add_assistant_message(self.output.to_text())
        else:
            for s in self.subtasks:
                s.add_to_prompt_stack(stack)

        if memory is not None:
            # inserting at index 1 to place memory right after system prompt
            memory.add_to_prompt_stack(self.prompt_driver, stack, 1 if system_template else 0)

        return stack

    @property
    def tool_output_memory(self) -> list[TaskMemory]:
        unique_memory_dict = {}

        for memories in [tool.output_memory for tool in self.tools if tool.output_memory]:
            for memory_list in memories.values():
                for memory in memory_list:
                    if memory.name not in unique_memory_dict:
                        unique_memory_dict[memory.name] = memory

        return list(unique_memory_dict.values())

    @tools.validator  # pyright: ignore[reportAttributeAccessIssue]
    def validate_tools(self, _: Attribute, tools: list[BaseTool]) -> None:
        tool_names = [t.name for t in tools]

        if len(tool_names) > len(set(tool_names)):
            raise ValueError("tools names have to be unique in task")

    @output_schema.validator  # pyright: ignore[reportAttributeAccessIssue, reportOptionalMemberAccess]
    def validate_output_schema(self, _: Attribute, output_schema: Optional[Union[Schema, type[BaseModel]]]) -> None:
        if (
            output_schema is None
            or isinstance(self.output_schema, Schema)
            or (isinstance(self.output_schema, type) and issubclass(self.output_schema, BaseModel))
        ):
            return
        raise ValueError(f"Unsupported output schema type: {type(self.output_schema)}")

    def __attrs_post_init__(self) -> None:
        super().__attrs_post_init__()
        if self.task_memory:
            self.set_default_tools_memory(self.task_memory)

    output: Optional[BaseArtifact] = field(default=None, init=False)

    def before_run(self) -> None:
        super().before_run()
        logger.info("%s %s\nInput: %s", self.__class__.__name__, self.id, self.input.to_text())

    def after_run(self) -> None:
        super().after_run()
        logger.info(
            "%s %s\nOutput: %s",
            self.__class__.__name__,
            self.id,
            self.output.to_text() if self.output is not None else "",
        )
        conversation_memory = self.conversation_memory
        if (
            (self.structure is None or self.structure.conversation_memory_strategy == "per_task")
            and conversation_memory is not None
            and self.output is not None
        ):
            run = Run(input=self.input, output=self.output)
            conversation_memory.add_run(run)

    def try_run(self) -> ListArtifact | TextArtifact | AudioArtifact | GenericArtifact | JsonArtifact | ErrorArtifact:
        self.subtasks.clear()

        if self.response_stop_sequence not in self.prompt_driver.tokenizer.stop_sequences:
            self.prompt_driver.tokenizer.stop_sequences.extend([self.response_stop_sequence])

        output = self.prompt_driver.run(self.prompt_stack).to_artifact(
            meta={"is_react_prompt": not self.prompt_driver.use_native_tools}
        )
        for subtask_runner in self.subtask_runners:
            output = subtask_runner(output)

        if isinstance(output, (ListArtifact, TextArtifact, AudioArtifact, JsonArtifact, ModelArtifact, ErrorArtifact)):
            return output
        raise ValueError(f"Unsupported output type: {type(output)}")

    def preprocess(self, structure: Structure) -> BaseTask:
        super().preprocess(structure)
        if self.task_memory is None and structure.task_memory:
            self.set_default_tools_memory(structure.task_memory)

        return self

    def default_generate_system_template(self, _: PromptTask) -> str:
        schema = self.actions_schema().json_schema("Actions Schema")
        schema["minItems"] = 1  # The `schema` library doesn't support `minItems` so we must add it manually.

        return J2("tasks/prompt_task/system.j2").render(
            rulesets=J2("rulesets/rulesets.j2").render(rulesets=self.rulesets),
            action_names=str.join(", ", [tool.name for tool in self.tools]),
            actions_schema=utils.minify_json(json.dumps(schema)),
            meta_memory=J2("memory/meta/meta_memory.j2").render(meta_memories=self.meta_memories),
            use_native_tools=self.prompt_driver.use_native_tools,
            stop_sequence=self.response_stop_sequence,
            reflect_on_tool_use=self.reflect_on_tool_use,
        )

    def default_generate_assistant_subtask_template(self, subtask: ActionsSubtask) -> str:
        return J2("tasks/prompt_task/assistant_actions_subtask.j2").render(
            stop_sequence=self.response_stop_sequence,
            subtask=subtask,
        )

    def default_generate_user_subtask_template(self, subtask: ActionsSubtask) -> str:
        return J2("tasks/prompt_task/user_actions_subtask.j2").render(
            stop_sequence=self.response_stop_sequence,
            subtask=subtask,
        )

    def actions_schema(self) -> Schema:
        return self._actions_schema_for_tools(self.tools)

    def set_default_tools_memory(self, memory: TaskMemory) -> None:
        self.task_memory = memory

        for tool in self.tools:
            if self.task_memory:
                if tool.input_memory is None:
                    tool.input_memory = [self.task_memory]
                if tool.output_memory is None and tool.off_prompt:
                    tool.output_memory = {getattr(a, "name"): [self.task_memory] for a in tool.activities()}

    def find_subtask(self, subtask_id: str) -> BaseSubtask:
        for subtask in self.subtasks:
            if subtask.id == subtask_id:
                return subtask
        raise ValueError(f"Subtask with id {subtask_id} not found.")

    def add_subtask(self, subtask: BaseSubtask) -> BaseSubtask:
        subtask.attach_to(self)
        subtask.structure = self.structure

        if len(self.subtasks) > 0:
            self.subtasks[-1].add_child(subtask)
            subtask.add_parent(self.subtasks[-1])

        self.subtasks.append(subtask)

        return subtask

    def find_tool(self, tool_name: str) -> BaseTool:
        for tool in self.tools:
            if tool.name == tool_name:
                return tool
        raise ValueError(f"Tool with name {tool_name} not found.")

    def find_memory(self, memory_name: str) -> TaskMemory:
        for memory in self.tool_output_memory:
            if memory.name == memory_name:
                return memory
        raise ValueError(f"Memory with name {memory_name} not found.")

    def default_run_actions_subtasks(self, subtask_input: BaseArtifact) -> BaseArtifact:
        if not self.tools:
            return subtask_input

        subtask = self.add_subtask(
            ActionsSubtask(
                subtask_input,
                # TODO: Remove these fields in Prompt Task in Gen AI Builder 2.0
                generate_user_subtask_template=self.generate_user_subtask_template,
                generate_assistant_subtask_template=self.generate_assistant_subtask_template,
                response_stop_sequence=self.response_stop_sequence,
            )
        )

        while subtask.output is None:
            if len(self.subtasks) >= self.max_subtasks:
                subtask.output = ErrorArtifact(f"Exceeded tool limit of {self.max_subtasks} subtasks per task")
            else:
                subtask.run()

                if self.reflect_on_tool_use:
                    output = self.prompt_driver.run(self.prompt_stack).to_artifact(
                        meta={"is_react_prompt": not self.prompt_driver.use_native_tools}
                    )
                    subtask = self.add_subtask(ActionsSubtask(output))

        return subtask.output

    def default_run_output_schema_validation_subtasks(self, subtask_input: BaseArtifact) -> BaseArtifact:
        if self.output_schema is None:
            return subtask_input

        subtask = self.add_subtask(OutputSchemaValidationSubtask(subtask_input, output_schema=self.output_schema))
        while subtask.output is None:
            if len(self.subtasks) >= self.max_subtasks:
                subtask.output = ErrorArtifact(f"Exceeded tool limit of {self.max_subtasks} subtasks per task")
            else:
                subtask.run()

                output = subtask.output
                output = self.prompt_driver.run(self.prompt_stack).to_artifact(
                    meta={"is_react_prompt": not self.prompt_driver.use_native_tools}
                )
                subtask = self.add_subtask(OutputSchemaValidationSubtask(output, output_schema=self.output_schema))

        return subtask.output

    def _process_task_input(
        self,
        task_input: str | tuple | list | BaseArtifact | Callable[[BaseTask], BaseArtifact],
    ) -> BaseArtifact:
        if isinstance(task_input, TextArtifact):
            return TextArtifact(J2().render_from_string(task_input.value, **self.full_context), meta=task_input.meta)
        if isinstance(task_input, Callable):
            return self._process_task_input(task_input(self))
        if isinstance(task_input, ListArtifact):
            return ListArtifact([self._process_task_input(elem) for elem in task_input.value])
        if isinstance(task_input, BaseArtifact):
            return task_input
        if isinstance(task_input, (list, tuple)):
            return ListArtifact([self._process_task_input(elem) for elem in task_input])
        return self._process_task_input(TextArtifact(task_input))
DEFAULT_MAX_STEPS = 20
class-attribute instance-attribute

RESPONSE_STOP_SEQUENCE = '<|Response|>'
class-attribute instance-attribute

_conversation_memory = field(default=Factory(lambda: NOTHING), kw_only=True, alias='conversation_memory')
class-attribute instance-attribute

_input = field(default=lambda task: task.full_context['args'][0] if task.full_context['args'] else TextArtifact(value=''), alias='input')
class-attribute instance-attribute

conversation_memory
property writable

generate_assistant_subtask_template = field(default=Factory(lambda self: self.default_generate_assistant_subtask_template, takes_self=True), kw_only=True)
class-attribute instance-attribute

generate_system_template = field(default=Factory(lambda self: self.default_generate_system_template, takes_self=True), kw_only=True)
class-attribute instance-attribute

generate_user_subtask_template = field(default=Factory(lambda self: self.default_generate_user_subtask_template, takes_self=True), kw_only=True)
class-attribute instance-attribute

input
property writable

max_subtasks = field(default=DEFAULT_MAX_STEPS, kw_only=True, metadata={'serializable': True})
class-attribute instance-attribute

output = field(default=None, init=False)
class-attribute instance-attribute

output_schema = field(default=None, kw_only=True)
class-attribute instance-attribute

prompt_driver = field(default=Factory(lambda: Defaults.drivers_config.prompt_driver), kw_only=True, metadata={'serializable': True})
class-attribute instance-attribute

prompt_stack
property

reflect_on_tool_use = field(default=True, kw_only=True)
class-attribute instance-attribute

response_stop_sequence = field(default=RESPONSE_STOP_SEQUENCE, kw_only=True)
class-attribute instance-attribute

rulesets
property

subtask_runners = field(default=Factory(lambda self: [self.default_run_actions_subtasks, self.default_run_output_schema_validation_subtasks], takes_self=True), kw_only=True)
class-attribute instance-attribute

subtasks = field(factory=list)
class-attribute instance-attribute

task_memory = field(default=None, kw_only=True)
class-attribute instance-attribute

tool_output_memory
property

tools = field(factory=list, kw_only=True, metadata={'serializable': True})
class-attribute instance-attribute
__attrs_post_init__()
Source Code in griptape/tasks/prompt_task.py
def __attrs_post_init__(self) -> None:
    super().__attrs_post_init__()
    if self.task_memory:
        self.set_default_tools_memory(self.task_memory)
_process_task_input(task_input)
Source Code in griptape/tasks/prompt_task.py
def _process_task_input(
    self,
    task_input: str | tuple | list | BaseArtifact | Callable[[BaseTask], BaseArtifact],
) -> BaseArtifact:
    if isinstance(task_input, TextArtifact):
        return TextArtifact(J2().render_from_string(task_input.value, **self.full_context), meta=task_input.meta)
    if isinstance(task_input, Callable):
        return self._process_task_input(task_input(self))
    if isinstance(task_input, ListArtifact):
        return ListArtifact([self._process_task_input(elem) for elem in task_input.value])
    if isinstance(task_input, BaseArtifact):
        return task_input
    if isinstance(task_input, (list, tuple)):
        return ListArtifact([self._process_task_input(elem) for elem in task_input])
    return self._process_task_input(TextArtifact(task_input))
actions_schema()
Source Code in griptape/tasks/prompt_task.py
def actions_schema(self) -> Schema:
    return self._actions_schema_for_tools(self.tools)
add_subtask(subtask)
Source Code in griptape/tasks/prompt_task.py
def add_subtask(self, subtask: BaseSubtask) -> BaseSubtask:
    subtask.attach_to(self)
    subtask.structure = self.structure

    if len(self.subtasks) > 0:
        self.subtasks[-1].add_child(subtask)
        subtask.add_parent(self.subtasks[-1])

    self.subtasks.append(subtask)

    return subtask
after_run()
Source Code in griptape/tasks/prompt_task.py
def after_run(self) -> None:
    super().after_run()
    logger.info(
        "%s %s\nOutput: %s",
        self.__class__.__name__,
        self.id,
        self.output.to_text() if self.output is not None else "",
    )
    conversation_memory = self.conversation_memory
    if (
        (self.structure is None or self.structure.conversation_memory_strategy == "per_task")
        and conversation_memory is not None
        and self.output is not None
    ):
        run = Run(input=self.input, output=self.output)
        conversation_memory.add_run(run)
before_run()
Source Code in griptape/tasks/prompt_task.py
def before_run(self) -> None:
    super().before_run()
    logger.info("%s %s\nInput: %s", self.__class__.__name__, self.id, self.input.to_text())
default_generate_assistant_subtask_template(subtask)
Source Code in griptape/tasks/prompt_task.py
def default_generate_assistant_subtask_template(self, subtask: ActionsSubtask) -> str:
    return J2("tasks/prompt_task/assistant_actions_subtask.j2").render(
        stop_sequence=self.response_stop_sequence,
        subtask=subtask,
    )
default_generate_system_template(_)
Source Code in griptape/tasks/prompt_task.py
def default_generate_system_template(self, _: PromptTask) -> str:
    schema = self.actions_schema().json_schema("Actions Schema")
    schema["minItems"] = 1  # The `schema` library doesn't support `minItems` so we must add it manually.

    return J2("tasks/prompt_task/system.j2").render(
        rulesets=J2("rulesets/rulesets.j2").render(rulesets=self.rulesets),
        action_names=str.join(", ", [tool.name for tool in self.tools]),
        actions_schema=utils.minify_json(json.dumps(schema)),
        meta_memory=J2("memory/meta/meta_memory.j2").render(meta_memories=self.meta_memories),
        use_native_tools=self.prompt_driver.use_native_tools,
        stop_sequence=self.response_stop_sequence,
        reflect_on_tool_use=self.reflect_on_tool_use,
    )
default_generate_user_subtask_template(subtask)
Source Code in griptape/tasks/prompt_task.py
def default_generate_user_subtask_template(self, subtask: ActionsSubtask) -> str:
    return J2("tasks/prompt_task/user_actions_subtask.j2").render(
        stop_sequence=self.response_stop_sequence,
        subtask=subtask,
    )
default_run_actions_subtasks(subtask_input)
Source Code in griptape/tasks/prompt_task.py
def default_run_actions_subtasks(self, subtask_input: BaseArtifact) -> BaseArtifact:
    if not self.tools:
        return subtask_input

    subtask = self.add_subtask(
        ActionsSubtask(
            subtask_input,
            # TODO: Remove these fields in Prompt Task in Gen AI Builder 2.0
            generate_user_subtask_template=self.generate_user_subtask_template,
            generate_assistant_subtask_template=self.generate_assistant_subtask_template,
            response_stop_sequence=self.response_stop_sequence,
        )
    )

    while subtask.output is None:
        if len(self.subtasks) >= self.max_subtasks:
            subtask.output = ErrorArtifact(f"Exceeded tool limit of {self.max_subtasks} subtasks per task")
        else:
            subtask.run()

            if self.reflect_on_tool_use:
                output = self.prompt_driver.run(self.prompt_stack).to_artifact(
                    meta={"is_react_prompt": not self.prompt_driver.use_native_tools}
                )
                subtask = self.add_subtask(ActionsSubtask(output))

    return subtask.output
default_run_output_schema_validation_subtasks(subtask_input)
Source Code in griptape/tasks/prompt_task.py
def default_run_output_schema_validation_subtasks(self, subtask_input: BaseArtifact) -> BaseArtifact:
    if self.output_schema is None:
        return subtask_input

    subtask = self.add_subtask(OutputSchemaValidationSubtask(subtask_input, output_schema=self.output_schema))
    while subtask.output is None:
        if len(self.subtasks) >= self.max_subtasks:
            subtask.output = ErrorArtifact(f"Exceeded tool limit of {self.max_subtasks} subtasks per task")
        else:
            subtask.run()

            output = subtask.output
            output = self.prompt_driver.run(self.prompt_stack).to_artifact(
                meta={"is_react_prompt": not self.prompt_driver.use_native_tools}
            )
            subtask = self.add_subtask(OutputSchemaValidationSubtask(output, output_schema=self.output_schema))

    return subtask.output
find_memory(memory_name)
Source Code in griptape/tasks/prompt_task.py
def find_memory(self, memory_name: str) -> TaskMemory:
    for memory in self.tool_output_memory:
        if memory.name == memory_name:
            return memory
    raise ValueError(f"Memory with name {memory_name} not found.")
find_subtask(subtask_id)
Source Code in griptape/tasks/prompt_task.py
def find_subtask(self, subtask_id: str) -> BaseSubtask:
    for subtask in self.subtasks:
        if subtask.id == subtask_id:
            return subtask
    raise ValueError(f"Subtask with id {subtask_id} not found.")
find_tool(tool_name)
Source Code in griptape/tasks/prompt_task.py
def find_tool(self, tool_name: str) -> BaseTool:
    for tool in self.tools:
        if tool.name == tool_name:
            return tool
    raise ValueError(f"Tool with name {tool_name} not found.")
preprocess(structure)
Source Code in griptape/tasks/prompt_task.py
def preprocess(self, structure: Structure) -> BaseTask:
    super().preprocess(structure)
    if self.task_memory is None and structure.task_memory:
        self.set_default_tools_memory(structure.task_memory)

    return self
set_default_tools_memory(memory)
Source Code in griptape/tasks/prompt_task.py
def set_default_tools_memory(self, memory: TaskMemory) -> None:
    self.task_memory = memory

    for tool in self.tools:
        if self.task_memory:
            if tool.input_memory is None:
                tool.input_memory = [self.task_memory]
            if tool.output_memory is None and tool.off_prompt:
                tool.output_memory = {getattr(a, "name"): [self.task_memory] for a in tool.activities()}
try_run()
Source Code in griptape/tasks/prompt_task.py
def try_run(self) -> ListArtifact | TextArtifact | AudioArtifact | GenericArtifact | JsonArtifact | ErrorArtifact:
    self.subtasks.clear()

    if self.response_stop_sequence not in self.prompt_driver.tokenizer.stop_sequences:
        self.prompt_driver.tokenizer.stop_sequences.extend([self.response_stop_sequence])

    output = self.prompt_driver.run(self.prompt_stack).to_artifact(
        meta={"is_react_prompt": not self.prompt_driver.use_native_tools}
    )
    for subtask_runner in self.subtask_runners:
        output = subtask_runner(output)

    if isinstance(output, (ListArtifact, TextArtifact, AudioArtifact, JsonArtifact, ModelArtifact, ErrorArtifact)):
        return output
    raise ValueError(f"Unsupported output type: {type(output)}")
validate_output_schema(_, output_schema)
Source Code in griptape/tasks/prompt_task.py
@output_schema.validator  # pyright: ignore[reportAttributeAccessIssue, reportOptionalMemberAccess]
def validate_output_schema(self, _: Attribute, output_schema: Optional[Union[Schema, type[BaseModel]]]) -> None:
    if (
        output_schema is None
        or isinstance(self.output_schema, Schema)
        or (isinstance(self.output_schema, type) and issubclass(self.output_schema, BaseModel))
    ):
        return
    raise ValueError(f"Unsupported output schema type: {type(self.output_schema)}")
validate_tools(_, tools)
Source Code in griptape/tasks/prompt_task.py
@tools.validator  # pyright: ignore[reportAttributeAccessIssue]
def validate_tools(self, _: Attribute, tools: list[BaseTool]) -> None:
    tool_names = [t.name for t in tools]

    if len(tool_names) > len(set(tool_names)):
        raise ValueError("tools names have to be unique in task")
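Putting the pieces together, a minimal sketch of PromptTask with tools; the tool name CalculatorTool is an assumption (any list of BaseTool instances works), and the prompt driver falls back to Defaults.drivers_config.prompt_driver:

from griptape.tasks import PromptTask
from griptape.tools import CalculatorTool  # assumed tool name; substitute any BaseTool

# try_run() alternates prompt-driver calls with ActionsSubtasks until the model
# stops requesting actions or max_subtasks (DEFAULT_MAX_STEPS = 20) is reached;
# set reflect_on_tool_use=False to return the raw tool output instead.
task = PromptTask(input="What is 13 * 7?", tools=[CalculatorTool()])
answer = task.run()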
RagTask
Bases:
BaseTextInputTask[Union[ListArtifact, ErrorArtifact]]
Source Code in griptape/tasks/rag_task.py
@define
class RagTask(BaseTextInputTask[Union[ListArtifact, ErrorArtifact]]):
    rag_engine: RagEngine = field(kw_only=True, default=Factory(lambda: RagEngine()))

    def try_run(self) -> ListArtifact | ErrorArtifact:
        outputs = self.rag_engine.process_query(self.input.to_text()).outputs

        if len(outputs) > 0:
            return ListArtifact(outputs)
        return ErrorArtifact("empty output")
rag_engine = field(kw_only=True, default=Factory(lambda: RagEngine()))
class-attribute instance-attribute
try_run()
Source Code in griptape/tasks/rag_task.py
def try_run(self) -> ListArtifact | ErrorArtifact:
    outputs = self.rag_engine.process_query(self.input.to_text()).outputs

    if len(outputs) > 0:
        return ListArtifact(outputs)
    return ErrorArtifact("empty output")
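A sketch of the task wiring only; configuring the RagEngine itself (retrieval and response stages) is out of scope here, so the bare engine below is an illustrative assumption rather than a working pipeline:

from griptape.engines.rag import RagEngine
from griptape.tasks import RagTask

# A real deployment would pass a RagEngine configured with retrieval and
# response stages; the task simply forwards its rendered text input to
# rag_engine.process_query() and wraps the outputs in a ListArtifact.
task = RagTask(input="What does the handbook say about onboarding?", rag_engine=RagEngine())
result = task.run()  # ListArtifact of engine outputs, or ErrorArtifact("empty output")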
StructureRunTask
Bases:
BaseTask
Attributes
Name | Type | Description |
---|---|---|
structure_run_driver | BaseStructureRunDriver | Driver to run the Structure. |
Source Code in griptape/tasks/structure_run_task.py
@define
class StructureRunTask(BaseTask):
    """Task to run a Structure.

    Attributes:
        structure_run_driver: Driver to run the Structure.
    """

    _input: Union[str, list, tuple, BaseArtifact, Callable[[BaseTask], BaseArtifact]] = field(
        default=lambda task: task.full_context["args"][0] if task.full_context["args"] else TextArtifact(value=""),
    )

    @property
    def input(self) -> BaseArtifact:
        return self._process_task_input(self._input)

    @input.setter
    def input(self, value: str | list | tuple | BaseArtifact | Callable[[BaseTask], BaseArtifact]) -> None:
        self._input = value

    structure_run_driver: BaseStructureRunDriver = field(kw_only=True)

    def try_run(self) -> BaseArtifact:
        if isinstance(self.input, ListArtifact):
            return self.structure_run_driver.run(*self.input.value)
        return self.structure_run_driver.run(self.input)

    def _process_task_input(
        self,
        task_input: str | tuple | list | BaseArtifact | Callable[[BaseTask], BaseArtifact],
    ) -> BaseArtifact:
        if isinstance(task_input, TextArtifact):
            task_input.value = J2().render_from_string(task_input.value, **self.full_context)
            return task_input
        if isinstance(task_input, Callable):
            return self._process_task_input(task_input(self))
        if isinstance(task_input, ListArtifact):
            return ListArtifact([self._process_task_input(elem) for elem in task_input.value])
        if isinstance(task_input, BaseArtifact):
            return task_input
        if isinstance(task_input, (list, tuple)):
            return ListArtifact([self._process_task_input(elem) for elem in task_input])
        return self._process_task_input(TextArtifact(task_input))
_input = field(default=lambda task: task.full_context['args'][0] if task.full_context['args'] else TextArtifact(value=''))
class-attribute instance-attribute

input
property writable

structure_run_driver = field(kw_only=True)
class-attribute instance-attribute
_process_task_input(task_input)
Source Code in griptape/tasks/structure_run_task.py
def _process_task_input(
    self,
    task_input: str | tuple | list | BaseArtifact | Callable[[BaseTask], BaseArtifact],
) -> BaseArtifact:
    if isinstance(task_input, TextArtifact):
        task_input.value = J2().render_from_string(task_input.value, **self.full_context)
        return task_input
    if isinstance(task_input, Callable):
        return self._process_task_input(task_input(self))
    if isinstance(task_input, ListArtifact):
        return ListArtifact([self._process_task_input(elem) for elem in task_input.value])
    if isinstance(task_input, BaseArtifact):
        return task_input
    if isinstance(task_input, (list, tuple)):
        return ListArtifact([self._process_task_input(elem) for elem in task_input])
    return self._process_task_input(TextArtifact(task_input))
try_run()
Source Code in griptape/tasks/structure_run_task.py
def try_run(self) -> BaseArtifact:
    if isinstance(self.input, ListArtifact):
        return self.structure_run_driver.run(*self.input.value)
    return self.structure_run_driver.run(self.input)
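A minimal sketch using LocalStructureRunDriver to run one structure from another (the import path varies across griptape versions); note from try_run() above that a ListArtifact input is unpacked into positional arguments for the driver:

from griptape.drivers import LocalStructureRunDriver  # import path may differ by version
from griptape.structures import Agent
from griptape.tasks import StructureRunTask

# create_structure is called to build the target structure, which then
# receives this task's processed input as its run argument.
task = StructureRunTask(
    input="Summarize the quarterly report.",
    structure_run_driver=LocalStructureRunDriver(create_structure=lambda: Agent()),
)
result = task.run()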
TextSummaryTask
Bases:
BaseTextInputTask[TextArtifact]
Source Code in griptape/tasks/text_summary_task.py
@define
class TextSummaryTask(BaseTextInputTask[TextArtifact]):
    summary_engine: BaseSummaryEngine = field(default=Factory(lambda: PromptSummaryEngine()), kw_only=True)

    def try_run(self) -> TextArtifact:
        return TextArtifact(self.summary_engine.summarize_text(self.input.to_text(), rulesets=self.rulesets))
summary_engine = field(default=Factory(lambda: PromptSummaryEngine()), kw_only=True)
class-attribute instance-attribute
try_run()
Source Code in griptape/tasks/text_summary_task.py
def try_run(self) -> TextArtifact:
    return TextArtifact(self.summary_engine.summarize_text(self.input.to_text(), rulesets=self.rulesets))
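A minimal sketch; summary_engine defaults to PromptSummaryEngine, and any rulesets attached to the task are forwarded to summarize_text():

from griptape.tasks import TextSummaryTask

# The input follows the same text-input conventions as the other tasks here:
# a literal string, a Jinja template, a TextArtifact, or a callable.
task = TextSummaryTask(input="Long text to summarize goes here...")
summary_artifact = task.run()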
TextToSpeechTask
Bases:
BaseAudioGenerationTask
Source Code in griptape/tasks/text_to_speech_task.py
@define
class TextToSpeechTask(BaseAudioGenerationTask):
    DEFAULT_INPUT_TEMPLATE = "{{ args[0] }}"

    _input: Union[str, TextArtifact, Callable[[BaseTask], TextArtifact]] = field(default=DEFAULT_INPUT_TEMPLATE)
    text_to_speech_driver: BaseTextToSpeechDriver = field(
        default=Factory(lambda: Defaults.drivers_config.text_to_speech_driver), kw_only=True
    )

    @property
    def input(self) -> TextArtifact:
        if isinstance(self._input, TextArtifact):
            return self._input
        if isinstance(self._input, Callable):
            return self._input(self)
        return TextArtifact(J2().render_from_string(self._input, **self.full_context))

    @input.setter
    def input(self, value: TextArtifact) -> None:
        self._input = value

    def try_run(self) -> AudioArtifact:
        audio_artifact = self.text_to_speech_driver.run_text_to_audio(prompts=[self.input.to_text()])

        if self.output_dir or self.output_file:
            self._write_to_file(audio_artifact)

        return audio_artifact
DEFAULT_INPUT_TEMPLATE = '{{ args[0] }}'
class-attribute instance-attribute

_input = field(default=DEFAULT_INPUT_TEMPLATE)
class-attribute instance-attribute

input
property writable

text_to_speech_driver = field(default=Factory(lambda: Defaults.drivers_config.text_to_speech_driver), kw_only=True)
class-attribute instance-attribute
try_run()
Source Code in griptape/tasks/text_to_speech_task.py
def try_run(self) -> AudioArtifact:
    audio_artifact = self.text_to_speech_driver.run_text_to_audio(prompts=[self.input.to_text()])

    if self.output_dir or self.output_file:
        self._write_to_file(audio_artifact)

    return audio_artifact
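A minimal sketch; the output file name is illustrative, and the driver defaults to Defaults.drivers_config.text_to_speech_driver unless one is supplied:

from griptape.tasks import TextToSpeechTask

task = TextToSpeechTask(
    input="Hello from the docs",
    output_file="hello.mp3",  # persisted via _write_to_file(), per try_run() above
)
audio_artifact = task.run()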
ToolTask
Bases:
PromptTask
, ActionsSubtaskOriginMixin
Source Code in griptape/tasks/tool_task.py
@define
class ToolTask(PromptTask, ActionsSubtaskOriginMixin):
    DEFAULT_MAX_STEPS = 0
    ACTION_PATTERN = r"(?s)[^{]*({.*})"

    tool: BaseTool = field(kw_only=True, metadata={"serializable": True})
    subtask: Optional[ActionsSubtask] = field(default=None, kw_only=True)
    task_memory: Optional[TaskMemory] = field(default=None, kw_only=True)
    tools: list[BaseTool] = field(factory=list, kw_only=True, metadata={"serializable": False})
    max_subtasks: int = field(default=DEFAULT_MAX_STEPS, kw_only=True, metadata={"serializable": False})

    @property
    def prompt_stack(self) -> PromptStack:
        stack = super().prompt_stack
        stack.tools = [self.tool]

        return stack

    def __attrs_post_init__(self) -> None:
        super().__attrs_post_init__()
        if self.task_memory is not None:
            self.set_default_tools_memory(self.task_memory)

    def preprocess(self, structure: Structure) -> ToolTask:
        super().preprocess(structure)

        if self.task_memory is None and structure.task_memory is not None:
            self.set_default_tools_memory(structure.task_memory)

        return self

    def default_generate_system_template(self, _: PromptTask) -> str:
        return J2("tasks/tool_task/system.j2").render(
            rulesets=J2("rulesets/rulesets.j2").render(rulesets=self.rulesets),
            action_schema=utils.minify_json(json.dumps(self.tool.schema())),
            meta_memory=J2("memory/meta/meta_memory.j2").render(meta_memories=self.meta_memories),
            use_native_tools=self.prompt_driver.use_native_tools,
        )

    def actions_schema(self) -> Schema:
        return self._actions_schema_for_tools([self.tool])

    def try_run(self) -> ListArtifact | TextArtifact | ErrorArtifact:
        warnings.warn(
            "`ToolTask` is deprecated and will be removed in a future release. Use `PromptTask` with `reflect_on_tool_use=False` instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        result = self.prompt_driver.run(self.prompt_stack)

        if self.prompt_driver.use_native_tools:
            subtask_input = result.to_artifact(meta={"is_react_prompt": False})
        else:
            action_matches = re.findall(self.ACTION_PATTERN, result.to_text(), re.DOTALL)

            if not action_matches:
                return ErrorArtifact("No action found in prompt output.")
            data = action_matches[-1]
            action_dict = json.loads(data)
            action_dict["tag"] = self.tool.name
            subtask_input = TextArtifact(
                J2("tasks/tool_task/subtask.j2").render(action_json=json.dumps(action_dict)),
                meta={"is_react_prompt": True},
            )

        try:
            subtask = self.add_subtask(ActionsSubtask(subtask_input))
            output = subtask.run()

            if isinstance(output, ListArtifact):
                output = output[0]
        except Exception as e:
            output = ErrorArtifact(f"Error processing tool input: {e}", exception=e)
        return output

    def find_tool(self, tool_name: str) -> BaseTool:
        if self.tool.name == tool_name:
            return self.tool
        raise ValueError(f"Tool with name {tool_name} not found.")

    def find_memory(self, memory_name: str) -> TaskMemory:
        raise NotImplementedError("ToolTask does not support Task Memory.")

    def find_subtask(self, subtask_id: str) -> ActionsSubtask:
        if self.subtask and self.subtask.id == subtask_id:
            return self.subtask
        raise ValueError(f"Subtask with id {subtask_id} not found.")

    def add_subtask(self, subtask: BaseSubtask) -> ActionsSubtask:
        if not isinstance(subtask, ActionsSubtask):
            raise TypeError("Subtask must be an instance of ActionsSubtask.")

        self.subtask = subtask
        self.subtask.attach_to(self)

        return self.subtask

    def set_default_tools_memory(self, memory: TaskMemory) -> None:
        self.task_memory = memory

        if self.task_memory:
            if self.tool.input_memory is None:
                self.tool.input_memory = [self.task_memory]
            if self.tool.output_memory is None and self.tool.off_prompt:
                self.tool.output_memory = {getattr(a, "name"): [self.task_memory] for a in self.tool.activities()}
ACTION_PATTERN = '(?s)[^{]*({.*})'
class-attribute instance-attribute

DEFAULT_MAX_STEPS = 0
class-attribute instance-attribute

max_subtasks = field(default=DEFAULT_MAX_STEPS, kw_only=True, metadata={'serializable': False})
class-attribute instance-attribute

prompt_stack
property

subtask = field(default=None, kw_only=True)
class-attribute instance-attribute

task_memory = field(default=None, kw_only=True)
class-attribute instance-attribute

tool = field(kw_only=True, metadata={'serializable': True})
class-attribute instance-attribute

tools = field(factory=list, kw_only=True, metadata={'serializable': False})
class-attribute instance-attribute
__attrs_post_init__()
Source Code in griptape/tasks/tool_task.py
def __attrs_post_init__(self) -> None:
    super().__attrs_post_init__()
    if self.task_memory is not None:
        self.set_default_tools_memory(self.task_memory)
actions_schema()
Source Code in griptape/tasks/tool_task.py
def actions_schema(self) -> Schema:
    return self._actions_schema_for_tools([self.tool])
add_subtask(subtask)
Source Code in griptape/tasks/tool_task.py
def add_subtask(self, subtask: BaseSubtask) -> ActionsSubtask:
    if not isinstance(subtask, ActionsSubtask):
        raise TypeError("Subtask must be an instance of ActionsSubtask.")

    self.subtask = subtask
    self.subtask.attach_to(self)

    return self.subtask
default_generate_system_template(_)
Source Code in griptape/tasks/tool_task.py
def default_generate_system_template(self, _: PromptTask) -> str:
    return J2("tasks/tool_task/system.j2").render(
        rulesets=J2("rulesets/rulesets.j2").render(rulesets=self.rulesets),
        action_schema=utils.minify_json(json.dumps(self.tool.schema())),
        meta_memory=J2("memory/meta/meta_memory.j2").render(meta_memories=self.meta_memories),
        use_native_tools=self.prompt_driver.use_native_tools,
    )
find_memory(memory_name)
Source Code in griptape/tasks/tool_task.py
def find_memory(self, memory_name: str) -> TaskMemory:
    raise NotImplementedError("ToolTask does not support Task Memory.")
find_subtask(subtask_id)
Source Code in griptape/tasks/tool_task.py
def find_subtask(self, subtask_id: str) -> ActionsSubtask:
    if self.subtask and self.subtask.id == subtask_id:
        return self.subtask
    raise ValueError(f"Subtask with id {subtask_id} not found.")
find_tool(tool_name)
Source Code in griptape/tasks/tool_task.py
def find_tool(self, tool_name: str) -> BaseTool:
    if self.tool.name == tool_name:
        return self.tool
    raise ValueError(f"Tool with name {tool_name} not found.")
preprocess(structure)
Source Code in griptape/tasks/tool_task.py
def preprocess(self, structure: Structure) -> ToolTask:
    super().preprocess(structure)

    if self.task_memory is None and structure.task_memory is not None:
        self.set_default_tools_memory(structure.task_memory)

    return self
set_default_tools_memory(memory)
Source Code in griptape/tasks/tool_task.py
def set_default_tools_memory(self, memory: TaskMemory) -> None:
    self.task_memory = memory

    if self.task_memory:
        if self.tool.input_memory is None:
            self.tool.input_memory = [self.task_memory]
        if self.tool.output_memory is None and self.tool.off_prompt:
            self.tool.output_memory = {getattr(a, "name"): [self.task_memory] for a in self.tool.activities()}
try_run()
Source Code in griptape/tasks/tool_task.py
def try_run(self) -> ListArtifact | TextArtifact | ErrorArtifact:
    warnings.warn(
        "`ToolTask` is deprecated and will be removed in a future release. Use `PromptTask` with `reflect_on_tool_use=False` instead.",
        DeprecationWarning,
        stacklevel=2,
    )
    result = self.prompt_driver.run(self.prompt_stack)

    if self.prompt_driver.use_native_tools:
        subtask_input = result.to_artifact(meta={"is_react_prompt": False})
    else:
        action_matches = re.findall(self.ACTION_PATTERN, result.to_text(), re.DOTALL)

        if not action_matches:
            return ErrorArtifact("No action found in prompt output.")
        data = action_matches[-1]
        action_dict = json.loads(data)
        action_dict["tag"] = self.tool.name
        subtask_input = TextArtifact(
            J2("tasks/tool_task/subtask.j2").render(action_json=json.dumps(action_dict)),
            meta={"is_react_prompt": True},
        )

    try:
        subtask = self.add_subtask(ActionsSubtask(subtask_input))
        output = subtask.run()

        if isinstance(output, ListArtifact):
            output = output[0]
    except Exception as e:
        output = ErrorArtifact(f"Error processing tool input: {e}", exception=e)
    return output
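Since the class is deprecated, new code should use the replacement named in the warning above. A minimal sketch (CalculatorTool is an assumed tool name):

from griptape.tasks import PromptTask
from griptape.tools import CalculatorTool  # assumed tool name; substitute any BaseTool

# The recommended replacement: a single tool pass whose result is returned
# directly, without a follow-up reflection call by the LLM.
task = PromptTask(tools=[CalculatorTool()], reflect_on_tool_use=False)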
ToolkitTask
Bases:
PromptTask
Source Code in griptape/tasks/toolkit_task.py
@define
class ToolkitTask(PromptTask):
    def try_run(self) -> ListArtifact | TextArtifact | AudioArtifact | GenericArtifact | JsonArtifact | ErrorArtifact:
        warnings.warn(
            "`ToolkitTask` is deprecated and will be removed in a future release. `PromptTask` is a drop-in replacement.",
            DeprecationWarning,
            stacklevel=2,
        )
        return super().try_run()
try_run()
Source Code in griptape/tasks/toolkit_task.py
def try_run(self) -> ListArtifact | TextArtifact | AudioArtifact | GenericArtifact | JsonArtifact | ErrorArtifact:
    warnings.warn(
        "`ToolkitTask` is deprecated and will be removed in a future release. `PromptTask` is a drop-in replacement.",
        DeprecationWarning,
        stacklevel=2,
    )
    return super().try_run()
VariationImageGenerationTask
Bases:
BaseImageGenerationTask
Attributes
Name | Type | Description |
---|---|---|
image_generation_driver | BaseImageGenerationDriver | The engine used to generate the image. |
negative_rulesets | list[Ruleset] | List of negatively-weighted rulesets applied to the text prompt, if supported by the driver. |
negative_rules | list[Rule] | List of negatively-weighted rules applied to the text prompt, if supported by the driver. |
output_dir | Optional[str] | If provided, the generated image will be written to disk in output_dir. |
output_file | Optional[str] | If provided, the generated image will be written to disk as output_file. |
Source Code in griptape/tasks/variation_image_generation_task.py
@define
class VariationImageGenerationTask(BaseImageGenerationTask):
    """A task that generates a variation of an image using a prompt.

    Accepts a text prompt and image as input in one of the following formats:
    - tuple of (template string, ImageArtifact)
    - tuple of (TextArtifact, ImageArtifact)
    - Callable that returns a tuple of (TextArtifact, ImageArtifact).

    Attributes:
        image_generation_driver: The engine used to generate the image.
        negative_rulesets: List of negatively-weighted rulesets applied to the text prompt, if supported by the driver.
        negative_rules: List of negatively-weighted rules applied to the text prompt, if supported by the driver.
        output_dir: If provided, the generated image will be written to disk in output_dir.
        output_file: If provided, the generated image will be written to disk as output_file.
    """

    image_generation_driver: BaseImageGenerationDriver = field(
        default=Factory(lambda: Defaults.drivers_config.image_generation_driver),
        kw_only=True,
    )
    _input: Union[tuple[Union[str, TextArtifact], ImageArtifact], Callable[[BaseTask], ListArtifact], ListArtifact] = (
        field(default=None, alias="input")
    )

    @property
    def input(self) -> ListArtifact:
        if isinstance(self._input, ListArtifact):
            return self._input
        if isinstance(self._input, tuple):
            if isinstance(self._input[0], TextArtifact):
                input_text = self._input[0]
            else:
                input_text = TextArtifact(J2().render_from_string(self._input[0], **self.full_context))
            return ListArtifact([input_text, self._input[1]])
        if isinstance(self._input, Callable):
            return self._input(self)
        raise ValueError("Input must be a tuple of (text, image) or a callable that returns such a tuple.")

    @input.setter
    def input(self, value: tuple[str | TextArtifact, ImageArtifact] | Callable[[BaseTask], ListArtifact]) -> None:
        self._input = value

    def try_run(self) -> ImageArtifact:
        prompt_artifact = self.input[0]

        image_artifact = self.input[1]
        if not isinstance(image_artifact, ImageArtifact):
            raise ValueError("Image must be an ImageArtifact.")

        output_image_artifact = self.image_generation_driver.run_image_variation(
            prompts=self._get_prompts(prompt_artifact.to_text()),
            negative_prompts=self._get_negative_prompts(),
            image=image_artifact,
        )

        if self.output_dir or self.output_file:
            self._write_to_file(output_image_artifact)

        return output_image_artifact
_input = field(default=None, alias='input')
class-attribute instance-attribute

image_generation_driver = field(default=Factory(lambda: Defaults.drivers_config.image_generation_driver), kw_only=True)
class-attribute instance-attribute

input
property writable
try_run()
Source Code in griptape/tasks/variation_image_generation_task.py
def try_run(self) -> ImageArtifact:
    prompt_artifact = self.input[0]

    image_artifact = self.input[1]
    if not isinstance(image_artifact, ImageArtifact):
        raise ValueError("Image must be an ImageArtifact.")

    output_image_artifact = self.image_generation_driver.run_image_variation(
        prompts=self._get_prompts(prompt_artifact.to_text()),
        negative_prompts=self._get_negative_prompts(),
        image=image_artifact,
    )

    if self.output_dir or self.output_file:
        self._write_to_file(output_image_artifact)

    return output_image_artifact
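A minimal sketch; the file names are illustrative, and the driver defaults to Defaults.drivers_config.image_generation_driver:

from griptape.loaders import ImageLoader
from griptape.tasks import VariationImageGenerationTask

image = ImageLoader().load("photo.png")  # hypothetical source image

# The two-part input mirrors the (text, image, mask) form of the
# inpainting/outpainting tasks, minus the mask.
task = VariationImageGenerationTask(
    input=("Same scene, but at night", image),
    output_file="variation.png",
)
image_artifact = task.run()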