sherpa_ai package

In This Page:

sherpa_ai package#

Subpackages#

Submodules#

sherpa_ai.events module#

class sherpa_ai.events.Event(event_type: EventType, agent: str, content: str)[source]#

Bases: object

classmethod from_dict(data)[source]#
class sherpa_ai.events.EventType(value, names=None, *, module=None, qualname=None, type=None, start=1, boundary=None)[source]#

Bases: Enum

action = 5#
action_output = 6#
feedback = 4#
planning = 1#
result = 3#
task = 2#
user_input = 7#

sherpa_ai.orchestrator module#

class sherpa_ai.orchestrator.Orchestrator(config: ~sherpa_ai.orchestrator.OrchestratorConfig, agent_pool: ~sherpa_ai.agents.agent_pool.AgentPool = <sherpa_ai.agents.agent_pool.AgentPool object>)[source]#

Bases: object

add_agent(agent: BaseAgent)[source]#
continue_with_user_feedback(user_feedback) str | None[source]#
execute(plan: Plan, planner: Planner)[source]#
plan(task: str, planner: Planner, critic: Critic) Plan[source]#
classmethod restore(data: dict, agent_pool: AgentPool)[source]#
run(task)[source]#
save(shared_memory: SharedMemory, agents: List[BaseAgent])[source]#
class sherpa_ai.orchestrator.OrchestratorConfig(*, llm_name: str = 'gpt-3.5-turbo', llm_temperature: float = 0.7, critic_rounds: int = 3)[source]#

Bases: BaseModel

critic_rounds: int#
llm_name: str#
llm_temperature: float#
model_config: ClassVar[ConfigDict] = {}#

Configuration for the model; should be a dictionary conforming to pydantic's ConfigDict.

sherpa_ai.output_parser module#

class sherpa_ai.output_parser.BaseTaskOutputParser(*args: Any, name: str | None = None)[source]#

Bases: BaseOutputParser

abstract parse(text: str) TaskAction[source]#

Return TaskAction

class sherpa_ai.output_parser.TaskAction(name, args)[source]#

Bases: NamedTuple

args: Dict#

Alias for field number 1

name: str#

Alias for field number 0

class sherpa_ai.output_parser.TaskOutputParser(*args: Any, name: str | None = None)[source]#

Bases: BaseTaskOutputParser

parse(text: str) TaskAction[source]#

Return TaskAction

sherpa_ai.output_parser.preprocess_json_input(input_str: str) str[source]#

sherpa_ai.post_processors module#

Post-processors for outputs from the LLM.

sherpa_ai.prompt module#

sherpa_ai.prompt_generator module#

sherpa_ai.reflection module#

sherpa_ai.task_agent module#

sherpa_ai.tools module#

class sherpa_ai.tools.ContextTool(*, name: str = 'Context Search', description: str = 'Access internal technical documentation for AI related projects, includingFixie, LangChain, GPT index, GPTCache, GPT4ALL, autoGPT, db-GPT, AgentGPT, sherpa.Only use this tool if you need information for these projects specifically.', args_schema: Type[BaseModel] | Type[BaseModel] | None = None, return_direct: bool = False, verbose: bool = False, callbacks: List[BaseCallbackHandler] | BaseCallbackManager | None = None, callback_manager: BaseCallbackManager | None = None, tags: List[str] | None = None, metadata: Dict[str, Any] | None = None, handle_tool_error: bool | str | Callable[[ToolException], str] | None = False, handle_validation_error: bool | str | Callable[[ValidationError], str] | None = False, response_format: Literal['content', 'content_and_artifact'] = 'content', memory: VectorStoreRetriever)[source]#

Bases: BaseTool

memory: VectorStoreRetriever#
class sherpa_ai.tools.LinkScraperTool(*, name: str = 'Link Scraper', description: str = 'Access the content of a link. Only use this tool when you need to extract information from a link.', args_schema: Type[BaseModel] | Type[BaseModel] | None = None, return_direct: bool = False, verbose: bool = False, callbacks: List[BaseCallbackHandler] | BaseCallbackManager | None = None, callback_manager: BaseCallbackManager | None = None, tags: List[str] | None = None, metadata: Dict[str, Any] | None = None, handle_tool_error: bool | str | Callable[[ToolException], str] | None = False, handle_validation_error: bool | str | Callable[[ValidationError], str] | None = False, response_format: Literal['content', 'content_and_artifact'] = 'content', llm: Any = None)[source]#

Bases: BaseTool

llm: Any#
class sherpa_ai.tools.SearchArxivTool(*, name: str = 'Arxiv Search', description: str = 'Access all the papers from Arxiv to search for domain-specific scientific publication.Only use this tool when you need information in the scientific paper.', args_schema: Type[BaseModel] | Type[BaseModel] | None = None, return_direct: bool = False, verbose: bool = False, callbacks: List[BaseCallbackHandler] | BaseCallbackManager | None = None, callback_manager: BaseCallbackManager | None = None, tags: List[str] | None = None, metadata: Dict[str, Any] | None = None, handle_tool_error: bool | str | Callable[[ToolException], str] | None = False, handle_validation_error: bool | str | Callable[[ValidationError], str] | None = False, response_format: Literal['content', 'content_and_artifact'] = 'content')[source]#

Bases: BaseTool

class sherpa_ai.tools.SearchTool(*, name: str = 'Search', description: str = 'Access the internet to search for the information. Only use this tool when you cannot find the information using internal search.', args_schema: Type[BaseModel] | Type[BaseModel] | None = None, return_direct: bool = False, verbose: bool = False, callbacks: List[BaseCallbackHandler] | BaseCallbackManager | None = None, callback_manager: BaseCallbackManager | None = None, tags: List[str] | None = None, metadata: Dict[str, Any] | None = None, handle_tool_error: bool | str | Callable[[ToolException], str] | None = False, handle_validation_error: bool | str | Callable[[ValidationError], str] | None = False, response_format: Literal['content', 'content_and_artifact'] = 'content', top_k: int = 10, config: AgentConfig = AgentConfig(verbose=True, gsite=[], do_reflect=False, use_task_agent=False, search_domains=[], invalid_domains=[]))[source]#

Bases: BaseTool

top_k: int#
class sherpa_ai.tools.UserInputTool(*, name: str = 'UserInput', description: str = 'Access the user input for the task.You use this tool if you need more context and would like to ask clarifying questions to solve the task', args_schema: Type[BaseModel] | Type[BaseModel] | None = None, return_direct: bool = False, verbose: bool = False, callbacks: List[BaseCallbackHandler] | BaseCallbackManager | None = None, callback_manager: BaseCallbackManager | None = None, tags: List[str] | None = None, metadata: Dict[str, Any] | None = None, handle_tool_error: bool | str | Callable[[ToolException], str] | None = False, handle_validation_error: bool | str | Callable[[ValidationError], str] | None = False, response_format: Literal['content', 'content_and_artifact'] = 'content')[source]#

Bases: BaseTool

sherpa_ai.tools.get_tools(memory, config)[source]#

sherpa_ai.utils module#

sherpa_ai.utils.check_if_number_exist(result: str, source: str)[source]#
sherpa_ai.utils.check_url(url)[source]#

Performs an HTTP GET request on url to test its validity.

Returns True if GET succeeds, False otherwise.

sherpa_ai.utils.chunk_and_summarize(text_data: str, question: str, link: str, llm)[source]#
sherpa_ai.utils.chunk_and_summarize_file(text_data: str, question: str, file_name: str, file_format: str, llm, title: str = None)[source]#
sherpa_ai.utils.combined_number_extractor(text: str)[source]#

Extracts unique numeric values from the given text by combining results from two different extraction methods.

Parameters: - text (str): The input text from which numeric values are to be extracted.

Returns: - set: A set containing unique numeric values extracted from the input text.

sherpa_ai.utils.count_string_tokens(string: str, model_name: str) int[source]#

Returns the number of tokens in a text string.

Parameters:
  • string (str) – The text string.

  • model_name (str) – The name of the encoding to use. (e.g., “gpt-3.5-turbo”)

Returns:

The number of tokens in the text string.

Return type:

int

sherpa_ai.utils.extract_entities(text)[source]#

Extract entities of specific types NORP (Nationalities or Religious or Political Groups) ORG (Organization) GPE (Geopolitical Entity) LOC (Location) using spaCy.

Args: - text (str): Input text.

Returns: List[str]: List of extracted entities.

sherpa_ai.utils.extract_numbers_from_text(text)[source]#

Returns a list, possibly empty, of the strings of digits within text

sherpa_ai.utils.extract_numeric_entities(text: str | None, entity_types: List[str] = ['DATE', 'CARDINAL', 'QUANTITY', 'MONEY'])[source]#

Extracts numeric entities from the given text using spaCy and converts textual representations of numbers to floats using the word_to_float function.

Parameters:
  • text (str) – The input text from which numeric entities will be extracted.

  • entity_types (List[str]) – A list of spaCy entity types to consider for extraction. Default is [“DATE”, “CARDINAL”, “QUANTITY”, “MONEY”].

Returns:

A list of numeric values extracted from the text.

Return type:

List[float]

sherpa_ai.utils.extract_text_from_pdf(pdf_path)[source]#
sherpa_ai.utils.extract_urls(text)[source]#
sherpa_ai.utils.file_text_splitter(data, meta_data)[source]#
sherpa_ai.utils.get_base_url(link)[source]#
sherpa_ai.utils.json_from_text(text: str)[source]#

Extract and parse JSON data from a text.

Args: - text (str): Input text containing JSON data.

Returns: dict: Parsed JSON data.

sherpa_ai.utils.load_files(files: List[str]) List[Document][source]#
sherpa_ai.utils.log_formatter(logs)[source]#

Formats the logs into a readable string

sherpa_ai.utils.question_with_file_reconstructor(data: str, file_name: str, title: str | None, file_format: str, question: str)[source]#
sherpa_ai.utils.scrape_with_url(url: str)[source]#
sherpa_ai.utils.show_commands_only(logs)[source]#

Modified version of log_formatter that only shows commands

sherpa_ai.utils.string_comparison_with_jaccard_and_levenshtein(word1, word2, levenshtein_constant)[source]#

Calculate a combined similarity metric using Jaccard similarity and normalized Levenshtein distance.

Args: - word1 (str): First input string. - word2 (str): Second input string. - levenshtein_constant (float): Weight for the Levenshtein distance in the combined metric.

Returns: float: Combined similarity metric.

sherpa_ai.utils.text_similarity(check_entity: List[str], source_entity: List[str])[source]#

Check if entities from a reference list are present in another list.

Args: - check_entity ([str]): List of entities to check. - source_entity ([str]): List of reference entities.

Returns: dict: Result of the check containing ‘entity_exist’ and ‘messages’.

sherpa_ai.utils.text_similarity_by_llm(llm: BaseLanguageModel, source_entity: List[str], source, result, user_id=None, team_id=None)[source]#

Check if entities from a question are mentioned in some form inside the answer using a language model.

Args: - source_entity (List[str]): List of entities from the question. - source (str): Question text. - result (str): Answer text. - user_id (str): User ID (optional). - team_id (str): Team ID (optional).

Returns: dict: Result of the check containing ‘entity_exist’ and ‘messages’.

sherpa_ai.utils.text_similarity_by_metrics(check_entity: List[str], source_entity: List[str])[source]#

Check entity similarity based on Jaccard and Levenshtein metrics.

Args: - check_entity (List[str]): List of entities to check. - source_entity (List[str]): List of reference entities.

Returns: dict: Result of the check containing ‘entity_exist’ and ‘messages’.

sherpa_ai.utils.verify_numbers_against_source(text_to_test: str | None, source_text: str | None)[source]#

Verifies that all numbers in text_to_test exist in source_text. Returns True on success. Returns False and a feedback string on failure.

sherpa_ai.utils.word_to_float(text)[source]#

Converts a textual representation of a number to a float.

Parameters: - text (str): The input text containing a textual representation of a number.

Returns: dict: A dictionary with keys:

  • ‘success’ (bool): True if the conversion was successful, False otherwise.

  • ‘data’ (float): The converted float value if ‘success’ is True.

  • ‘message’ (str): An error message if ‘success’ is False.

Module contents#