import { AgentTrajectoryEvaluator, EvalOutputType, ExtractLLMCallOptions, LLMEvalChainInput, LLMTrajectoryEvaluatorArgs } from "../base.cjs";
import { AgentStep } from "@langchain/core/agents";
import { ChatGeneration, Generation } from "@langchain/core/outputs";
import { BaseLLMOutputParser } from "@langchain/core/output_parsers";
import { ChainValues } from "@langchain/core/utils/types";
import { BasePromptTemplate } from "@langchain/core/prompts";
import * as _$_langchain_core_prompt_values0 from "@langchain/core/prompt_values";
import { BaseCallbackConfig, Callbacks } from "@langchain/core/callbacks/manager";
import { StructuredToolInterface } from "@langchain/core/tools";
import { BaseChatModel } from "@langchain/core/language_models/chat_models";

//#region src/evaluation/agents/trajectory.d.ts
/**
 * A parser for the output of the TrajectoryEvalChain.
 */
declare class TrajectoryOutputParser extends BaseLLMOutputParser<EvalOutputType> {
  /** Stable class name used by LangChain's serialization/load machinery. */
  static lc_name(): string;
  /** Namespace path under which this parser is registered for serialization. */
  lc_namespace: string[];
  /**
   * Parse the raw model generations into the structured evaluation output.
   *
   * @param generations - Text or chat generations produced by the evaluating model.
   * @param _callbacks - Accepted for interface parity with `BaseLLMOutputParser`;
   *   the leading underscore suggests the implementation ignores it — confirm in source.
   * @returns A promise resolving to the parsed evaluation result.
   */
  parseResult(generations: Generation[] | ChatGeneration[], _callbacks: Callbacks | undefined): Promise<EvalOutputType>;
}
/**
 * A chain for evaluating ReAct style agents.
 *
 * This chain is used to evaluate ReAct style agents by reasoning about
 * the sequence of actions taken and their outcomes.
 */
declare class TrajectoryEvalChain extends AgentTrajectoryEvaluator {
  /** Stable class name used by LangChain's serialization/load machinery. */
  static lc_name(): string;
  /** Optional identifier for the criterion being evaluated, if one was configured. */
  criterionName?: string;
  /** Optional display name for this evaluation, if one was configured. */
  evaluationName?: string;
  /** Whether this evaluator requires an input value to be supplied. */
  requiresInput: boolean;
  /** Whether this evaluator requires a reference (ground-truth) value. */
  requiresReference: boolean;
  /** Parser that converts the raw LLM response into the structured eval output. */
  outputParser: TrajectoryOutputParser;
  /**
   * Resolve the prompt to use for trajectory evaluation.
   *
   * @param prompt - An explicit prompt template to use, if provided.
   * @param agentTools - Tools available to the agent; presumably used to pick or
   *   populate a default prompt when `prompt` is omitted — confirm in source.
   * @returns The prompt template the chain will run with.
   */
  static resolveTrajectoryPrompt(prompt?: BasePromptTemplate | undefined, agentTools?: StructuredToolInterface[]): BasePromptTemplate<any, _$_langchain_core_prompt_values0.BasePromptValueInterface, any>;
  /**
   * Get the description of the agent tools.
   *
   * @returns The description of the agent tools.
   */
  static toolsDescription(agentTools: StructuredToolInterface[]): string;
  /**
   * Create a new TrajectoryEvalChain.
   * @param llm
   * @param agentTools - The tools used by the agent.
   * @param chainOptions - The options for the chain.
   */
  static fromLLM(llm: BaseChatModel, agentTools?: StructuredToolInterface[], chainOptions?: Partial<Omit<LLMEvalChainInput, "llm">>): Promise<TrajectoryEvalChain>;
  /**
   * Shape the raw chain result into the evaluator's output form.
   *
   * @param result - The raw values produced by running the chain.
   * @returns The prepared output (untyped in this generated declaration).
   */
  _prepareOutput(result: ChainValues): any;
  /**
   * Get the agent trajectory as a formatted string.
   *
   * @param steps - The agent trajectory.
   * @returns The formatted agent trajectory.
   */
  getAgentTrajectory(steps: AgentStep[]): string;
  /**
   * Format the optional reference answer for inclusion in the prompt.
   *
   * @param reference - The reference (ground-truth) answer, if any.
   * @returns The formatted reference text (possibly empty when omitted —
   *   confirm exact behavior in source).
   */
  formatReference(reference?: string): string;
  /**
   * Run the trajectory evaluation against the given agent run.
   *
   * @param args - Input, agent steps, and prediction to evaluate.
   * @param callOptions - Call options forwarded to the underlying LLM.
   * @param config - Callbacks or callback configuration for the run.
   * @returns A promise resolving to the chain's output values.
   */
  _evaluateAgentTrajectory(args: LLMTrajectoryEvaluatorArgs, callOptions: ExtractLLMCallOptions<this["llm"]>, config?: Callbacks | BaseCallbackConfig): Promise<ChainValues>;
}
//#endregion
export { TrajectoryEvalChain, TrajectoryOutputParser };
//# sourceMappingURL=trajectory.d.cts.map