rageval - v0.1.1
    Preparing search index...

    Interface OpenAIProviderConfig

    Configuration for the OpenAI provider.

    Uses structural typing for the client so you can pass any compatible object — works with Azure OpenAI, proxies, or mocks in tests.

    interface OpenAIProviderConfig {
        type: "openai";
        client: {
            chat: {
                completions: {
                    create: (
                        params: {
                            model: string;
                            max_tokens: number;
                            temperature?: number;
                            messages: { role: "user" | "assistant" | "system"; content: string }[];
                        },
                        options?: { signal?: AbortSignal },
                    ) => Promise<
                        { choices: { message?: { content?: string | null } }[] }
                    >;
                };
            };
        };
        model?: string;
        maxTokens?: number;
        temperature?: number;
        retries?: number;
    }
    Index

    Properties

    type: "openai"
    client: {
        chat: {
            completions: {
                create: (
                    params: {
                        model: string;
                        max_tokens: number;
                        temperature?: number;
                        messages: { role: "user" | "assistant" | "system"; content: string }[];
                    },
                    options?: { signal?: AbortSignal },
                ) => Promise<
                    { choices: { message?: { content?: string | null } }[] }
                >;
            };
        };
    }

    An OpenAI client instance from the openai package.

    model?: string

    OpenAI model to use for judging.

    Default: 'gpt-4o'
    
    maxTokens?: number

    Maximum tokens in the judge's response.

    Default: 1024
    
    temperature?: number

    Sampling temperature for the judge LLM. Set to 0 for reproducible, deterministic evaluation runs. Leave undefined to use the provider's default.

    retries?: number

    Number of retry attempts for transient errors (rate limits, 5xx).

    Default: 2