generation

RAG Generation package.

Modules:

  • base

    Base classes for generation.

  • cohere

    CohereGen class for text generation using Cohere's API.

  • deepseek

    DeepSeek generation module.

  • fireworks

    FireworksGen class for text generation using Fireworks API.

  • gemini

    GeminiGen class for text generation using Google's Gemini model.

  • groq

    Groq class for text generation.

  • hugging_face

    Hugging Face classes for text generation.

  • hugging_face_inf

    Hugging Face inferencing classes for text generation.

  • llama

    Llama generation module.

  • openai

    OpenAI Generation Model class for flexible GPT-based text generation.

  • phi

    Phi generation module.

  • together

    TogetherGen class for text generation using Together AI's API.

Classes:

  • CohereGen

    Cohere generation model for text generation.

  • DeepSeekGen

    DeepSeek Generation class.

  • FireworksGen

    Fireworks AI generation model for text generation.

  • GeminiGen

    Gemini generation model for text generation.

  • GenerationBase

    Generic Generation class.

  • GroqGen

    Groq generation model for text generation.

  • HuggingFaceGen

Hugging Face generation model for local text generation.

  • HuggingFaceInfGen

    HuggingFaceGen with InferenceClient.

  • LlamaGen

    Llama Generation class.

  • OllamaGen

    Ollama Generation class for local inference via ollama-python.

  • OllamaOpenAIGen

Ollama generation via the OpenAI-compatible client interface.

  • OpenAIGen

    OpenAI generation model for text generation.

  • PhiGen

    Phi Generation class.

  • TogetherGen

    Together AI generation model for text generation.

CohereGen

CohereGen(
    model_name: Optional[str] = None,
    temperature: Optional[float] = None,
    prompt_template: str = '',
    output_max_length: int = 500,
    device: str = 'auto',
    structured_output: Optional[Type[BaseModel]] = None,
    system_message: str = '',
    api_params: dict[str, Any] = DEFAULT_API_PARAMS,
    api_key: str = '',
    cache: Optional[Cache] = None,
    logs: dict[str, Any] = DEFAULT_LOGS,
)

Bases: GenerationBase

Cohere generation model for text generation.

Methods:

  • generate

    Generate text using Cohere's API.

Source code in src/rago/generation/base.py, lines 49-102
def __init__(
    self,
    model_name: Optional[str] = None,
    temperature: Optional[float] = None,
    prompt_template: str = '',
    output_max_length: int = 500,
    device: str = 'auto',
    structured_output: Optional[Type[BaseModel]] = None,
    system_message: str = '',
    api_params: dict[str, Any] = DEFAULT_API_PARAMS,
    api_key: str = '',
    cache: Optional[Cache] = None,
    logs: dict[str, Any] = DEFAULT_LOGS,
) -> None:
    """Initialize Generation class."""
    if logs is DEFAULT_LOGS:
        logs = {}
    super().__init__(api_key=api_key, cache=cache, logs=logs)

    self.model_name: str = (
        model_name if model_name is not None else self.default_model_name
    )
    self.output_max_length: int = (
        output_max_length or self.default_output_max_length
    )
    self.temperature: float = (
        temperature
        if temperature is not None
        else self.default_temperature
    )

    self.prompt_template: str = (
        prompt_template or self.default_prompt_template
    )
    self.structured_output: Optional[Type[BaseModel]] = structured_output
    if api_params is DEFAULT_API_PARAMS:
        api_params = deepcopy(self.default_api_params or {})

    self.system_message = system_message
    self.api_params = api_params

    if device not in ['cpu', 'cuda', 'auto']:
        raise Exception(
            f'Device {device} not supported. Options: cpu, cuda, auto.'
        )

    cuda_available = torch.cuda.is_available()
    self.device_name: str = (
        'cpu' if device == 'cpu' or not cuda_available else 'cuda'
    )
    self.device = torch.device(self.device_name)

    self._validate()
    self._setup()

generate

generate(query: str, context: list[str]) -> str | BaseModel

Generate text using Cohere's API.

Source code in src/rago/generation/cohere.py, lines 40-108
def generate(self, query: str, context: list[str]) -> str | BaseModel:
    """Generate text using Cohere's API."""
    input_text = self.prompt_template.format(
        query=query, context=' '.join(context)
    )
    api_params = self.api_params or self.default_api_params

    if self.structured_output:
        messages = []
        # Explicit instruction to generate JSON output.
        system_instruction = (
            'Generate a JSON object that strictly follows the provided  '
            'JSON schema. Do not include any additional text.'
        )
        if self.system_message:
            system_instruction += ' ' + self.system_message
        messages.append({'role': 'system', 'content': system_instruction})
        messages.append({'role': 'user', 'content': input_text})

        response_format_config = {
            'type': 'json_object',
            'json_schema': (
                self.structured_output
                if isinstance(self.structured_output, dict)
                else self.structured_output.model_json_schema()
            ),
        }
        model_params = {
            'messages': messages,
            'max_tokens': self.output_max_length,
            'temperature': self.temperature,
            'model': self.model_name,
            'response_format': response_format_config,
            **api_params,
        }

        response = self.model.client.chat(**model_params)
        self.logs['model_params'] = model_params
        json_text = response.message.content[0].text
        parsed_dict = json.loads(json_text)
        parsed_model = self.structured_output(**parsed_dict)
        return parsed_model

    if self.system_message:
        messages = [
            {'role': 'system', 'content': self.system_message},
            {'role': 'user', 'content': input_text},
        ]
        model_params = {
            'model': self.model_name,
            'messages': messages,
            'max_tokens': self.output_max_length,
            'temperature': self.temperature,
            **api_params,
        }
        response = self.model.chat(**model_params)
        self.logs['model_params'] = model_params
        return cast(str, response.text)

    model_params = {
        'model': self.model_name,
        'prompt': input_text,
        'max_tokens': self.output_max_length,
        'temperature': self.temperature,
        **api_params,
    }
    response = self.model.generate(**model_params)
    self.logs['model_params'] = model_params
    return cast(str, response.generations[0].text.strip())
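
A minimal usage sketch for CohereGen with structured output (assumptions: the class is importable from rago.generation, a valid Cohere API key is available, and the Answer model shown here is purely illustrative):

from pydantic import BaseModel

from rago.generation import CohereGen


class Answer(BaseModel):
    # Illustrative schema; these field names are not part of the library.
    summary: str
    confidence: float


gen = CohereGen(
    api_key='YOUR_COHERE_API_KEY',  # assumption: a real key is required
    structured_output=Answer,
    system_message='Answer strictly from the given context.',
)

result = gen.generate(
    query='What is the capital of France?',
    context=['Paris is the capital and largest city of France.'],
)
print(result)  # an Answer instance when structured_output is set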

DeepSeekGen

DeepSeekGen(
    model_name: Optional[str] = None,
    temperature: Optional[float] = None,
    prompt_template: str = '',
    output_max_length: int = 500,
    device: str = 'auto',
    structured_output: Optional[Type[BaseModel]] = None,
    system_message: str = '',
    api_params: dict[str, Any] = DEFAULT_API_PARAMS,
    api_key: str = '',
    cache: Optional[Cache] = None,
    logs: dict[str, Any] = DEFAULT_LOGS,
)

Bases: GenerationBase

DeepSeek Generation class.

Methods:

  • generate

    Generate text using DeepSeek model with chat template.

Source code in src/rago/generation/base.py, lines 49-102
def __init__(
    self,
    model_name: Optional[str] = None,
    temperature: Optional[float] = None,
    prompt_template: str = '',
    output_max_length: int = 500,
    device: str = 'auto',
    structured_output: Optional[Type[BaseModel]] = None,
    system_message: str = '',
    api_params: dict[str, Any] = DEFAULT_API_PARAMS,
    api_key: str = '',
    cache: Optional[Cache] = None,
    logs: dict[str, Any] = DEFAULT_LOGS,
) -> None:
    """Initialize Generation class."""
    if logs is DEFAULT_LOGS:
        logs = {}
    super().__init__(api_key=api_key, cache=cache, logs=logs)

    self.model_name: str = (
        model_name if model_name is not None else self.default_model_name
    )
    self.output_max_length: int = (
        output_max_length or self.default_output_max_length
    )
    self.temperature: float = (
        temperature
        if temperature is not None
        else self.default_temperature
    )

    self.prompt_template: str = (
        prompt_template or self.default_prompt_template
    )
    self.structured_output: Optional[Type[BaseModel]] = structured_output
    if api_params is DEFAULT_API_PARAMS:
        api_params = deepcopy(self.default_api_params or {})

    self.system_message = system_message
    self.api_params = api_params

    if device not in ['cpu', 'cuda', 'auto']:
        raise Exception(
            f'Device {device} not supported. Options: cpu, cuda, auto.'
        )

    cuda_available = torch.cuda.is_available()
    self.device_name: str = (
        'cpu' if device == 'cpu' or not cuda_available else 'cuda'
    )
    self.device = torch.device(self.device_name)

    self._validate()
    self._setup()

generate

generate(query: str, context: list[str]) -> str

Generate text using DeepSeek model with chat template.

Source code in src/rago/generation/deepseek.py, lines 62-91
def generate(self, query: str, context: list[str]) -> str:
    """Generate text using DeepSeek model with chat template."""
    messages = [
        {
            'role': 'user',
            'content': f'{query}\nContext: {" ".join(context)}',
        }
    ]

    input_tensor = self.tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors='pt'
    ).to(self.model.device)

    model_params = dict(
        max_new_tokens=self.output_max_length,
        do_sample=True,
        temperature=self.temperature,
    )

    self.logs['model_params'] = model_params

    outputs = self.model.generate(input_tensor, **model_params)

    answer: str = str(
        self.tokenizer.decode(
            outputs[0][input_tensor.shape[1] :], skip_special_tokens=True
        )
    )

    return answer.strip()
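
DeepSeekGen runs the model locally through the Hugging Face tokenizer and model, so the first call downloads the weights. A minimal sketch (assumptions: the class's default model name is used; per the constructor, 'cuda' silently falls back to CPU when no GPU is available):

from rago.generation import DeepSeekGen

gen = DeepSeekGen(
    device='cuda',           # resolved to CPU automatically if CUDA is unavailable
    output_max_length=256,
    temperature=0.7,
)
answer = gen.generate(
    query='Summarize the context in one sentence.',
    context=['The mitochondrion produces most of the chemical energy of the cell.'],
)
print(answer)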

FireworksGen

FireworksGen(
    model_name: Optional[str] = None,
    temperature: Optional[float] = None,
    prompt_template: str = '',
    output_max_length: int = 500,
    device: str = 'auto',
    structured_output: Optional[Type[BaseModel]] = None,
    system_message: str = '',
    api_params: dict[str, Any] = DEFAULT_API_PARAMS,
    api_key: str = '',
    cache: Optional[Cache] = None,
    logs: dict[str, Any] = DEFAULT_LOGS,
)

Bases: GenerationBase

Fireworks AI generation model for text generation.

Methods:

  • generate

    Generate text using Fireworks AI's API.

Source code in src/rago/generation/base.py, lines 49-102
def __init__(
    self,
    model_name: Optional[str] = None,
    temperature: Optional[float] = None,
    prompt_template: str = '',
    output_max_length: int = 500,
    device: str = 'auto',
    structured_output: Optional[Type[BaseModel]] = None,
    system_message: str = '',
    api_params: dict[str, Any] = DEFAULT_API_PARAMS,
    api_key: str = '',
    cache: Optional[Cache] = None,
    logs: dict[str, Any] = DEFAULT_LOGS,
) -> None:
    """Initialize Generation class."""
    if logs is DEFAULT_LOGS:
        logs = {}
    super().__init__(api_key=api_key, cache=cache, logs=logs)

    self.model_name: str = (
        model_name if model_name is not None else self.default_model_name
    )
    self.output_max_length: int = (
        output_max_length or self.default_output_max_length
    )
    self.temperature: float = (
        temperature
        if temperature is not None
        else self.default_temperature
    )

    self.prompt_template: str = (
        prompt_template or self.default_prompt_template
    )
    self.structured_output: Optional[Type[BaseModel]] = structured_output
    if api_params is DEFAULT_API_PARAMS:
        api_params = deepcopy(self.default_api_params or {})

    self.system_message = system_message
    self.api_params = api_params

    if device not in ['cpu', 'cuda', 'auto']:
        raise Exception(
            f'Device {device} not supported. Options: cpu, cuda, auto.'
        )

    cuda_available = torch.cuda.is_available()
    self.device_name: str = (
        'cpu' if device == 'cpu' or not cuda_available else 'cuda'
    )
    self.device = torch.device(self.device_name)

    self._validate()
    self._setup()

generate

generate(query: str, context: list[str]) -> str | BaseModel

Generate text using Fireworks AI's API.

Source code in src/rago/generation/fireworks.py, lines 38-67
def generate(self, query: str, context: list[str]) -> str | BaseModel:
    """Generate text using Fireworks AI's API."""
    input_text = self.prompt_template.format(
        query=query, context=' '.join(context)
    )

    api_params = self.api_params or self.default_api_params

    messages = []
    if self.system_message:
        messages.append({'role': 'system', 'content': self.system_message})
    messages.append({'role': 'user', 'content': input_text})

    model_params = {
        'model': self.model_name,
        'messages': messages,
        'max_tokens': self.output_max_length,
        'temperature': self.temperature,
        **api_params,
    }

    if self.structured_output:
        model_params['response_model'] = self.structured_output
        response = self.model.chat.completions.create(**model_params)
        self.logs['model_params'] = model_params
        return cast(BaseModel, response)

    response = self.model.chat.completions.create(**model_params)
    self.logs['model_params'] = model_params
    return cast(str, response.choices[0].message.content.strip())
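
Because api_params is merged directly into the chat-completion request (**api_params above), extra request options can be passed through it. A hedged sketch (assumptions: a Fireworks API key is available and the chosen key, top_p here, is accepted by the Fireworks chat completions endpoint):

from rago.generation import FireworksGen

gen = FireworksGen(
    api_key='YOUR_FIREWORKS_API_KEY',
    api_params={'top_p': 0.9},  # assumption: forwarded to the API unchanged
)
print(
    gen.generate(
        query='Which planet is described?',
        context=['Mars is known as the red planet.'],
    )
)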

GeminiGen

GeminiGen(
    model_name: Optional[str] = None,
    temperature: Optional[float] = None,
    prompt_template: str = '',
    output_max_length: int = 500,
    device: str = 'auto',
    structured_output: Optional[Type[BaseModel]] = None,
    system_message: str = '',
    api_params: dict[str, Any] = DEFAULT_API_PARAMS,
    api_key: str = '',
    cache: Optional[Cache] = None,
    logs: dict[str, Any] = DEFAULT_LOGS,
)

Bases: GenerationBase

Gemini generation model for text generation.

Methods:

  • generate

    Generate text using Gemini model support.

Source code in src/rago/generation/base.py, lines 49-102
def __init__(
    self,
    model_name: Optional[str] = None,
    temperature: Optional[float] = None,
    prompt_template: str = '',
    output_max_length: int = 500,
    device: str = 'auto',
    structured_output: Optional[Type[BaseModel]] = None,
    system_message: str = '',
    api_params: dict[str, Any] = DEFAULT_API_PARAMS,
    api_key: str = '',
    cache: Optional[Cache] = None,
    logs: dict[str, Any] = DEFAULT_LOGS,
) -> None:
    """Initialize Generation class."""
    if logs is DEFAULT_LOGS:
        logs = {}
    super().__init__(api_key=api_key, cache=cache, logs=logs)

    self.model_name: str = (
        model_name if model_name is not None else self.default_model_name
    )
    self.output_max_length: int = (
        output_max_length or self.default_output_max_length
    )
    self.temperature: float = (
        temperature
        if temperature is not None
        else self.default_temperature
    )

    self.prompt_template: str = (
        prompt_template or self.default_prompt_template
    )
    self.structured_output: Optional[Type[BaseModel]] = structured_output
    if api_params is DEFAULT_API_PARAMS:
        api_params = deepcopy(self.default_api_params or {})

    self.system_message = system_message
    self.api_params = api_params

    if device not in ['cpu', 'cuda', 'auto']:
        raise Exception(
            f'Device {device} not supported. Options: cpu, cuda, auto.'
        )

    cuda_available = torch.cuda.is_available()
    self.device_name: str = (
        'cpu' if device == 'cpu' or not cuda_available else 'cuda'
    )
    self.device = torch.device(self.device_name)

    self._validate()
    self._setup()

generate

generate(query: str, context: list[str]) -> str | BaseModel

Generate text using Gemini model support.

Source code in src/rago/generation/gemini.py, lines 36-69
def generate(self, query: str, context: list[str]) -> str | BaseModel:
    """Generate text using Gemini model support."""
    input_text = self.prompt_template.format(
        query=query, context=' '.join(context)
    )

    if not self.structured_output:
        models_params_gen = {'contents': input_text}
        response = self.model.generate_content(**models_params_gen)
        self.logs['model_params'] = models_params_gen
        return cast(str, response.text.strip())

    api_params = (
        self.api_params if self.api_params else self.default_api_params
    )

    messages = []
    if self.system_message:
        messages.append({'role': 'system', 'content': self.system_message})
    messages.append({'role': 'user', 'content': input_text})

    model_params = {
        'messages': messages,
        'response_model': self.structured_output,
        **api_params,
    }

    response = self.model.create(
        **model_params,
    )

    self.logs['model_params'] = model_params

    return cast(BaseModel, response)

GenerationBase

GenerationBase(
    model_name: Optional[str] = None,
    temperature: Optional[float] = None,
    prompt_template: str = '',
    output_max_length: int = 500,
    device: str = 'auto',
    structured_output: Optional[Type[BaseModel]] = None,
    system_message: str = '',
    api_params: dict[str, Any] = DEFAULT_API_PARAMS,
    api_key: str = '',
    cache: Optional[Cache] = None,
    logs: dict[str, Any] = DEFAULT_LOGS,
)

Bases: RagoBase

Generic Generation class.

Methods:

  • generate

    Generate text with optional language parameter.

Source code in src/rago/generation/base.py, lines 49-102
def __init__(
    self,
    model_name: Optional[str] = None,
    temperature: Optional[float] = None,
    prompt_template: str = '',
    output_max_length: int = 500,
    device: str = 'auto',
    structured_output: Optional[Type[BaseModel]] = None,
    system_message: str = '',
    api_params: dict[str, Any] = DEFAULT_API_PARAMS,
    api_key: str = '',
    cache: Optional[Cache] = None,
    logs: dict[str, Any] = DEFAULT_LOGS,
) -> None:
    """Initialize Generation class."""
    if logs is DEFAULT_LOGS:
        logs = {}
    super().__init__(api_key=api_key, cache=cache, logs=logs)

    self.model_name: str = (
        model_name if model_name is not None else self.default_model_name
    )
    self.output_max_length: int = (
        output_max_length or self.default_output_max_length
    )
    self.temperature: float = (
        temperature
        if temperature is not None
        else self.default_temperature
    )

    self.prompt_template: str = (
        prompt_template or self.default_prompt_template
    )
    self.structured_output: Optional[Type[BaseModel]] = structured_output
    if api_params is DEFAULT_API_PARAMS:
        api_params = deepcopy(self.default_api_params or {})

    self.system_message = system_message
    self.api_params = api_params

    if device not in ['cpu', 'cuda', 'auto']:
        raise Exception(
            f'Device {device} not supported. Options: cpu, cuda, auto.'
        )

    cuda_available = torch.cuda.is_available()
    self.device_name: str = (
        'cpu' if device == 'cpu' or not cuda_available else 'cuda'
    )
    self.device = torch.device(self.device_name)

    self._validate()
    self._setup()

generate abstractmethod

generate(query: str, context: list[str]) -> str | BaseModel

Generate text with optional language parameter.

Parameters:

  • query (str) –

    The input query or prompt.

  • context (list[str]) –

    Additional context information for the generation.

Returns:

  • str

    Generated text based on query and context.

Source code in src/rago/generation/base.py, lines 112-132
@abstractmethod
def generate(
    self,
    query: str,
    context: list[str],
) -> str | BaseModel:
    """Generate text with optional language parameter.

    Parameters
    ----------
    query : str
        The input query or prompt.
    context : list[str]
        Additional context information for the generation.

    Returns
    -------
    str
        Generated text based on query and context.
    """
    ...
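
Custom backends subclass GenerationBase and implement generate. A minimal sketch under stated assumptions: the default_* class attributes referenced by __init__ above must be provided (their names are taken from that listing), and the _validate/_setup hooks are overridden here as no-ops because their base behaviour is not shown in this listing.

from __future__ import annotations

from pydantic import BaseModel

from rago.generation import GenerationBase


class EchoGen(GenerationBase):
    # Defaults consumed by GenerationBase.__init__.
    default_model_name = 'echo'
    default_temperature = 0.0
    default_output_max_length = 500
    default_prompt_template = 'Question: {query}\nContext: {context}\nAnswer:'
    default_api_params: dict = {}

    def _validate(self) -> None:  # hypothetical no-op hook
        pass

    def _setup(self) -> None:  # hypothetical no-op hook
        pass

    def generate(self, query: str, context: list[str]) -> str | BaseModel:
        # No model call: return the rendered prompt to show the data flow.
        return self.prompt_template.format(
            query=query, context=' '.join(context)
        )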

GroqGen

GroqGen(
    model_name: Optional[str] = None,
    temperature: Optional[float] = None,
    prompt_template: str = '',
    output_max_length: int = 500,
    device: str = 'auto',
    structured_output: Optional[Type[BaseModel]] = None,
    system_message: str = '',
    api_params: dict[str, Any] = DEFAULT_API_PARAMS,
    api_key: str = '',
    cache: Optional[Cache] = None,
    logs: dict[str, Any] = DEFAULT_LOGS,
)

Bases: GenerationBase

Groq generation model for text generation.

Methods:

  • generate

Generate text using the Groq API.

Source code in src/rago/generation/base.py, lines 49-102
def __init__(
    self,
    model_name: Optional[str] = None,
    temperature: Optional[float] = None,
    prompt_template: str = '',
    output_max_length: int = 500,
    device: str = 'auto',
    structured_output: Optional[Type[BaseModel]] = None,
    system_message: str = '',
    api_params: dict[str, Any] = DEFAULT_API_PARAMS,
    api_key: str = '',
    cache: Optional[Cache] = None,
    logs: dict[str, Any] = DEFAULT_LOGS,
) -> None:
    """Initialize Generation class."""
    if logs is DEFAULT_LOGS:
        logs = {}
    super().__init__(api_key=api_key, cache=cache, logs=logs)

    self.model_name: str = (
        model_name if model_name is not None else self.default_model_name
    )
    self.output_max_length: int = (
        output_max_length or self.default_output_max_length
    )
    self.temperature: float = (
        temperature
        if temperature is not None
        else self.default_temperature
    )

    self.prompt_template: str = (
        prompt_template or self.default_prompt_template
    )
    self.structured_output: Optional[Type[BaseModel]] = structured_output
    if api_params is DEFAULT_API_PARAMS:
        api_params = deepcopy(self.default_api_params or {})

    self.system_message = system_message
    self.api_params = api_params

    if device not in ['cpu', 'cuda', 'auto']:
        raise Exception(
            f'Device {device} not supported. Options: cpu, cuda, auto.'
        )

    cuda_available = torch.cuda.is_available()
    self.device_name: str = (
        'cpu' if device == 'cpu' or not cuda_available else 'cuda'
    )
    self.device = torch.device(self.device_name)

    self._validate()
    self._setup()

generate

generate(query: str, context: list[str]) -> str | BaseModel

Generate text using the Groq API.

Source code in src/rago/generation/groq.py, lines 43-82
def generate(
    self,
    query: str,
    context: list[str],
) -> str | BaseModel:
    """Generate text using the Groq AP."""
    input_text = self.prompt_template.format(
        query=query, context=' '.join(context)
    )

    if not self.model:
        raise Exception('The model was not created.')

    api_params = (
        self.api_params if self.api_params else self.default_api_params
    )

    messages = []
    if self.system_message:
        messages.append({'role': 'system', 'content': self.system_message})
    messages.append({'role': 'user', 'content': input_text})

    model_params = dict(
        model=self.model_name,
        messages=messages,
        max_completion_tokens=self.output_max_length,
        temperature=self.temperature,
        **api_params,
    )

    if self.structured_output:
        model_params['response_model'] = self.structured_output

    response = self.model.chat.completions.create(**model_params)
    self.logs['model_params'] = model_params

    if hasattr(response, 'choices') and isinstance(response.choices, list):
        return cast(str, response.choices[0].message.content.strip())

    return cast(BaseModel, response)

HuggingFaceGen

HuggingFaceGen(
    model_name: Optional[str] = None,
    temperature: Optional[float] = None,
    prompt_template: str = '',
    output_max_length: int = 500,
    device: str = 'auto',
    structured_output: Optional[Type[BaseModel]] = None,
    system_message: str = '',
    api_params: dict[str, Any] = DEFAULT_API_PARAMS,
    api_key: str = '',
    cache: Optional[Cache] = None,
    logs: dict[str, Any] = DEFAULT_LOGS,
)

Bases: GenerationBase

Hugging Face generation model for local text generation.

Methods:

  • generate

    Generate the text from the query and augmented context.

Source code in src/rago/generation/base.py, lines 49-102
def __init__(
    self,
    model_name: Optional[str] = None,
    temperature: Optional[float] = None,
    prompt_template: str = '',
    output_max_length: int = 500,
    device: str = 'auto',
    structured_output: Optional[Type[BaseModel]] = None,
    system_message: str = '',
    api_params: dict[str, Any] = DEFAULT_API_PARAMS,
    api_key: str = '',
    cache: Optional[Cache] = None,
    logs: dict[str, Any] = DEFAULT_LOGS,
) -> None:
    """Initialize Generation class."""
    if logs is DEFAULT_LOGS:
        logs = {}
    super().__init__(api_key=api_key, cache=cache, logs=logs)

    self.model_name: str = (
        model_name if model_name is not None else self.default_model_name
    )
    self.output_max_length: int = (
        output_max_length or self.default_output_max_length
    )
    self.temperature: float = (
        temperature
        if temperature is not None
        else self.default_temperature
    )

    self.prompt_template: str = (
        prompt_template or self.default_prompt_template
    )
    self.structured_output: Optional[Type[BaseModel]] = structured_output
    if api_params is DEFAULT_API_PARAMS:
        api_params = deepcopy(self.default_api_params or {})

    self.system_message = system_message
    self.api_params = api_params

    if device not in ['cpu', 'cuda', 'auto']:
        raise Exception(
            f'Device {device} not supported. Options: cpu, cuda, auto.'
        )

    cuda_available = torch.cuda.is_available()
    self.device_name: str = (
        'cpu' if device == 'cpu' or not cuda_available else 'cuda'
    )
    self.device = torch.device(self.device_name)

    self._validate()
    self._setup()

generate

generate(query: str, context: list[str]) -> str

Generate the text from the query and augmented context.

Source code in src/rago/generation/hugging_face.py, lines 39-74
def generate(self, query: str, context: list[str]) -> str:
    """Generate the text from the query and augmented context."""
    with torch.no_grad():
        input_text = self.prompt_template.format(
            query=query, context=' '.join(context)
        )
        input_ids = self.tokenizer.encode(
            input_text,
            return_tensors='pt',
            truncation=True,
            max_length=512,
        ).to(self.device_name)

        api_params = (
            self.api_params if self.api_params else self.default_api_params
        )

        model_params = dict(
            inputs=input_ids,
            max_length=self.output_max_length,
            pad_token_id=self.tokenizer.eos_token_id,
            **api_params,
        )

        outputs = self.model.generate(**model_params)

        self.logs['model_params'] = model_params

        response = self.tokenizer.decode(
            outputs[0], skip_special_tokens=True
        )

    if self.device_name == 'cuda':
        torch.cuda.empty_cache()

    return str(response)
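
The prompt template must expose {query} and {context} placeholders, since generate fills it with .format(query=..., context=...). A local-inference sketch (assumptions: the class's default Hugging Face model is used and its weights are downloaded on first run):

from rago.generation import HuggingFaceGen

template = (
    'Answer the question using only the context.\n'
    'Context: {context}\n'
    'Question: {query}\n'
    'Answer:'
)

gen = HuggingFaceGen(
    device='cpu',
    prompt_template=template,
    output_max_length=200,
)
print(
    gen.generate(
        query='Who wrote the note?',
        context=['The note was written by Ada.'],
    )
)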

HuggingFaceInfGen

HuggingFaceInfGen(
    model_name: Optional[str] = None,
    temperature: Optional[float] = None,
    prompt_template: str = '',
    output_max_length: int = 500,
    device: str = 'auto',
    structured_output: Optional[Type[BaseModel]] = None,
    system_message: str = '',
    api_params: dict[str, Any] = DEFAULT_API_PARAMS,
    api_key: str = '',
    cache: Optional[Cache] = None,
    logs: dict[str, Any] = DEFAULT_LOGS,
)

Bases: GenerationBase

HuggingFaceGen with InferenceClient.

Methods:

  • generate

    Generate the text from the query and augmented context.

Source code in src/rago/generation/base.py, lines 49-102
def __init__(
    self,
    model_name: Optional[str] = None,
    temperature: Optional[float] = None,
    prompt_template: str = '',
    output_max_length: int = 500,
    device: str = 'auto',
    structured_output: Optional[Type[BaseModel]] = None,
    system_message: str = '',
    api_params: dict[str, Any] = DEFAULT_API_PARAMS,
    api_key: str = '',
    cache: Optional[Cache] = None,
    logs: dict[str, Any] = DEFAULT_LOGS,
) -> None:
    """Initialize Generation class."""
    if logs is DEFAULT_LOGS:
        logs = {}
    super().__init__(api_key=api_key, cache=cache, logs=logs)

    self.model_name: str = (
        model_name if model_name is not None else self.default_model_name
    )
    self.output_max_length: int = (
        output_max_length or self.default_output_max_length
    )
    self.temperature: float = (
        temperature
        if temperature is not None
        else self.default_temperature
    )

    self.prompt_template: str = (
        prompt_template or self.default_prompt_template
    )
    self.structured_output: Optional[Type[BaseModel]] = structured_output
    if api_params is DEFAULT_API_PARAMS:
        api_params = deepcopy(self.default_api_params or {})

    self.system_message = system_message
    self.api_params = api_params

    if device not in ['cpu', 'cuda', 'auto']:
        raise Exception(
            f'Device {device} not supported. Options: cpu, cuda, auto.'
        )

    cuda_available = torch.cuda.is_available()
    self.device_name: str = (
        'cpu' if device == 'cpu' or not cuda_available else 'cuda'
    )
    self.device = torch.device(self.device_name)

    self._validate()
    self._setup()

generate

generate(query: str, context: list[str]) -> str | BaseModel

Generate the text from the query and augmented context.

Source code in src/rago/generation/hugging_face_inf.py, lines 28-50
def generate(self, query: str, context: list[str]) -> str | BaseModel:
    """Generate the text from the query and augmented context."""
    input_text = self.prompt_template.format(
        query=query, context=' '.join(context)
    )
    if self.system_message:
        input_text = f'{self.system_message}\n{input_text}'

    api_params = self.api_params or self.default_api_params

    self.logs['model_params'] = {
        'model': self.model_name,
        'inputs': input_text,
        'parameters': api_params,
    }
    generated_text = self.client.text_generation(
        prompt=input_text,
        model=self.model_name,
        max_new_tokens=api_params['max_new_tokens'],
        temperature=api_params['temperature'],
    )

    return generated_text.strip()

LlamaGen

LlamaGen(
    model_name: Optional[str] = None,
    temperature: Optional[float] = None,
    prompt_template: str = '',
    output_max_length: int = 500,
    device: str = 'auto',
    structured_output: Optional[Type[BaseModel]] = None,
    system_message: str = '',
    api_params: dict[str, Any] = DEFAULT_API_PARAMS,
    api_key: str = '',
    cache: Optional[Cache] = None,
    logs: dict[str, Any] = DEFAULT_LOGS,
)

Bases: GenerationBase

Llama Generation class.

Methods:

  • generate

    Generate text using Llama model with language support.

Source code in src/rago/generation/base.py, lines 49-102
def __init__(
    self,
    model_name: Optional[str] = None,
    temperature: Optional[float] = None,
    prompt_template: str = '',
    output_max_length: int = 500,
    device: str = 'auto',
    structured_output: Optional[Type[BaseModel]] = None,
    system_message: str = '',
    api_params: dict[str, Any] = DEFAULT_API_PARAMS,
    api_key: str = '',
    cache: Optional[Cache] = None,
    logs: dict[str, Any] = DEFAULT_LOGS,
) -> None:
    """Initialize Generation class."""
    if logs is DEFAULT_LOGS:
        logs = {}
    super().__init__(api_key=api_key, cache=cache, logs=logs)

    self.model_name: str = (
        model_name if model_name is not None else self.default_model_name
    )
    self.output_max_length: int = (
        output_max_length or self.default_output_max_length
    )
    self.temperature: float = (
        temperature
        if temperature is not None
        else self.default_temperature
    )

    self.prompt_template: str = (
        prompt_template or self.default_prompt_template
    )
    self.structured_output: Optional[Type[BaseModel]] = structured_output
    if api_params is DEFAULT_API_PARAMS:
        api_params = deepcopy(self.default_api_params or {})

    self.system_message = system_message
    self.api_params = api_params

    if device not in ['cpu', 'cuda', 'auto']:
        raise Exception(
            f'Device {device} not supported. Options: cpu, cuda, auto.'
        )

    cuda_available = torch.cuda.is_available()
    self.device_name: str = (
        'cpu' if device == 'cpu' or not cuda_available else 'cuda'
    )
    self.device = torch.device(self.device_name)

    self._validate()
    self._setup()

generate

generate(query: str, context: list[str]) -> str

Generate text using Llama model with language support.

Source code in src/rago/generation/llama.py, lines 71-102
def generate(self, query: str, context: list[str]) -> str:
    """Generate text using Llama model with language support."""
    input_text = self.prompt_template.format(
        query=query, context=' '.join(context)
    )

    # Detect and set the language code for multilingual models (optional)
    language = str(detect(query)) or 'en'
    self.tokenizer.lang_code = language

    api_params = (
        self.api_params if self.api_params else self.default_api_params
    )

    # Generate the response with adjusted parameters

    model_params = dict(
        text_inputs=input_text,
        max_new_tokens=self.output_max_length,
        do_sample=True,
        temperature=self.temperature,
        eos_token_id=self.tokenizer.eos_token_id,
        **api_params,
    )
    response = self.generator(**model_params)

    self.logs['model_params'] = model_params

    # Extract and return the answer only
    answer = str(response[0].get('generated_text', ''))
    # Strip off any redundant text after the answer itself
    return answer.split('Answer:')[-1].strip()

OllamaGen

OllamaGen(
    model_name: Optional[str] = None,
    temperature: Optional[float] = None,
    prompt_template: str = '',
    output_max_length: int = 500,
    device: str = 'auto',
    structured_output: Optional[Type[BaseModel]] = None,
    system_message: str = '',
    api_params: dict[str, Any] = DEFAULT_API_PARAMS,
    api_key: str = '',
    cache: Optional[Cache] = None,
    logs: dict[str, Any] = DEFAULT_LOGS,
)

Bases: GenerationBase

Ollama Generation class for local inference via ollama-python.

Methods:

  • generate

    Generate text by sending a prompt to the local Ollama model.

Source code in src/rago/generation/base.py, lines 49-102
def __init__(
    self,
    model_name: Optional[str] = None,
    temperature: Optional[float] = None,
    prompt_template: str = '',
    output_max_length: int = 500,
    device: str = 'auto',
    structured_output: Optional[Type[BaseModel]] = None,
    system_message: str = '',
    api_params: dict[str, Any] = DEFAULT_API_PARAMS,
    api_key: str = '',
    cache: Optional[Cache] = None,
    logs: dict[str, Any] = DEFAULT_LOGS,
) -> None:
    """Initialize Generation class."""
    if logs is DEFAULT_LOGS:
        logs = {}
    super().__init__(api_key=api_key, cache=cache, logs=logs)

    self.model_name: str = (
        model_name if model_name is not None else self.default_model_name
    )
    self.output_max_length: int = (
        output_max_length or self.default_output_max_length
    )
    self.temperature: float = (
        temperature
        if temperature is not None
        else self.default_temperature
    )

    self.prompt_template: str = (
        prompt_template or self.default_prompt_template
    )
    self.structured_output: Optional[Type[BaseModel]] = structured_output
    if api_params is DEFAULT_API_PARAMS:
        api_params = deepcopy(self.default_api_params or {})

    self.system_message = system_message
    self.api_params = api_params

    if device not in ['cpu', 'cuda', 'auto']:
        raise Exception(
            f'Device {device} not supported. Options: cpu, cuda, auto.'
        )

    cuda_available = torch.cuda.is_available()
    self.device_name: str = (
        'cpu' if device == 'cpu' or not cuda_available else 'cuda'
    )
    self.device = torch.device(self.device_name)

    self._validate()
    self._setup()

generate

generate(query: str, context: list[str]) -> str | BaseModel

Generate text by sending a prompt to the local Ollama model.

Parameters:

  • query (str) –

    The user query.

  • context (list[str]) –

    Augmented context strings.

Returns:

  • str

    The generated response text.

Source code in src/rago/generation/llama.py, lines 127-159
def generate(self, query: str, context: list[str]) -> str | BaseModel:
    """
    Generate text by sending a prompt to the local Ollama model.

    Parameters
    ----------
    query : str
        The user query.
    context : list[str]
        Augmented context strings.

    Returns
    -------
    str
        The generated response text.
    """
    input_text = self.prompt_template.format(
        query=query,
        context=' '.join(context),
    )

    messages = []
    if self.system_message:
        messages.append({'role': 'system', 'content': self.system_message})
    messages.append({'role': 'user', 'content': input_text})

    params = {
        'model': self.model_name,
        'messages': messages,
        **(self.api_params or {}),
    }
    response = self.model.chat(**params)
    return str(response.message.content).strip()
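
OllamaGen needs a running local Ollama server and an already-pulled model. A minimal sketch (assumption: the model name below is illustrative, not a library default; any model available via `ollama pull` should work):

from rago.generation import OllamaGen

gen = OllamaGen(
    model_name='llama3.2',  # assumption: replace with a locally pulled model
    system_message='Answer in one short sentence.',
)
print(
    gen.generate(
        query='What colour is the sky?',
        context=['On a clear day the sky appears blue.'],
    )
)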

OllamaOpenAIGen

OllamaOpenAIGen(
    model_name: Optional[str] = None,
    temperature: Optional[float] = None,
    prompt_template: str = '',
    output_max_length: int = 500,
    device: str = 'auto',
    structured_output: Optional[Type[BaseModel]] = None,
    system_message: str = '',
    api_params: dict[str, Any] = DEFAULT_API_PARAMS,
    api_key: str = '',
    cache: Optional[Cache] = None,
    logs: dict[str, Any] = DEFAULT_LOGS,
)

Bases: OpenAIGen

Ollama generation via the OpenAI-compatible client interface.

Methods:

  • generate

    Generate text using OpenAI's API with dynamic model support.

Source code in src/rago/generation/base.py, lines 49-102
def __init__(
    self,
    model_name: Optional[str] = None,
    temperature: Optional[float] = None,
    prompt_template: str = '',
    output_max_length: int = 500,
    device: str = 'auto',
    structured_output: Optional[Type[BaseModel]] = None,
    system_message: str = '',
    api_params: dict[str, Any] = DEFAULT_API_PARAMS,
    api_key: str = '',
    cache: Optional[Cache] = None,
    logs: dict[str, Any] = DEFAULT_LOGS,
) -> None:
    """Initialize Generation class."""
    if logs is DEFAULT_LOGS:
        logs = {}
    super().__init__(api_key=api_key, cache=cache, logs=logs)

    self.model_name: str = (
        model_name if model_name is not None else self.default_model_name
    )
    self.output_max_length: int = (
        output_max_length or self.default_output_max_length
    )
    self.temperature: float = (
        temperature
        if temperature is not None
        else self.default_temperature
    )

    self.prompt_template: str = (
        prompt_template or self.default_prompt_template
    )
    self.structured_output: Optional[Type[BaseModel]] = structured_output
    if api_params is DEFAULT_API_PARAMS:
        api_params = deepcopy(self.default_api_params or {})

    self.system_message = system_message
    self.api_params = api_params

    if device not in ['cpu', 'cuda', 'auto']:
        raise Exception(
            f'Device {device} not supported. Options: cpu, cuda, auto.'
        )

    cuda_available = torch.cuda.is_available()
    self.device_name: str = (
        'cpu' if device == 'cpu' or not cuda_available else 'cuda'
    )
    self.device = torch.device(self.device_name)

    self._validate()
    self._setup()

generate

generate(query: str, context: list[str]) -> str | BaseModel

Generate text using OpenAI's API with dynamic model support.

Source code in src/rago/generation/openai.py, lines 39-76
def generate(
    self,
    query: str,
    context: list[str],
) -> str | BaseModel:
    """Generate text using OpenAI's API with dynamic model support."""
    input_text = self.prompt_template.format(
        query=query, context=' '.join(context)
    )

    if not self.model:
        raise Exception('The model was not created.')

    messages = []
    if self.system_message:
        messages.append({'role': 'system', 'content': self.system_message})
    messages.append({'role': 'user', 'content': input_text})

    model_params = dict(
        model=self.model_name,
        messages=messages,
        max_tokens=self.output_max_length,
        temperature=self.temperature,
        **self.api_params,
    )

    if self.structured_output:
        model_params['response_model'] = self.structured_output

    response = self.model.chat.completions.create(**model_params)

    self.logs['model_params'] = model_params

    has_choices = hasattr(response, 'choices')

    if has_choices and isinstance(response.choices, list):
        return cast(str, response.choices[0].message.content.strip())
    return cast(BaseModel, response)

OpenAIGen

OpenAIGen(
    model_name: Optional[str] = None,
    temperature: Optional[float] = None,
    prompt_template: str = '',
    output_max_length: int = 500,
    device: str = 'auto',
    structured_output: Optional[Type[BaseModel]] = None,
    system_message: str = '',
    api_params: dict[str, Any] = DEFAULT_API_PARAMS,
    api_key: str = '',
    cache: Optional[Cache] = None,
    logs: dict[str, Any] = DEFAULT_LOGS,
)

Bases: GenerationBase

OpenAI generation model for text generation.

Methods:

  • generate

    Generate text using OpenAI's API with dynamic model support.

Source code in src/rago/generation/base.py, lines 49-102
def __init__(
    self,
    model_name: Optional[str] = None,
    temperature: Optional[float] = None,
    prompt_template: str = '',
    output_max_length: int = 500,
    device: str = 'auto',
    structured_output: Optional[Type[BaseModel]] = None,
    system_message: str = '',
    api_params: dict[str, Any] = DEFAULT_API_PARAMS,
    api_key: str = '',
    cache: Optional[Cache] = None,
    logs: dict[str, Any] = DEFAULT_LOGS,
) -> None:
    """Initialize Generation class."""
    if logs is DEFAULT_LOGS:
        logs = {}
    super().__init__(api_key=api_key, cache=cache, logs=logs)

    self.model_name: str = (
        model_name if model_name is not None else self.default_model_name
    )
    self.output_max_length: int = (
        output_max_length or self.default_output_max_length
    )
    self.temperature: float = (
        temperature
        if temperature is not None
        else self.default_temperature
    )

    self.prompt_template: str = (
        prompt_template or self.default_prompt_template
    )
    self.structured_output: Optional[Type[BaseModel]] = structured_output
    if api_params is DEFAULT_API_PARAMS:
        api_params = deepcopy(self.default_api_params or {})

    self.system_message = system_message
    self.api_params = api_params

    if device not in ['cpu', 'cuda', 'auto']:
        raise Exception(
            f'Device {device} not supported. Options: cpu, cuda, auto.'
        )

    cuda_available = torch.cuda.is_available()
    self.device_name: str = (
        'cpu' if device == 'cpu' or not cuda_available else 'cuda'
    )
    self.device = torch.device(self.device_name)

    self._validate()
    self._setup()

generate

generate(query: str, context: list[str]) -> str | BaseModel

Generate text using OpenAI's API with dynamic model support.

Source code in src/rago/generation/openai.py, lines 39-76
def generate(
    self,
    query: str,
    context: list[str],
) -> str | BaseModel:
    """Generate text using OpenAI's API with dynamic model support."""
    input_text = self.prompt_template.format(
        query=query, context=' '.join(context)
    )

    if not self.model:
        raise Exception('The model was not created.')

    messages = []
    if self.system_message:
        messages.append({'role': 'system', 'content': self.system_message})
    messages.append({'role': 'user', 'content': input_text})

    model_params = dict(
        model=self.model_name,
        messages=messages,
        max_tokens=self.output_max_length,
        temperature=self.temperature,
        **self.api_params,
    )

    if self.structured_output:
        model_params['response_model'] = self.structured_output

    response = self.model.chat.completions.create(**model_params)

    self.logs['model_params'] = model_params

    has_choices = hasattr(response, 'choices')

    if has_choices and isinstance(response.choices, list):
        return cast(str, response.choices[0].message.content.strip())
    return cast(BaseModel, response)
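
A basic OpenAIGen sketch (assumptions: OPENAI_API_KEY is set in the environment and the model name shown is available to the account; both are illustrative, not library defaults):

import os

from rago.generation import OpenAIGen

gen = OpenAIGen(
    api_key=os.environ['OPENAI_API_KEY'],
    model_name='gpt-4o-mini',  # assumption: any chat-completion model works here
    system_message='You are a concise assistant.',
    temperature=0.2,
)
print(
    gen.generate(
        query='What is retrieval-augmented generation?',
        context=['RAG augments a prompt with retrieved documents before generation.'],
    )
)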

PhiGen

PhiGen(
    model_name: Optional[str] = None,
    temperature: Optional[float] = None,
    prompt_template: str = '',
    output_max_length: int = 500,
    device: str = 'auto',
    structured_output: Optional[Type[BaseModel]] = None,
    system_message: str = '',
    api_params: dict[str, Any] = DEFAULT_API_PARAMS,
    api_key: str = '',
    cache: Optional[Cache] = None,
    logs: dict[str, Any] = DEFAULT_LOGS,
)

Bases: GenerationBase

Phi Generation class.

Methods:

  • generate

    Generate text using Phi model with context.

Source code in src/rago/generation/base.py, lines 49-102
def __init__(
    self,
    model_name: Optional[str] = None,
    temperature: Optional[float] = None,
    prompt_template: str = '',
    output_max_length: int = 500,
    device: str = 'auto',
    structured_output: Optional[Type[BaseModel]] = None,
    system_message: str = '',
    api_params: dict[str, Any] = DEFAULT_API_PARAMS,
    api_key: str = '',
    cache: Optional[Cache] = None,
    logs: dict[str, Any] = DEFAULT_LOGS,
) -> None:
    """Initialize Generation class."""
    if logs is DEFAULT_LOGS:
        logs = {}
    super().__init__(api_key=api_key, cache=cache, logs=logs)

    self.model_name: str = (
        model_name if model_name is not None else self.default_model_name
    )
    self.output_max_length: int = (
        output_max_length or self.default_output_max_length
    )
    self.temperature: float = (
        temperature
        if temperature is not None
        else self.default_temperature
    )

    self.prompt_template: str = (
        prompt_template or self.default_prompt_template
    )
    self.structured_output: Optional[Type[BaseModel]] = structured_output
    if api_params is DEFAULT_API_PARAMS:
        api_params = deepcopy(self.default_api_params or {})

    self.system_message = system_message
    self.api_params = api_params

    if device not in ['cpu', 'cuda', 'auto']:
        raise Exception(
            f'Device {device} not supported. Options: cpu, cuda, auto.'
        )

    cuda_available = torch.cuda.is_available()
    self.device_name: str = (
        'cpu' if device == 'cpu' or not cuda_available else 'cuda'
    )
    self.device = torch.device(self.device_name)

    self._validate()
    self._setup()

generate

generate(query: str, context: list[str]) -> str

Generate text using Phi model with context.

Source code in src/rago/generation/phi.py, lines 64-94
def generate(self, query: str, context: list[str]) -> str:
    """Generate text using Phi model with context."""
    full_prompt = f'{query}\nContext: {" ".join(context)}'

    inputs = self.tokenizer(
        full_prompt, return_tensors='pt', return_attention_mask=True
    ).to(self.model.device)

    model_params = dict(
        max_new_tokens=self.output_max_length,
        do_sample=True,
        temperature=self.temperature,
        top_p=self.default_api_params['top_p'],
        num_return_sequences=self.default_api_params[
            'num_return_sequences'
        ],
    )

    self.logs['model_params'] = model_params

    outputs = self.model.generate(
        input_ids=inputs.input_ids,
        attention_mask=inputs.attention_mask,
        **model_params,
    )

    answer: str = self.tokenizer.decode(
        outputs[0][inputs.input_ids.shape[1] :], skip_special_tokens=True
    )

    return answer.strip()

TogetherGen

TogetherGen(
    model_name: Optional[str] = None,
    temperature: Optional[float] = None,
    prompt_template: str = '',
    output_max_length: int = 500,
    device: str = 'auto',
    structured_output: Optional[Type[BaseModel]] = None,
    system_message: str = '',
    api_params: dict[str, Any] = DEFAULT_API_PARAMS,
    api_key: str = '',
    cache: Optional[Cache] = None,
    logs: dict[str, Any] = DEFAULT_LOGS,
)

Bases: GenerationBase

Together AI generation model for text generation.

Methods:

  • generate

    Generate text using Together AI's API.

Source code in src/rago/generation/base.py, lines 49-102
def __init__(
    self,
    model_name: Optional[str] = None,
    temperature: Optional[float] = None,
    prompt_template: str = '',
    output_max_length: int = 500,
    device: str = 'auto',
    structured_output: Optional[Type[BaseModel]] = None,
    system_message: str = '',
    api_params: dict[str, Any] = DEFAULT_API_PARAMS,
    api_key: str = '',
    cache: Optional[Cache] = None,
    logs: dict[str, Any] = DEFAULT_LOGS,
) -> None:
    """Initialize Generation class."""
    if logs is DEFAULT_LOGS:
        logs = {}
    super().__init__(api_key=api_key, cache=cache, logs=logs)

    self.model_name: str = (
        model_name if model_name is not None else self.default_model_name
    )
    self.output_max_length: int = (
        output_max_length or self.default_output_max_length
    )
    self.temperature: float = (
        temperature
        if temperature is not None
        else self.default_temperature
    )

    self.prompt_template: str = (
        prompt_template or self.default_prompt_template
    )
    self.structured_output: Optional[Type[BaseModel]] = structured_output
    if api_params is DEFAULT_API_PARAMS:
        api_params = deepcopy(self.default_api_params or {})

    self.system_message = system_message
    self.api_params = api_params

    if device not in ['cpu', 'cuda', 'auto']:
        raise Exception(
            f'Device {device} not supported. Options: cpu, cuda, auto.'
        )

    cuda_available = torch.cuda.is_available()
    self.device_name: str = (
        'cpu' if device == 'cpu' or not cuda_available else 'cuda'
    )
    self.device = torch.device(self.device_name)

    self._validate()
    self._setup()

generate

generate(query: str, context: list[str]) -> str | BaseModel

Generate text using Together AI's API.

Source code in src/rago/generation/together.py, lines 51-80
def generate(self, query: str, context: list[str]) -> str | BaseModel:
    """Generate text using Together AI's API."""
    input_text = self.prompt_template.format(
        query=query, context=' '.join(context)
    )

    api_params = self.api_params or self.default_api_params

    messages = []
    if self.system_message:
        messages.append({'role': 'system', 'content': self.system_message})
    messages.append({'role': 'user', 'content': input_text})

    model_params = {
        'model': self.model_name,
        'messages': messages,
        'max_tokens': self.output_max_length,
        'temperature': self.temperature,
        **api_params,
    }

    if self.structured_output:
        model_params['response_model'] = self.structured_output
        response = self.model.chat.completions.create(**model_params)
        self.logs['model_params'] = model_params
        return cast(BaseModel, response)

    response = self.model.chat.completions.create(**model_params)
    self.logs['model_params'] = model_params
    return cast(str, response.choices[0].message.content.strip())