import json
import os
from typing import Union

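# The template JSON loaded by Preformatter is expected to provide the keys this
# class reads: "description", "prompt_input", "prompt_no_input", and
# "response_split". A hypothetical alpaca-style template, for illustration
# only, might look like:
#
# {
#   "description": "Example instruction-following template.",
#   "prompt_input": "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n",
#   "prompt_no_input": "### Instruction:\n{instruction}\n\n### Response:\n",
#   "response_split": "### Response:"
# }
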
class Preformatter(object):
    __slots__ = ("template", "name", "_verbose")

    def __init__(self, template_path: str = "", verbose: bool = False):
        self._verbose = verbose
        self.name = os.path.basename(template_path).rsplit(".", 1)[0]
        if not os.path.exists(template_path):
            raise ValueError(f"Can't read preformatter template json: {template_path}")
        with open(template_path) as fp:
            self.template = json.load(fp)
        if self._verbose:
            print(
                f"Using prompt template {template_path}: {self.template['description']}"
            )

    def generate_prompt(
        self,
        instruction: str,
        input: Union[None, str] = None,
        label: Union[None, str] = None,
    ) -> str:
        # returns the full prompt from instruction and optional input
        # if a label (=response, =output) is provided, it's also appended.
        if input is not None:
            res = self.template["prompt_input"].format(
                instruction=instruction, input=input
            )
        else:
            res = self.template["prompt_no_input"].format(instruction=instruction)
        if label:
            res = f"{res}{label}"
        if self._verbose:
            print(res)
        return res

    def get_response(self, output: str) -> str:
        # Strip the prompt portion from a model output, keeping only the text
        # after the template's response_split marker.
        return output.split(self.template["response_split"])[1].strip()
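
# Minimal usage sketch (not part of the original module): the template content
# and file handling below are illustrative assumptions, using an alpaca-style
# layout that matches the keys read above.
if __name__ == "__main__":
    import tempfile

    example_template = {
        "description": "Example instruction-following template (illustrative only).",
        "prompt_input": "### Instruction:\n{instruction}\n\n### Input:\n{input}\n\n### Response:\n",
        "prompt_no_input": "### Instruction:\n{instruction}\n\n### Response:\n",
        "response_split": "### Response:",
    }
    # Write the example template to a temporary JSON file so the sketch is
    # self-contained and runnable.
    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as fp:
        json.dump(example_template, fp)
        example_path = fp.name

    preformatter = Preformatter(example_path, verbose=True)
    prompt = preformatter.generate_prompt(
        instruction="Summarize the following text.",
        input="ExecuTorch enables on-device inference with PyTorch models.",
    )
    # A model's raw output typically echoes the prompt; get_response keeps only
    # the part after the response_split marker.
    raw_output = prompt + "ExecuTorch runs PyTorch models on device."
    print(preformatter.get_response(raw_output))
    os.remove(example_path)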