import os

SYSTEM_PROMPT = "You are a programming assistant. You are solving the 2024 advent of code challenge."

PROMPT_TEMPLATE = """You are solving the 2024 advent of code challenge.
You will be provided the description of each challenge. You are to provide the solution to each given challenge.
1) You can reason and explain your logic before writing the code.
2) You must write the code such that it can be parsed into an actual python file.
3) It will be parsed by the evaluator, so it must be valid python code.
4) All of the code must be in a single code block, delimited by ```python and ```.
5) To count as a proper submission, the code must print the result to each question asked.
6) Each question will have a single string as an answer. Make sure to print that string, and nothing else.
7) The actual input to the question will be provided in a file relative to the python file, e.g. "./input.txt". You must read and parse from the file accordingly. You can safely assume the file will always be relative to the python file.
Here is an example of a proper submission:
Your reasoning goes here ...
```python
file = "input.txt"
def your_function(...)
    ...
    ...
    print(result1)
def your_other_function(...)
    ...
    ...
    print(result2)
```
Here is today's challenge description:
{problem_description}
"""


def build_prompt(
    problem_description: str, prompt_template: str = PROMPT_TEMPLATE
) -> str:
    return prompt_template.format(problem_description=problem_description)


def get_completion(
    provider: str,
    user_prompt: str,
    system_prompt: str,
    model: str,
    temperature: float,
) -> str:
    """
    Unified function to get completions from various LLM providers.
    """
    if provider == "openai":
        # Provider SDKs are imported lazily, so only the one actually selected
        # needs to be installed.
        from openai import OpenAI

        OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
        assert (
            OPENAI_API_KEY
        ), "OPENAI_API_KEY is not set, please set it in your environment variables."
        openai_client = OpenAI(api_key=OPENAI_API_KEY)
        completion = openai_client.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt},
            ],
            temperature=temperature,
        )
        # logger.info("Completion: %s", completion)
        return completion.choices[0].message.content
| elif provider == "gemini": | |
| # Setup | |
| import google.generativeai as genai | |
| AI_STUDIO_API_KEY = os.getenv("AI_STUDIO_API_KEY") | |
| assert ( | |
| AI_STUDIO_API_KEY | |
| ), "AI_STUDIO_API_KEY is not set, please set it in your environment variables." | |
| genai.configure(api_key=AI_STUDIO_API_KEY) | |
| model = genai.GenerativeModel( | |
| model_name=model, | |
| system_instruction=system_prompt, | |
| ) | |
| response = model.generate_content( | |
| user_prompt, | |
| generation_config=genai.types.GenerationConfig(temperature=temperature), | |
| ) | |
| # logger.info("reponse: %s", response) | |
| return response.text | |
| elif provider == "anthropic": | |
| # Setup | |
| import anthropic | |
| ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY") | |
| assert ( | |
| ANTHROPIC_API_KEY | |
| ), "ANTHROPIC_API_KEY is not set, please set it in your environment variables." | |
| anthropic_client = anthropic.Anthropic(api_key=ANTHROPIC_API_KEY) | |
| response = anthropic_client.messages.create( | |
| model=model, | |
| max_tokens=2048, | |
| temperature=temperature, | |
| system=system_prompt, | |
| messages=[{"role": "user", "content": user_prompt}], | |
| ) | |
| # logger.info("Response: %s", response) | |
| return response.content[0].text | |
    else:
        raise ValueError(f"Unknown provider: {provider}")
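

# A minimal usage sketch showing how build_prompt and get_completion fit
# together. The puzzle file name "puzzle.txt" and the model name "gpt-4o-mini"
# are placeholders/assumptions; adjust both to match your actual setup.
if __name__ == "__main__":
    with open("puzzle.txt") as f:
        problem_description = f.read()

    prompt = build_prompt(problem_description)
    answer = get_completion(
        provider="openai",
        user_prompt=prompt,
        system_prompt=SYSTEM_PROMPT,
        model="gpt-4o-mini",
        temperature=0.0,
    )
    print(answer)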