vinny4 commited on
Commit
63c4985
·
1 Parent(s): af2a153

added current progress

Browse files
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ .env
app/streamlitApp.py ADDED
File without changes
src/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (156 Bytes). View file
 
src/__pycache__/consistency.cpython-310.pyc ADDED
Binary file (1.1 kB). View file
 
src/__pycache__/cot.cpython-310.pyc ADDED
Binary file (1.17 kB). View file
 
src/__pycache__/groq_client.cpython-310.pyc ADDED
Binary file (852 Bytes). View file
 
src/__pycache__/utils.cpython-310.pyc ADDED
Binary file (293 Bytes). View file
 
src/consistency.py CHANGED
@@ -1,25 +1,27 @@
1
  from typing import List, Tuple
2
  from .cot import generate_answer
 
3
 
4
  def sample_cot(
5
  question: str,
6
- model_id: str,
7
- temperature: float,
8
- max_tokens: int,
9
- exampler: List[Tuple[str, str]]
10
  ):
11
  """
12
  to be written
13
  """
14
- return generate_answer(question, model_id, temperature, max_tokens, exampler)
 
15
 
16
 
17
  def self_consistent_answer(
18
  question:str,
19
- model_id:str,
20
  temperature:float=0.5,
21
  max_tokens:int=200,
22
- exampler:List[Tuple[str, str]]=("",""),
23
  num_samples:int=3
24
  ):
25
  """
@@ -30,10 +32,9 @@ def self_consistent_answer(
30
  results = []
31
 
32
  for _ in range(num_samples):
33
- reasoning = sample_cot(question, model_id, temperature, max_tokens ,exampler)
34
  reasoning_paths.append(reasoning)
35
 
36
- last_line = reasoning.strip().split("\n")[-1]
37
  results.append(last_line)
38
 
39
  return reasoning_paths, results
 
1
  from typing import List, Tuple
2
  from .cot import generate_answer
3
+ from .utils import get_answer
4
 
5
def sample_cot(
    question: str,
    model_id: str="llama3-8b-8192",
    temperature: float=0.5,
    max_tokens: int=200,
    exampler: List[Tuple[str, str]]=None
):
    """Draw a single chain-of-thought sample for *question*.

    Thin wrapper around generate_answer that fixes mode="cot" so the
    model is prompted to reason step by step.

    Parameters
    ----------
    question : str
        Question to reason about.
    model_id : str
        Groq model identifier.
    temperature : float
        Sampling temperature; kept > 0 so repeated samples can differ,
        which is what self-consistency voting relies on.
    max_tokens : int
        Completion length cap.
    exampler : list of (question, answer) pairs, optional
        Few-shot exemplars forwarded to the prompt builder.

    Returns
    -------
    tuple
        (reasoning, last_line) exactly as produced by generate_answer.
    """
    # generate_answer already returns the (reasoning, last_line) pair;
    # forward it as-is instead of unpacking and repacking.
    return generate_answer(question, model_id, temperature, max_tokens, "cot", exampler)
17
 
18
 
19
  def self_consistent_answer(
20
  question:str,
21
+ model_id:str="llama3-8b-8192",
22
  temperature:float=0.5,
23
  max_tokens:int=200,
24
+ exampler:List[Tuple[str, str]]=None,
25
  num_samples:int=3
26
  ):
27
  """
 
32
  results = []
33
 
34
  for _ in range(num_samples):
35
+ reasoning, last_line = sample_cot(question, model_id, temperature, max_tokens ,exampler)
36
  reasoning_paths.append(reasoning)
37
 
 
38
  results.append(last_line)
39
 
40
  return reasoning_paths, results
src/cot.py CHANGED
@@ -1,10 +1,10 @@
1
  from typing import Literal, List, Tuple
2
  from .groq_client import groq_chat
3
- from .utils import clean_response
4
 
5
  def build_prompt(question:str,
6
  mode: Literal["cot", "base"]="base",
7
- exampler: List[Tuple[str, str]]=("",""),
8
  zero_shot: bool=False
9
  ):
10
  """
@@ -16,13 +16,14 @@ def build_prompt(question:str,
16
  if not zero_shot:
17
  prompt = ""
18
 
19
- for q, a in exampler:
20
- prompt+=f"Q: {q}\nA:{a}"
 
21
 
22
  prompt+=f"Q: {question}\nA:"
23
  return prompt
24
- else:
25
- return f"Q: {question}\nA:"
26
 
27
  return f"Q: {question}\nA:"
28
 
@@ -32,7 +33,7 @@ def generate_answer(
32
  temperature: float=0.5,
33
  max_tokens: int=200,
34
  mode: Literal["cot", "base"]="base",
35
- exampler: List[Tuple[str ,str]]=("",""),
36
  zero_shot: bool=False
37
  ):
38
  """
@@ -40,7 +41,8 @@ def generate_answer(
40
  """
41
 
42
  prompt = build_prompt(question, mode, exampler, zero_shot)
43
- response = groq_chat(prompt, model_id, temperature, max_tokens)
 
44
 
45
- return clean_response(response)
46
 
 
1
  from typing import Literal, List, Tuple
2
  from .groq_client import groq_chat
3
+ from .utils import get_answer
4
 
5
def build_prompt(question: str,
                 mode: Literal["cot", "base"]="base",
                 exampler: List[Tuple[str, str]]=None,
                 zero_shot: bool=False
                 ):
    """Assemble the prompt text sent to the model.

    Parameters
    ----------
    question : str
        The question to be answered.
    mode : "cot" or "base"
        "cot" appends a "Let's think step by step" cue so the model emits
        chain-of-thought reasoning; "base" asks for a direct answer.
        (Previously this argument was accepted but silently ignored.)
    exampler : list of (question, answer) pairs, optional
        Few-shot exemplars prepended to the prompt, one Q/A pair per line.
    zero_shot : bool
        When True, exemplars are skipped even if provided.

    Returns
    -------
    str
        The formatted prompt.
    """
    # Answer cue: in CoT mode, nudge the model to reason step by step
    # (matches the prompt format used in tutorial.ipynb).
    cue = "A: Let's think step by step:\n" if mode == "cot" else "A:"

    if not zero_shot and exampler:
        # Each exemplar ends with "\n" so consecutive pairs don't run together.
        shots = "".join(f"Q: {q}\nA:{a}\n" for q, a in exampler)
        return f"{shots}Q: {question}\n{cue}"

    # Zero-shot (or no exemplars): just the bare question.
    return f"Q: {question}\n{cue}"
29
 
 
33
  temperature: float=0.5,
34
  max_tokens: int=200,
35
  mode: Literal["cot", "base"]="base",
36
+ exampler: List[Tuple[str ,str]]=None,
37
  zero_shot: bool=False
38
  ):
39
  """
 
41
  """
42
 
43
  prompt = build_prompt(question, mode, exampler, zero_shot)
44
+ reasoning = groq_chat(prompt, model_id, temperature, max_tokens)
45
+ last_line = get_answer(reasoning)
46
 
47
+ return reasoning, last_line
48
 
src/groq_client.py CHANGED
@@ -3,8 +3,8 @@ from groq import Groq
3
 
4
  def init_groq(api_key: str=None):
5
 
6
- api_key = api_key or os.get_env("GROQ_API_KEY")
7
- return Groq(api_key)
8
 
9
  def groq_chat(prompt: str, model_id: str, temperature: float = 0.7, max_tokens: int = 200):
10
 
 
3
 
4
def init_groq(api_key: str=None):
    """Build and return a Groq client.

    Uses the explicitly supplied key when given; otherwise falls back to
    the GROQ_API_KEY environment variable.
    """
    resolved_key = api_key or os.getenv("GROQ_API_KEY")
    client = Groq(api_key=resolved_key)
    return client
8
 
9
  def groq_chat(prompt: str, model_id: str, temperature: float = 0.7, max_tokens: int = 200):
10
 
src/utils.py CHANGED
@@ -1,2 +1,2 @@
1
- def clean_response():
2
- pass
 
1
def get_answer(response):
    """Return the last line of *response*.

    Chain-of-thought completions put the final answer on the last line,
    so the trailing line of the stripped text is taken as the answer.
    """
    trimmed = response.strip()
    # split("\n") on an empty string yields [""], so this returns "" for
    # empty or whitespace-only input rather than raising.
    return trimmed.split("\n")[-1]
tutorial.ipynb CHANGED
@@ -2,7 +2,7 @@
2
  "cells": [
3
  {
4
  "cell_type": "code",
5
- "execution_count": 3,
6
  "id": "efbec4bf",
7
  "metadata": {},
8
  "outputs": [],
@@ -14,7 +14,7 @@
14
  },
15
  {
16
  "cell_type": "code",
17
- "execution_count": 4,
18
  "id": "e8943c7d",
19
  "metadata": {},
20
  "outputs": [],
@@ -24,7 +24,7 @@
24
  },
25
  {
26
  "cell_type": "code",
27
- "execution_count": 5,
28
  "id": "394c1f72",
29
  "metadata": {},
30
  "outputs": [],
@@ -34,12 +34,12 @@
34
  " if mode==\"cot\":\n",
35
  " return f\"Q: {question}\\nLet's think step by step:\\n\"\n",
36
  " else:\n",
37
- " return f\"Q: {question}\\nA:\"\n"
38
  ]
39
  },
40
  {
41
  "cell_type": "code",
42
- "execution_count": null,
43
  "id": "5d6dc71b",
44
  "metadata": {},
45
  "outputs": [
@@ -72,9 +72,87 @@
72
  },
73
  {
74
  "cell_type": "code",
75
- "execution_count": null,
76
  "id": "67b9d179",
77
  "metadata": {},
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
78
  "outputs": [],
79
  "source": []
80
  }
 
2
  "cells": [
3
  {
4
  "cell_type": "code",
5
+ "execution_count": 1,
6
  "id": "efbec4bf",
7
  "metadata": {},
8
  "outputs": [],
 
14
  },
15
  {
16
  "cell_type": "code",
17
+ "execution_count": 2,
18
  "id": "e8943c7d",
19
  "metadata": {},
20
  "outputs": [],
 
24
  },
25
  {
26
  "cell_type": "code",
27
+ "execution_count": 3,
28
  "id": "394c1f72",
29
  "metadata": {},
30
  "outputs": [],
 
34
  " if mode==\"cot\":\n",
35
  " return f\"Q: {question}\\nLet's think step by step:\\n\"\n",
36
  " else:\n",
37
+ " return f\"Q: {question}\\nA:\""
38
  ]
39
  },
40
  {
41
  "cell_type": "code",
42
+ "execution_count": 4,
43
  "id": "5d6dc71b",
44
  "metadata": {},
45
  "outputs": [
 
72
  },
73
  {
74
  "cell_type": "code",
75
+ "execution_count": 5,
76
  "id": "67b9d179",
77
  "metadata": {},
78
+ "outputs": [
79
+ {
80
+ "data": {
81
+ "text/plain": [
82
+ "'Since K is the husband of R, and V is the son of K, then R is the mother of V.'"
83
+ ]
84
+ },
85
+ "execution_count": 5,
86
+ "metadata": {},
87
+ "output_type": "execute_result"
88
+ }
89
+ ],
90
+ "source": [
91
+ "response.choices[0].message.content.strip().split(\"\\n\")[-1]"
92
+ ]
93
+ },
94
+ {
95
+ "cell_type": "code",
96
+ "execution_count": 6,
97
+ "id": "e84a6a58",
98
+ "metadata": {},
99
+ "outputs": [],
100
+ "source": [
101
+ "from src.cot import generate_answer\n",
102
+ "from src.consistency import self_consistent_answer"
103
+ ]
104
+ },
105
+ {
106
+ "cell_type": "code",
107
+ "execution_count": 7,
108
+ "id": "c0b3a1b5",
109
+ "metadata": {},
110
+ "outputs": [],
111
+ "source": [
112
+ "ans = generate_answer(question, mode=\"cot\",zero_shot=True)"
113
+ ]
114
+ },
115
+ {
116
+ "cell_type": "code",
117
+ "execution_count": 8,
118
+ "id": "e9752254",
119
+ "metadata": {},
120
+ "outputs": [],
121
+ "source": [
122
+ "ans2 = self_consistent_answer(question)"
123
+ ]
124
+ },
125
+ {
126
+ "cell_type": "code",
127
+ "execution_count": 10,
128
+ "id": "3678816d",
129
+ "metadata": {},
130
+ "outputs": [
131
+ {
132
+ "data": {
133
+ "text/plain": [
134
+ "([\"Let's break it down:\\n\\n* V is the son of K, so K is V's parent.\\n* K is the father of S, so K is S's parent.\\n* K is the husband of R, which means R is K's spouse.\\n\\nSince K is V's parent, and K is married to R, it means R is V's grandparent. Specifically, R is V's mother's mother, or V's maternal grandmother.\",\n",
135
+ " 'Since K is the husband of R, and V is the son of K, that makes R the mother of V.',\n",
136
+ " 'Since K is the husband of R, and V is the son of K, then R is the mother of V.'],\n",
137
+ " [\"Since K is V's parent, and K is married to R, it means R is V's grandparent. Specifically, R is V's mother's mother, or V's maternal grandmother.\",\n",
138
+ " 'Since K is the husband of R, and V is the son of K, that makes R the mother of V.',\n",
139
+ " 'Since K is the husband of R, and V is the son of K, then R is the mother of V.'])"
140
+ ]
141
+ },
142
+ "execution_count": 10,
143
+ "metadata": {},
144
+ "output_type": "execute_result"
145
+ }
146
+ ],
147
+ "source": [
148
+ "ans2"
149
+ ]
150
+ },
151
+ {
152
+ "cell_type": "code",
153
+ "execution_count": null,
154
+ "id": "72fd9b9d",
155
+ "metadata": {},
156
  "outputs": [],
157
  "source": []
158
  }