elma-dev committed
Commit a19b9f5 · Parent(s): 1112958
Files changed (1): app.py (+3 -1)
app.py CHANGED
@@ -18,6 +18,8 @@ model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path,
     token = os.environ['TOKEN']
 )
 tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
+if tokenizer.pad_token is None:
+    tokenizer.pad_token = tokenizer.eos_token
 
 @spaces.GPU
 def generate(prompt, temperature=0.7, top_k=50, repetition_penalty=1.2):
@@ -30,7 +32,7 @@ def generate(prompt, temperature=0.7, top_k=50, repetition_penalty=1.2):
         top_k=top_k)
     output=tokenizer.decode(output_ids[0],skip_special_tokens=True)
     # Remove the prompt from the output
-    output=output[len(formatted_prompt):]
+    #output=output[len(formatted_prompt):]
     return output
 
 prompt_input=gr.Textbox(
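The new pad-token guard addresses a common pitfall: many causal-LM tokenizers (GPT-style models in particular) define an EOS token but no pad token, so padded or batched calls to model.generate() warn or fail until one is assigned, and reusing EOS is the usual workaround. A minimal standalone sketch of the pattern; the model name gpt2 is illustrative, not taken from this repo:

from transformers import AutoTokenizer

# gpt2 is an illustrative choice: its tokenizer ships with eos_token but no pad_token
tokenizer = AutoTokenizer.from_pretrained("gpt2")

# Reuse EOS as the pad token so padded/batched generation works
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

print(tokenizer.pad_token)  # '<|endoftext|>' for gpt2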
 
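With the slicing line commented out, generate() now returns the prompt together with the completion. If stripping is wanted again later, slicing the generated token ids before decoding is usually more robust than string slicing, since decode() does not always reproduce the prompt text byte-for-byte (special-token handling and whitespace normalization can shift offsets). A hedged sketch; input_ids is an assumed name for the tensor the prompt was encoded into before model.generate():

# input_ids is assumed to hold the encoded prompt passed to model.generate()
new_tokens = output_ids[0][input_ids.shape[-1]:]  # keep only tokens generated after the prompt
output = tokenizer.decode(new_tokens, skip_special_tokens=True)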