braindeck committed
Commit fb0bb52 · 1 Parent(s): a23e411

Use do_sample=False for greedy decoding

Files changed (1): app.py (+2 -2)
app.py CHANGED
@@ -14,7 +14,7 @@ def generate_response(prompt):
     """
     chat = [{"role": "user", "content": prompt}]
     inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)
-    outputs = model.generate(inputs, max_new_tokens=512, temperature=0.0)
+    outputs = model.generate(inputs, max_new_tokens=512, do_sample=False)
 
     # Decode the generated text
     generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
@@ -42,4 +42,4 @@ with gr.Blocks() as demo:
     )
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
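For context on the change: in Hugging Face transformers, temperature is a sampling parameter. It only takes effect when do_sample=True, and a value of 0.0 is invalid there (temperature must be strictly positive); when do_sample=False it is simply ignored, with a warning in recent versions. The supported way to request deterministic greedy decoding is do_sample=False. Below is a minimal self-contained sketch of the corrected call; the model id is illustrative only (not necessarily this Space's actual model), and any chat-tuned checkpoint with a chat template would work.

    # Minimal sketch: greedy decoding with transformers.
    # Assumption: model id is a stand-in, not the repo's actual checkpoint.
    from transformers import AutoModelForCausalLM, AutoTokenizer

    model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(model_id)

    chat = [{"role": "user", "content": "What is greedy decoding?"}]
    inputs = tokenizer.apply_chat_template(
        chat, tokenize=True, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)

    # do_sample=False picks the argmax token at every step (greedy decoding).
    # temperature=0.0 is not a substitute: it is ignored when sampling is off,
    # and rejected as non-positive when sampling is on.
    outputs = model.generate(inputs, max_new_tokens=512, do_sample=False)
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))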