-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathapp.py
40 lines (32 loc) · 1.54 KB
/
app.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
import streamlit as st
import transformers
import model
from model import load_model
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
# Load the model once at module import time (Streamlit re-runs the script on
# each interaction, so this relies on load_model's own caching, if any —
# NOTE(review): confirm load_model caches, otherwise the model reloads per rerun).
# NOTE(review): this assignment shadows the `model` module imported above;
# `device` is presumably a torch.device ("cuda"/"cpu") — verify in model.py.
model, tokenizer, device = load_model()
def infer(input_ids, max_length, temperature):
    """Sample a continuation from the global model.

    Args:
        input_ids: Prompt token IDs (a tensor already on the model's device).
        max_length: Upper bound on the total sequence length to generate.
        temperature: Softmax temperature used while sampling.

    Returns:
        The token-ID sequences produced by ``model.generate``.
    """
    sampling_kwargs = {
        "max_length": max_length,
        "temperature": temperature,
        "do_sample": True,
    }
    return model.generate(input_ids=input_ids, **sampling_kwargs)
# --- Streamlit UI: prompt entry, sampling controls, and generation ---
st.title("Generate code with codegen 🦄")
st.subheader("This machine learning model is trained on 16 billion parameters and it generates code, you can give in a instruction as a prompt and don't fill in the answer")
text_target = st.text_area(label = "Enter your instruction and leave the answer open for the generated code, if you want to set the parameters or a new prompt please press stop top right, set the parameters and rerun", value ="""Instruction: Generate python code for a diffusion model
Answer:""", height = 300)
# Sampling controls shown in the sidebar.
max_length = st.sidebar.slider("Max Length", min_value = 500, max_value=3000)
temperature = st.sidebar.slider("Temperature", value = 0.9, min_value = 0.0, max_value=1.0, step=0.05)

# --- Generate ---
with st.spinner("AI is at work......"):
    # Tokenize the prompt and move it to the model's device.
    input_ids = tokenizer(text=text_target, return_tensors="pt").input_ids.to(device)
    output_sequences = infer(input_ids, max_length, temperature)
    # BUG FIX: the original called model.generate(output_sequences) a second
    # time, feeding generated token IDs back in as a new prompt and decoding
    # that re-generation. Decode the first sampled sequence directly instead.
    generated_text = tokenizer.decode(output_sequences[0])
st.success("AI Successfully generated code")  # fixed typo: "Succesfully"
print(generated_text)  # also echo to the server console for debugging
st.text(generated_text)