```python
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load UI-TARS-1.5-7B. Note: UI-TARS-1.5 is a vision-language GUI agent model;
# this minimal demo passes the UI context as plain text rather than a screenshot,
# and depending on your transformers version a processor-based loading path may
# be more appropriate.
tokenizer = AutoTokenizer.from_pretrained("ByteDance-Seed/UI-TARS-1.5-7B")
model = AutoModelForCausalLM.from_pretrained("ByteDance-Seed/UI-TARS-1.5-7B")

def predict(ui_context, goal):
    # Wrap the UI context and task goal in a simple prompt template.
    prompt = f"<context>{ui_context}</context>\n<task>{goal}</task>"
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_new_tokens=128)
    # Decode only the newly generated tokens, not the echoed prompt.
    generated = outputs[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(generated, skip_special_tokens=True)

gr.Interface(
    fn=predict,
    inputs=["textbox", "textbox"],
    outputs="textbox",
    title="UI-TARS 1.5 Action Predictor",
).launch()
```
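For a quick sanity check without launching the interface, `predict` can be called directly. The `example_context` string below is a made-up, purely illustrative stand-in for a textual UI dump; it is not an official UI-TARS input format.

```python
# Hypothetical example input: a toy text rendering of a UI, for illustration only.
example_context = "<button id=3>Submit</button>\n<input id=7 placeholder='Email address'>"
print(predict(example_context, "Fill in an email address and submit the form"))
```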