// IMPORT LIBRARIES & TOOLS
import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers';
// (AutoTokenizer can also be imported here for the commented-out chat-template code below)

// skip local model check
env.allowLocalModels = false;

// GLOBAL VARIABLES
let PROMPT_INPUT = `The woman has a job as a...` // a field for writing or changing a text value
let pField
let PREPROMPT = `You're a friendly pirate. Please complete the phrase and fill in any [MASK].`
// RUN TEXT-GEN MODEL
async function textGenTask(input){
  console.log('text-gen task initiated')

  let MODEL = 'Xenova/OpenELM-270M-Instruct'
  // const modelsList = ['Xenova/LaMini-Cerebras-256M', 'Xenova/TinyLlama-1.1B-Chat-v1.0']

  const pipe = await pipeline('text-generation', MODEL)

  // let tokenizer = AutoTokenizer.from_pretrained(MODEL)
  // let messages = [
  //   {"role": "system", "content": PREPROMPT},
  //   {"role": "user", "content": input}
  // ]
  // const prompt = pipe.tokenizer.apply_chat_template(messages, {
  //   tokenize: false, add_generation_prompt: false,
  // });

  // run text through model, setting hyperparameters
  var out = await pipe(input, {
    max_new_tokens: 256, // maximum length of the generated continuation
    temperature: 0.7,    // lower = more predictable, higher = more varied
    do_sample: true,     // sample from the distribution instead of greedy decoding
    top_k: 50,           // sample only from the 50 most likely next tokens
  })
  console.log(out)
  console.log('text-gen task completed')

  // parse results as a list of outputs, two different ways depending on the model
  let OUTPUT_LIST = [] // a blank array to store the results from the model

  // parsing of output
  out.forEach(o => {
    console.log(o)
    OUTPUT_LIST.push(o.generated_text)
  })

  // out.choices.forEach(o => {
  //   console.log(o)
  //   OUTPUT_LIST.push(o.message.content)
  // })

  console.log(OUTPUT_LIST)
  console.log('text-gen parsing complete')

  return OUTPUT_LIST
  // return out
}
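// Sketch (not part of the original flow; the function name is illustrative): PREPROMPT is
// declared above but only used in the commented-out chat-template block, so one simple way
// to apply it with the plain text-generation pipeline is to prepend it to the prompt.
async function promptedTextGenTask(input){
  return await textGenTask(`${PREPROMPT}\n${input}`) // assumes plain concatenation is enough for this model
}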
// RUN FILL-IN MODEL
async function fillInTask(input){
  console.log('fill-in task initiated')

  const pipe = await pipeline('fill-mask', 'Xenova/bert-base-uncased');

  var out = await pipe(input);
  console.log(out) // yields { score, sequence, token, token_str } for each result

  let OUTPUT_LIST = [] // a blank array to store the results from the model

  // parsing of output
  out.forEach(o => {
    console.log(o) // each o is { score, sequence, token, token_str }
    OUTPUT_LIST.push(o.sequence) // put only the full sequence in a list
  })

  console.log(OUTPUT_LIST)
  console.log('fill-in task completed')

  // return out
  return OUTPUT_LIST
}
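// Sketch (hypothetical variant, not called anywhere): the fill-mask results also carry a
// confidence score, so a version that keeps scores alongside the text could return pairs.
async function fillInTaskWithScores(input){
  const pipe = await pipeline('fill-mask', 'Xenova/bert-base-uncased')
  const out = await pipe(input)
  return out.map(o => ({ sequence: o.sequence, score: o.score })) // e.g. for ranking or display
}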
// // PROCESS MODEL OUTPUT
// // a generic function to pass in different model task functions
// async function getOutputs(task){
//   let output = await task
//   output.forEach(o => {
//     OUTPUT_LIST.push(o.sequence) // put only the full sequence in a list
//   })
//   console.log(OUTPUT_LIST)
//   return OUTPUT_LIST
// }

// await getOutputs(fillInTask()) // getOutputs will later connect to the interface to display results
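// Sketch (assumption, kept commented out like the original): since each task function above
// already resolves to a list of strings, and OUTPUT_LIST is local to those functions rather
// than global, a working generic wrapper only needs to await the task it is given:
// async function getOutputs(taskPromise){
//   const outputs = await taskPromise
//   console.log(outputs)
//   return outputs
// }
// e.g. let outs = await getOutputs(fillInTask(PROMPT_INPUT))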
//// p5.js Instance
new p5(function (p5){

  p5.setup = function(){
    p5.noCanvas()
    console.log('p5 instance loaded')
    makeTextDisplay()
    makeFields()
    makeButtons()
  }

  p5.draw = function(){
    //
  }
  function makeTextDisplay(){
    let title = p5.createElement('h1','p5.js Critical AI Prompt Battle')
    let intro = p5.createP(`This tool lets you explore the results of several AI prompts at once.`)
    p5.createP(`Use it to explore what models 'know' about various concepts, communities, and cultures. For more information on prompt programming and critical AI, see [Tutorial & extra info][TO-DO][XXX]`)
  }
  function makeFields(){
    pField = p5.createInput(PROMPT_INPUT) // turns the string into an input field; read the current text with pField.value()
    pField.size(700)
    pField.attribute('label', `Write a text prompt with one [MASK] that the model will fill in.`)
    p5.createP(pField.attribute('label'))
    pField.addClass("prompt")
    // pField.value(PROMPT_INPUT)
    // console.log(pField.value())
  }
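  // Sketch (assumption, not wired into setup): to compare several prompts side by side, as
  // the intro describes, extra fields could be created the same way and each read with its
  // own .value() call inside displayResults, e.g.:
  // let pField2 = p5.createInput(PROMPT_INPUT)
  // pField2.size(700)
  // pField2.addClass("prompt")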
  function makeButtons(){
    let submitButton = p5.createButton("SUBMIT")
    submitButton.size(170)
    submitButton.class('submit')
    submitButton.mousePressed(displayResults)

    let outHeader = p5.createElement('h3',"Results")
  }
  async function displayResults(){
    console.log('submit pressed, displaying results')

    PROMPT_INPUT = pField.value() // update the prompt if it has been changed
    console.log("latest prompt: ", PROMPT_INPUT)

    // let fillIn = await fillInTask(PROMPT_INPUT)
    // let outs = await getOutputs(fillIn)

    // call the function that runs the model for the task of your choice here,
    // passing PROMPT_INPUT as the parameter
    let outs = await textGenTask(PROMPT_INPUT)
    console.log(outs)

    let outText = p5.createP('')
    outText.html(outs) // pass true as a second argument to append instead of replacing the text
  }
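  // Sketch (assumption): when outs holds several results, writing them into separate
  // paragraphs keeps them readable instead of joining them with commas, e.g.:
  // outs.forEach(o => p5.createP(o))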
});