use model without pipeline
index.js (changed)
@@ -1,4 +1,5 @@
 import { pipeline, env } from 'https://cdn.jsdelivr.net/npm/@xenova/[email protected]';
+import { AutoModel, AutoTokenizer } from '@xenova/transformers';
 
 // Since we will download the model from the Hugging Face Hub, we can skip the local model check
 env.allowLocalModels = false;
@@ -13,6 +14,16 @@ const EXAMPLE_URL = 'https://huggingface.co/datasets/Xenova/transformers.js-docs
 
 // Create a new object detection pipeline
 status.textContent = 'Loading model...';
+
+
+let tokenizer = await AutoTokenizer.from_pretrained('Xenova/colbertv2.0');
+let model = await AutoModel.from_pretrained('Xenova/colbertv2.0');
+
+let inputs = await tokenizer('I love transformers!');
+let { logits } = await model(inputs);
+
+console.log(logits);
+
 const detector = await pipeline('feature-extraction','Xenova/colbertv2.0');
 
 const output = await detector('This is a simple test.');
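For context, this is roughly what the new "model without pipeline" path boils down to once the diff is applied. The sketch below is a hedged reconstruction, not the committed file: it imports from the bare '@xenova/transformers' specifier as the added import does (which assumes a bundler or an import map in the browser; the jsdelivr URL at the top of index.js could be reused instead), and it logs the whole model output because the exact output key is architecture-dependent (an encoder-only checkpoint such as Xenova/colbertv2.0 may expose last_hidden_state rather than logits).

// Minimal sketch: run the checkpoint directly, without pipeline().
// Assumes '@xenova/transformers' resolves here (bundler or import map);
// otherwise import from the jsdelivr URL used at the top of index.js.
import { AutoModel, AutoTokenizer, env } from '@xenova/transformers';

// Skip the local model check and fetch everything from the Hugging Face Hub.
env.allowLocalModels = false;

// Load the tokenizer and model weights for the same checkpoint the pipeline uses.
const tokenizer = await AutoTokenizer.from_pretrained('Xenova/colbertv2.0');
const model = await AutoModel.from_pretrained('Xenova/colbertv2.0');

// Tokenize a sentence and run a forward pass.
const inputs = await tokenizer('I love transformers!');
const output = await model(inputs);

// Log the full output object; the key holding the hidden states is
// architecture-dependent (e.g. last_hidden_state for encoder-only models).
console.log(output);

Compared with the pipeline() call that remains at the bottom of the file, this gives direct access to the raw model outputs rather than the pipeline's wrapped result.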