---
library_name: transformers.js
tags:
- vision
- background-removal
- portrait-matting
license: apache-2.0
pipeline_tag: image-segmentation
---
# wuchendi/MODNet (Matting Objective Decomposition Network)

Trimap-Free Portrait Matting in Real Time

- Repository: https://github.com/WuChenDi/MODNet
- Spaces: https://huggingface.co/spaces/wuchendi/MODNet
- SwanLab/MODNet: https://swanlab.cn/@wudi/MODNet/overview

## 📦 Usage with Transformers.js

First, install the [`@huggingface/transformers`](https://www.npmjs.com/package/@huggingface/transformers) library from NPM using pnpm:

```bash
pnpm add @huggingface/transformers
```

Then, use the following code to perform portrait matting with the `wuchendi/MODNet` model:

```js
/* eslint-disable no-console */
import { AutoModel, AutoProcessor, RawImage } from '@huggingface/transformers'

async function main() {
  try {
    console.log('🚀 Initializing MODNet...')

    // Load model
    console.log('📦 Loading model...')
    const model = await AutoModel.from_pretrained('wuchendi/MODNet', {
      dtype: 'fp32',
      progress_callback: (progress) => {
        // @ts-ignore
        if (progress.progress) {
          // @ts-ignore
          console.log(`Model loading progress: ${(progress.progress).toFixed(2)}%`)
        }
      }
    })
    console.log('✅ Model loaded successfully')

    // Load processor
    console.log('🔧 Loading processor...')
    const processor = await AutoProcessor.from_pretrained('wuchendi/MODNet', {})
    console.log('✅ Processor loaded successfully')

    // Load image from URL
    const url = 'https://res.cloudinary.com/dhzm2rp05/image/upload/samples/logo.jpg'
    console.log('🖼️ Loading image:', url)
    const image = await RawImage.fromURL(url)
    console.log('✅ Image loaded successfully', `Dimensions: ${image.width}x${image.height}`)

    // Pre-process image
    console.log('🔄 Preprocessing image...')
    const { pixel_values } = await processor(image)
    console.log('✅ Image preprocessing completed')

    // Generate alpha matte
    console.log('🎯 Generating alpha matte...')
    const startTime = performance.now()
    const { output } = await model({ input: pixel_values })
    const inferenceTime = performance.now() - startTime
    console.log('✅ Alpha matte generated', `Time: ${inferenceTime.toFixed(2)}ms`)

    // Save output mask
    console.log('💾 Saving output...')
    const mask = await RawImage.fromTensor(output[0].mul(255).to('uint8')).resize(image.width, image.height)
    await mask.save('src/assets/mask.png')
    console.log('✅ Output saved to src/assets/mask.png')
  } catch (error) {
    console.error('❌ Error during processing:', error)
    throw error
  }
}

main().catch(console.error)
```
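
The saved file is a single-channel alpha matte. If you also want a cutout with a transparent background, you can copy the matte into the alpha channel of the original image. The snippet below is a minimal sketch of that step, intended to run inside `main()` right after the mask is saved; it assumes `RawImage.rgba()` returns a 4-channel copy whose raw pixel buffer is exposed via `.data`, and the output path is just an example:

```js
// Sketch: write the alpha matte into the alpha channel of the original image.
// Assumes `image` and `mask` from the example above (mask is single-channel,
// already resized to the original image dimensions).
const rgba = image.rgba() // 4-channel (RGBA) copy of the input image
for (let i = 0; i < mask.data.length; ++i) {
  // Pixel i of the matte maps to the alpha byte at index 4 * i + 3
  rgba.data[4 * i + 3] = mask.data[i]
}
await rgba.save('src/assets/portrait.png') // example output path
```

Because MODNet predicts soft alpha values, fine structures such as hair edges keep their partial transparency in the resulting cutout.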