Language-Assisted Feature Transformation for Anomaly Detection
EungGu Yun, Heonjin Ha, Yeongwoo Nam, Bryan Dongik Lee
ICLR 2025
conda create -n laft python=3.11
conda activate laft

We recommend installing PyTorch from the official website first.
pip install -r requirements.txt

We use:

torch==2.5.1
torchvision==0.20.1
torcheval==0.0.7
open_clip_torch==2.29.0
- MNIST: data/MNIST
- Waterbirds: data/waterbirds_v1.0
- CelebA: data/celeba
- MVTec AD: data/mvtec_anomaly_detection
- VisA: data/VisA_20220922
- repeat1: checkpoints/clipn/repeat1.pt
- repeat2: checkpoints/clipn/repeat2.pt
- repeat3: checkpoints/clipn/repeat3.pt
import laft
import torch
torch.set_grad_enabled(False) # disable Autograd (prevents OOM)
# assume image tensor is already loaded
# Load CLIP model and prompts
model, transform = laft.load_clip("ViT-B-16-quickgelu:dfn2b")
prompts = laft.prompts.get_prompts("color_mnist", "number")
# Encode image
image_features = model.encode_image(images)
# Construct concept subspace
text_features = model.encode_text(prompts["all"])
pair_diffs = laft.prompt_pair(features)
concept_basis = laft.pca(pair_diffs, n_components=24)
# Language-assisted feature transformation
guided_image_features = laft.inner(image_features, concept_basis)
ignored_image_features = laft.orthogonal(image_features, concept_basis)See runs/ directory for running scripts.
@inproceedings{yun2025laft,
title={Language-Assisted Feature Transformation for Anomaly Detection},
author={EungGu Yun and Heonjin Ha and Yeongwoo Nam and Bryan Dongik Lee},
booktitle={The Thirteenth International Conference on Learning Representations},
year={2025},
url={https://openreview.net/forum?id=2p03KljxE9}
}