@inproceedings{zhang2023Query,title={Query-Aware Quantization for Maximum Inner Product Search},author={Zhang, Jin and Lian, Defu and Zhang, Haodi and Wang, Baoyun and Chen, Enhong},booktitle={Proceedings of the AAAI Conference on Artificial Intelligence},year={2023},}
WWW2023
Differentiable Optimized Product Quantization and Beyond
Zepu Lu, Defu Lian, Jin Zhang, Zaixi Zhang, Chao Feng, Hao Wang, and Enhong Chen
In Proceedings of the ACM Web Conference 2023, 2023
Vector quantization techniques, such as Product Quantization (PQ), play a vital role in approximate nearest neighbor search (ANNS) and maximum inner product search (MIPS) owing to their remarkable search and storage efficiency. However, the indexes in vector quantization cannot be trained together with the inference models since data indexing is not differentiable. To this end, differentiable vector quantization approaches, such as DiffPQ and DeepPQ, have recently been proposed, but existing methods have two drawbacks. First, they do not impose any constraints on codebooks, so the resultant codebooks lack diversity, leading to limited retrieval performance. Second, since data indexing relies on a non-differentiable selection operator, differentiability is usually achieved by either relaxation or Straight-Through Estimation (STE), which leads to biased gradients and slow convergence. To address these problems, we propose a Differentiable Optimized Product Quantization method (DOPQ) and beyond in this paper. In particular, each data point is projected into multiple orthogonal spaces to generate multiple views of the data. Each codebook is then learned from one view of the data, guaranteeing the diversity of the codebooks. Moreover, instead of simple differentiable relaxation, DOPQ optimizes the loss based on direct loss minimization, significantly reducing the gradient bias problem. Finally, DOPQ is evaluated on seven datasets covering both recommendation and image search tasks. Extensive experimental results show that DOPQ outperforms state-of-the-art baselines by a large margin.
@inproceedings{10.1145/3543507.3583482,author={Lu, Zepu and Lian, Defu and Zhang, Jin and Zhang, Zaixi and Feng, Chao and Wang, Hao and Chen, Enhong},title={Differentiable Optimized Product Quantization and Beyond},year={2023},isbn={9781450394161},publisher={Association for Computing Machinery},address={New York, NY, USA},url={https://doi.org/10.1145/3543507.3583482},doi={10.1145/3543507.3583482},booktitle={Proceedings of the ACM Web Conference 2023},pages={3353--3363},numpages={11},keywords={Direct Loss Minimization, Orthogonal Matrix, Approximate Nearest Neighbor Search, Product Quantization},location={Austin, TX, USA},series={WWW '23},}
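The abstract above describes the mechanism at a high level: per-subspace codebooks learned on orthogonally projected views of the data, with direct loss minimization replacing STE. As a rough illustration only, the sketch below shows plain product quantization with a random orthogonal rotation, the building block that DOPQ extends; all names, shapes, and the NumPy-based setup are assumptions for illustration, not the paper's implementation.

```python
# Hypothetical sketch: product quantization with an orthogonal rotation.
# In OPQ/DOPQ the rotation and codebooks are learned; here they are fixed
# (random rotation, codewords sampled from the data) just to show the
# encode/decode path. Not the paper's code.
import numpy as np

rng = np.random.default_rng(0)
d, m, k = 32, 4, 16            # dimension, sub-spaces, codewords per sub-space
sub = d // m                   # sub-vector dimension

# Random orthogonal rotation (learned in OPQ/DOPQ).
q, _ = np.linalg.qr(rng.normal(size=(d, d)))

x = rng.normal(size=(1000, d))            # toy database vectors
xr = x @ q                                # rotated views of the data

# One codebook per sub-space, initialized from random data samples.
codebooks = [xr[rng.choice(len(xr), k), i * sub:(i + 1) * sub] for i in range(m)]

def encode(v):
    """Return the index of the nearest codeword in every sub-space."""
    codes = []
    for i, cb in enumerate(codebooks):
        part = v[i * sub:(i + 1) * sub]
        codes.append(int(np.argmin(((cb - part) ** 2).sum(axis=1))))
    return codes

def decode(codes):
    """Reconstruct the rotated vector from its sub-space codes."""
    return np.concatenate([codebooks[i][c] for i, c in enumerate(codes)])

codes = encode(xr[0])
approx = decode(codes)
print("quantization error:", np.linalg.norm(xr[0] - approx))
```

In DOPQ the rotation and codebooks would be optimized jointly with the downstream model rather than fixed as above; the sketch only illustrates how a vector is split, indexed, and reconstructed.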
2022
SIGIR2022
Improving Implicit Alternating Least Squares with Ring-based Regularization
@inproceedings{fan2022improving,title={Improving Implicit Alternating Least Squares with Ring-based Regularization},author={Fan, Rui and Chen, Jin and Zhang, Jin and Lian, Defu and Chen, Enhong},booktitle={Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval},pages={102--111},year={2022},doi={10.1145/3477495.3531995}}
AAAI2022
Anisotropic Additive Quantization for Fast Inner Product Search
@inproceedings{zhang2022anisotropic,title={Anisotropic additive quantization for fast inner product search},author={Zhang, Jin and Liu, Qi and Lian, Defu and Liu, Zheng and Wu, Le and Chen, Enhong},booktitle={Proceedings of the AAAI Conference on Artificial Intelligence},volume={36},number={4},pages={4354--4362},year={2022},doi={10.1609/aaai.v36i4.20356}}
2021
KDD2021
Online Additive Quantization
@inproceedings{liu2021online,title={Online additive quantization},author={Liu, Qi and Zhang, Jin and Lian, Defu and Ge, Yong and Ma, Jianhui and Chen, Enhong},booktitle={Proceedings of the 27th ACM SIGKDD Conference on Knowledge Discovery \& Data Mining},pages={1098--1108},year={2021},doi={10.1145/3447548.3467441},}