diff --git a/_bibliography/pubs.bib b/_bibliography/pubs.bib
index bc58bbf8f9c4..4825eee46ad6 100644
--- a/_bibliography/pubs.bib
+++ b/_bibliography/pubs.bib
@@ -9,6 +9,9 @@ @inproceedings{qin2023generalizable
   booktitle={The 29th ACM SIGKDD Conference on Knowledge Discovery and Data Mining (KDD)},
   year={2023},
   corr={true},
+
+  arxiv={http://arxiv.org/abs/2306.04641},
+  code={https://github.com/microsoft/robustlearn}
 }
 
 @inproceedings{zhang2023domain,
@@ -88,6 +91,16 @@ @inproceedings{wang2023robustness
   zhihu={https://zhuanlan.zhihu.com/p/612391048},
 }
 
+@inproceedings{lu2023towards,
+  title={Towards Optimization and Model Selection for Domain Generalization: A Mixup-guided Solution},
+  author={Lu, Wang and Wang, Jindong and Wang, Yidong and Ren, Kan and Chen, Yiqiang and Xie, Xing},
+  booktitle={KDD 2023 workshop on Causal Discovery, Prediction and Decision},
+  year={2023},
+
+  arxiv={https://arxiv.org/abs/2209.00652},
+  corr={true}
+}
+
 @inproceedings{lu2023fedclip,
   title={FedCLIP: Fast Generalization and Personalization for CLIP in Federated Learning},
   author={Lu, Wang and Hu, Xixu and Wang, Jindong and Xie, Xing},
diff --git a/_news/kddworkshop23.md b/_news/kddworkshop23.md
new file mode 100644
index 000000000000..9c9f15fc92d5
--- /dev/null
+++ b/_news/kddworkshop23.md
@@ -0,0 +1,7 @@
+---
+layout: post
+date: 2023-06-17
+inline: true
+---
+
+The paper *Towards Optimization and Model Selection for Domain Generalization: A Mixup-guided Solution* is accepted at the KDD 2023 workshop on Causal Discovery, Prediction and Decision. [[arxiv](https://arxiv.org/abs/2209.00652)]
\ No newline at end of file
diff --git a/_news/llmsurvey23.md b/_news/llmsurvey23.md
new file mode 100644
index 000000000000..9e61ed2246cb
--- /dev/null
+++ b/_news/llmsurvey23.md
@@ -0,0 +1,7 @@
+---
+layout: post
+date: 2023-07-07
+inline: true
+---
+
+We present the first survey on the *evaluation of large language models*! [[arxiv](https://arxiv.org/abs/2307.03109)] [[code](https://github.com/MLGroupJLU/LLM-eval-survey)]
\ No newline at end of file
diff --git a/_news/nips_ac23.md b/_news/nips_ac23.md
new file mode 100644
index 000000000000..ea1a26b7ff28
--- /dev/null
+++ b/_news/nips_ac23.md
@@ -0,0 +1,7 @@
+---
+layout: post
+date: 2023-06-07
+inline: true
+---
+
+I was invited to serve as an Area Chair (AC) for the NeurIPS 2023 Datasets and Benchmarks track!
\ No newline at end of file
diff --git a/_news/pandalm.md b/_news/pandalm.md
index ed15650c28f2..cc417b771ade 100644
--- a/_news/pandalm.md
+++ b/_news/pandalm.md
@@ -4,4 +4,4 @@ date: 2023-05-04
 inline: true
 ---
 
-The large model for large model evaluation "PandaLM" is released on Github! [[PandaLM](https://github.com/WeOpenML/PandaLM)]
+The large model for large model evaluation, "PandaLM", is released on GitHub! [[PandaLM](https://github.com/WeOpenML/PandaLM)] [[paper](https://arxiv.org/abs/2306.05087)]
diff --git a/_news/promptbench.md b/_news/promptbench.md
new file mode 100644
index 000000000000..c11f462bd7e5
--- /dev/null
+++ b/_news/promptbench.md
@@ -0,0 +1,7 @@
+---
+layout: post
+date: 2023-06-07
+inline: true
+---
+
+PromptBench, a unified benchmark for evaluating the robustness of large language models on adversarial prompts, is released! [[arxiv](https://arxiv.org/abs/2306.04528)] [[code](https://github.com/microsoft/promptbench)]
\ No newline at end of file
diff --git a/_pages/others.md b/_pages/others.md
index 6a3ac600c142..a2a0bda5eda7 100644
--- a/_pages/others.md
+++ b/_pages/others.md
@@ -10,11 +10,11 @@ nav: true
 Current interns:
 
 - 2023.05 -- present, [Hao Chen](https://scholar.google.com/citations?hl=en&user=tktqkhwAAAAJ&view_op=list_works&sortby=pubdate), PhD @ Carnegie Mellon University.
-- 2023.03 -- present, Kaijie Zhu, Master @ Institute of Automation, CAS.
+- 2023.06 -- present, Yachuan Liu, PhD @ University of Michigan, Ann Arbor.
 
 Alumni:
 
-- 2023.03 -- 2023.04, Lu Tan, Master @ Tsinghua University.
+- 2023.03 -- 2023.06, Kaijie Zhu, Master @ Institute of Automation, CAS.
 - 2022.10 -- 2023.03, [Xixu Hu](https://xixuhu.github.io/), Ph.D @ City University of Hong Kong.
 - 2022.07 -- 2023.03, [Runkai Zheng](https://scholar.google.com/citations?user=52haRQ0AAAAJ&hl=en), Master @ Chinese University of Hong Kong (Shenzhen).
 - 2021.11 -- 2022.10, [Yidong Wang](https://qianlanwyd.github.io/), Master @ Tokyo Institute of Technology. Now: Ph.D in PKU. [[MSRA official blog](https://www.msra.cn/zh-cn/news/outreach-articles/%e5%ae%9e%e4%b9%a0%e6%b4%be%ef%bd%9c%e7%8e%8b%e4%b8%80%e6%a0%8b%ef%bc%9a%e4%b8%bb%e5%8a%a8%e5%b0%b1%e4%bc%9a%e6%9c%89%e6%95%85%e4%ba%8b%ef%bc%81%e9%ab%98%e6%95%88%e7%a7%91%e7%a0%94%e7%a7%98%e8%af%80)]
@@ -22,7 +22,7 @@ Alumni:
   - Publications during internship: NeurIPS'22, ACML'22, COLING'22
 - 2021.06 -- 2021.11, [Wang Lu](https://scholar.google.com.hk/citations?user=r0C8zaMAAAAJ&hl=zh-CN), Ph.D @ ICT, Chinese Academy of Sciences. Now: continue his Ph.D in ICT.
   - Topics: domain generalization, federated learning, transfer learning.
-  - Publications during internship: TKDE'22, TMLR'22, Ubicomp'22, IEEE TBD'22, ICASSP'22, IJCAI'22 workshop.
+  - Publications during internship: ICLR'23, TKDE'22, TMLR'22, Ubicomp'22, IEEE TBD'22, ICASSP'22, IJCAI'22 workshop. Awarded the National Scholarship.
 - 2020.12 -- 2021.05, [Wenxin Hou](https://houwx.net), Master @ Tokyo Institute of Technology. Now: SDE at Microsoft.
   - Topics: speech recognition, semi-supervised learning.
   - Publications during internship: NeurIPS'21, TASLP'22, Interspeech'21.
@@ -34,8 +34,7 @@ Alumni:
 
 #### Collaborating students
 
-- Ph.D students at ICT, CAS: Xin Qin, Wang Lu, Yuxin Zhang.
-- Master student at Tsinghua University: Lu Tan.
+- Ph.D students at ICT, CAS: Xin Qin (Ubicomp, KDD, National Scholarship), Wang Lu, Yuxin Zhang.
 - Ph.D student at Institute of Acoustics, CAS: Han Zhu.
 - Ph.D student at Carnegie Mellon University: Hao Chen.
 - Master/Ph.D students at Institute of Automation, CAS: YiFan Zhang, Kaijie Zhu.
diff --git a/_pages/publications.md b/_pages/publications.md
index f08a4d2ff56a..6d9b059bd88f 100644
--- a/_pages/publications.md
+++ b/_pages/publications.md
@@ -11,12 +11,16 @@ nav: true
 
 #### Preprints
 
+
+- A Survey on Evaluation of Large Language Models. Yupeng Chang, Xu Wang, **Jindong Wang**, Yuan Wu, Kaijie Zhu, Hao Chen, Linyi Yang, Xiaoyuan Yi, Cunxiang Wang, Yidong Wang, Wei Ye, Yue Zhang, Yi Chang, Philip S. Yu, Qiang Yang, Xing Xie. [[arxiv](https://arxiv.org/abs/2307.03109)] [[code](https://github.com/MLGroupJLU/LLM-eval-survey)]
+- PromptBench: Towards Evaluating the Robustness of Large Language Models on Adversarial Prompts. Kaijie Zhu, **Jindong Wang**, Jiaheng Zhou, Zichen Wang, Hao Chen, Yidong Wang, Linyi Yang, Wei Ye, Neil Zhenqiang Gong, Yue Zhang, Xing Xie. [[arxiv](https://arxiv.org/abs/2306.04528)] [[code](https://github.com/microsoft/promptbench)]
+- PandaLM: An Automatic Evaluation Benchmark for LLM Instruction Tuning Optimization. Yidong Wang, Zhuohao Yu, Zhengran Zeng, Linyi Yang, Cunxiang Wang, Hao Chen, Chaoya Jiang, Rui Xie, **Jindong Wang**, Xing Xie, Wei Ye, Shikun Zhang, Yue Zhang. [[arxiv](https://arxiv.org/abs/2306.05087)] [[code](https://github.com/WeOpenML/PandaLM)]
+- Selective Mixup Helps with Distribution Shifts, But Not (Only) because of Mixup. Damien Teney, **Jindong Wang**, Ehsan Abbasnejad. [[arxiv](https://arxiv.org/abs/2305.16817)]
 - Imprecise Label Learning: A Unified Framework for Learning with Various Imprecise Label Configurations. Hao Chen, Ankit Shah, Jindong Wang, Ran Tao, Yidong Wang, Xing Xie, Masashi Sugiyama, Rita Singh, Bhiksha Raj. [[arxiv](https://arxiv.org/abs/2305.12715)]
 - Exploring Vision-Language Models for Imbalanced Learning. Yidong Wang, Zhuohao Yu, **Jindong Wang**, Qiang Heng, Hao Chen, Wei Ye, Rui Xie, Xing Xie, Shikun Zhang. [[arxiv](https://arxiv.org/abs/2304.01457)] [[code](https://github.com/Imbalance-VLM/Imbalance-VLM)]
 - An Embarrassingly Simple Baseline for Imbalanced Semi-Supervised Learning. Hao Chen, Yue Fan, Yidong Wang, **Jindong Wang**, Bernt Schiele, Xing Xie, Marios Savvides, Bhiksha Raj. [[arxiv](https://arxiv.org/abs/2211.11086)]
 - FIXED: Frustratingly Easy Domain Generalization with Mixup. Wang Lu, **Jindong Wang**, Han Yu, Lei Huang, Xiang Zhang, Yiqiang Chen, Xing Xie. [[arxiv](https://arxiv.org/abs/2211.05228)]
 - Conv-Adapter: Exploring Parameter Efficient Transfer Learning for ConvNets. Hao Chen, Ran Tao, Han Zhang, Yidong Wang, Wei Ye, Jindong Wang, Guosheng Hu, and Marios Savvides. [[arxiv](https://arxiv.org/abs/2208.07463)]
-- Towards Optimization and Model Selection for Domain Generalization: A Mixup-guided Solution. Wang Lu, **Jindong Wang**, Yidong Wang, Kan Ren, Yiqiang Chen, Xing Xie. [[arxiv](https://arxiv.org/abs/2209.00652)]
 - Equivariant Disentangled Transformation for Domain Generalization under Combination Shift. Yivan Zhang, **Jindong Wang**, Xing Xie, and Masashi Sugiyama. [[arxiv](https://arxiv.org/abs/2208.02011)]
 - Boosting Cross-Domain Speech Recognition with Self-Supervision. Han Zhu, Gaofeng Cheng, **Jindong Wang**, Wenxin Hou, Pengyuan Zhang, and Yonghong Yan. [[arxiv](https://arxiv.org/abs/2206.09783)]
 - Learning Invariant Representations across Domains and Tasks. **Jindong Wang**, Wenjie Feng, Chang Liu, Chaohui Yu, Mingxuan Du, Renjun Xu, Tao Qin, and Tie-Yan Liu. [[arxiv](https://arxiv.org/abs/2103.05114)]
diff --git a/_pages/research.md b/_pages/research.md
index b7020d555513..5ec65cc85d84 100644
--- a/_pages/research.md
+++ b/_pages/research.md
@@ -10,16 +10,26 @@ nav: true
 
 The long-term research goal is to build robust models for modern AI, such as pre-trained models and large models. We create new theory, algorithms, applications, and open-sourced library to achieve our goal.
 
-These days, we are specifically interested in robustness in large language models (LLMs).
+These days, we are specifically interested in **robustness in large language models (LLMs)**.
 
 Our research consists of the following topics with selected publications: [[View by year](https://jd92.wang/publications/)]
 
+##### New: large models
+
+- [arXiv'23] [A Survey on Evaluation of Large Language Models](https://arxiv.org/abs/2307.03109). Yupeng Chang, Xu Wang, Jindong Wang, Yuan Wu, Kaijie Zhu, Hao Chen, Linyi Yang, Xiaoyuan Yi, Cunxiang Wang, Yidong Wang, Wei Ye, Yue Zhang, Yi Chang, Philip S. Yu, Qiang Yang, Xing Xie. [[code](https://github.com/MLGroupJLU/LLM-eval-survey)]
+- [arXiv'23] [PromptBench: Towards Evaluating the Robustness of Large Language Models on Adversarial Prompts](https://arxiv.org/abs/2306.04528). Kaijie Zhu, Jindong Wang, Jiaheng Zhou, Zichen Wang, Hao Chen, Yidong Wang, Linyi Yang, Wei Ye, Neil Zhenqiang Gong, Yue Zhang, Xing Xie. [[code](https://github.com/microsoft/promptbench)]
+- [arXiv'23] [PandaLM: An Automatic Evaluation Benchmark for LLM Instruction Tuning Optimization](https://arxiv.org/abs/2306.05087). Yidong Wang, Zhuohao Yu, Zhengran Zeng, Linyi Yang, Cunxiang Wang, Hao Chen, Chaoya Jiang, Rui Xie, Jindong Wang, Xing Xie, Wei Ye, Shikun Zhang, Yue Zhang. [[code](https://github.com/WeOpenML/PandaLM)]
+- **[ACL'23 findings]** [GLUE-X: Evaluating Natural Language Understanding Models from an Out-of-distribution Generalization Perspective](https://arxiv.org/abs/2211.08073). Linyi Yang, Shuibai Zhang, Libo Qin, Yafu Li, Yidong Wang, Hanmeng Liu, Jindong Wang, Xing Xie, Yue Zhang.
+- **[ICLR'23 large model workshop]** [On the Robustness of ChatGPT: An Adversarial and Out-of-distribution Perspective](https://arxiv.org/abs/2302.12095). Jindong Wang, Xixu Hu, Wenxin Hou, Hao Chen, Runkai Zheng, Yidong Wang, Linyi Yang, Haojun Huang, Wei Ye, Xiubo Geng, Binxing Jiao, Yue Zhang, and Xing Xie.
+- [arXiv'23] [Exploring Vision-Language Models for Imbalanced Learning](https://arxiv.org/abs/2304.01457). Yidong Wang, Zhuohao Yu, Jindong Wang, Qiang Heng, Hao Chen, Wei Ye, Rui Xie, Xing Xie, Shikun Zhang. [[code](https://github.com/Imbalance-VLM/Imbalance-VLM)]
+
 ##### Out-of-distribution (Domain) generalization and adaptation for distribution shift
 
 - **[ICLR'23]** [Out-of-distribution Representation Learning for Time Series Classification](https://arxiv.org/abs/2209.07027). Wang Lu, Jindong Wang, Xinwei Sun, Yiqiang Chen, and Xing Xie.
 - **[KDD'23]** [Domain-Specific Risk Minimization for Out-of-Distribution Generalization](https://arxiv.org/pdf/2208.08661.pdf). YiFan Zhang, Jindong Wang, Jian Liang, Zhang Zhang, Baosheng Yu, Liang Wang, Xing Xie, and Dacheng Tao.
 - **[KDD'23]** [Generalizable Low-Resource Activity Recognition with Diverse and Discriminative Representation Learning](ddd). Xin Qin, Jindong Wang, Shuo Ma, Wang Lu, Yongchun Zhu, Xing Xie, Yiqiang Chen.
 - **[ACL'23 findings]** [GLUE-X: Evaluating Natural Language Understanding Models from an Out-of-distribution Generalization Perspective](https://arxiv.org/abs/2211.08073). Linyi Yang, Shuibai Zhang, Libo Qin, Yafu Li, Yidong Wang, Hanmeng Liu, Jindong Wang, Xing Xie, Yue Zhang.
+- **[KDD'23 workshop]** [Towards Optimization and Model Selection for Domain Generalization: A Mixup-guided Solution](https://arxiv.org/abs/2209.00652). Wang Lu, Jindong Wang, Yidong Wang, Kan Ren, Yiqiang Chen, Xing Xie.
 - **[TKDE'22]** [Generalizing to Unseen Domains: A Survey on Domain Generalization](https://arxiv.org/abs/2103.03097). Jindong Wang, Cuiling Lan, Chang Liu, Yidong Ouyang, Tao Qin, Wang Lu, Yiqiang Chen, Wenjun Zeng, and Philip S. Yu.
 - **[TMLR'22]** [Domain-invariant Feature Exploration for Domain Generalization](https://arxiv.org/abs/2207.12020). Wang Lu, Jindong Wang, Haoliang Li, Yiqiang Chen, and Xing Xie.
 - **[UbiComp'22]** [Semantic-Discriminative Mixup for Generalizable Sensor-based Cross-domain Activity Recognition](http://arxiv.org/abs/2206.06629). Wang Lu, Jindong Wang, Yiqiang Chen, Sinno Pan, Chunyu Hu, and Xin Qin.
@@ -47,7 +57,6 @@ Our research consists of the following topics with selected publications: [[View by year](https://jd92.wang/publications/)]
 
 ##### Safe transfer learning for security
 
-- **[arXiv'23]** [On the Robustness of ChatGPT: An Adversarial and Out-of-distribution Perspective](https://arxiv.org/abs/2302.12095). Jindong Wang, Xixu Hu, Wenxin Hou, Hao Chen, Runkai Zheng, Yidong Wang, Linyi Yang, Haojun Huang, Wei Ye, Xiubo Geng, Binxin Jiao, Yue Zhang, and Xing Xie.
 - **[ICSE'22]** [ReMoS: Reducing Defect Inheritance in Transfer Learning via Relevant Model Slicing](https://jd92.wang/assets/files/icse22-remos.pdf). Ziqi Zhang, Yuanchun Li, Jindong Wang, Bingyan Liu, Ding Li, Xiangqun Chen, Yao Guo, and Yunxin Liu.
 - **[IEEE TBD'22]** [Personalized Federated Learning with Adaptive Batchnorm for Healthcare](https://arxiv.org/abs/2112.00734). Wang Lu, Jindong Wang, Yiqiang Chen, Xin Qin, Renjun Xu, Dimitrios Dimitriadis, and Tao Qin.
 - **[TKDE'22]** [Unsupervised deep anomaly detection for multi-sensor time-series signals](https://arxiv.org/abs/2107.12626). Yuxin Zhang, Yiqiang Chen, Jindong Wang, and Zhiwen Pan.
diff --git a/_pages/service.md b/_pages/service.md
index 4865e9041ce7..ce84f6b0d2f8 100644
--- a/_pages/service.md
+++ b/_pages/service.md
@@ -13,6 +13,7 @@ nav: true
 - IJCAI 2019 Publicity co-chair
 - ICDM 2019 transfer learning session chair
 - Conference senior member/area chair:
+  - NeurIPS 2023 Area Chair for the Datasets and Benchmarks track
   - IJCAI 2023 senior PC
   - AAAI 2023 senior PC
 - Conference PC member:
diff --git a/_pages/talks.md b/_pages/talks.md
index b4d9d1113747..59ef77fd190e 100644
--- a/_pages/talks.md
+++ b/_pages/talks.md
@@ -12,6 +12,7 @@ nav: true
 
 #### Invited talks
 
+- Invited talk: **Towards robustness research in the era of large models**, at Southern University of Science and Technology (SUSTech), Shenzhen. June 2023.
 - Invited talk: **Safe, efficient, and generalizable transfer learning**, at NCIIP 2023. Changchun. May 2023.
 - Invited talk: **Robust machine learning for responsible AI**, at Hefei University of Technology. Mar. 2023. [[video at Bilibili](https://www.bilibili.com/video/BV1184y1M7V4/)]
 - Invited talk: **Building robust machine learning models**, at MLNLP community. Sep. 2022. [[Video & PDF](https://www.bilibili.com/video/BV1hP411V7SP/)]