- This repository contains Python code (in Jupyter notebooks) for both of the following papers:
- To set it up in conda:
conda create --name MLpersref --file requirements.txt
- If you find this code helpful, please cite our papers:
@inproceedings{10.1145/3462244.3479910,
  author    = {Gomaa, Amr and Reyes, Guillermo and Feld, Michael},
  title     = {{ML-PersRef}: A Machine Learning-Based Personalized Multimodal Fusion Approach for Referencing Outside Objects From a Moving Vehicle},
  year      = {2021},
  isbn      = {9781450384810},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  url       = {https://doi.org/10.1145/3462244.3479910},
  doi       = {10.1145/3462244.3479910},
  booktitle = {Proceedings of the 2021 International Conference on Multimodal Interaction},
  pages     = {318--327},
  numpages  = {10},
  keywords  = {Pointing, Deep Learning, Machine Learning, Personalized Models, Eye Gaze, Object Referencing, Multimodal Fusion},
  location  = {Montr{\'e}al, QC, Canada},
  series    = {ICMI '21}
}
@inproceedings{10.1145/3382507.3418817,
  author    = {Gomaa, Amr and Reyes, Guillermo and Alles, Alexandra and Rupp, Lydia and Feld, Michael},
  title     = {Studying Person-Specific Pointing and Gaze Behavior for Multimodal Referencing of Outside Objects from a Moving Vehicle},
  year      = {2020},
  isbn      = {9781450375818},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  url       = {https://doi.org/10.1145/3382507.3418817},
  doi       = {10.1145/3382507.3418817},
  booktitle = {Proceedings of the 2020 International Conference on Multimodal Interaction},
  pages     = {501--509},
  numpages  = {9},
  keywords  = {head pose, pointing gestures, object referencing, personalized models, eye gaze, multimodal interaction},
  location  = {Virtual Event, Netherlands},
  series    = {ICMI '20}
}