Deaf communities around the world use sign languages as their first language. Signs in sign languages are composed of phonological components put together under certain rules. Linguists identify the following main components present in signs: handshapes, location on the body, movement, orientation, facial expressions and lip-patterns. Similar to spoken languages, each country or region has its own sign language with varying grammar and rules, leading to the few hundred sign languages that exist today. While automatic speech recognition has progressed to being commercially available, automatic Sign Language Recognition (SLR) is still in its infancy.
This project aims to create the first corpus of the sign language used in Kazakhstan appropriate for machine learning and linguistics research. As with any video dataset, manual annotation of sign languages is extremely time- and resource-consuming. In contrast to the ELAN tool, which is usually used for sign annotation, we aim to create a semi-automatic tool that will automatically annotate a sign's manual and non-manual components, thus contributing to the challenging problem of vision-based automatic SLR. Finally, we aim to develop a robust algorithm that will first be used during the annotation process and then further applied to automatic SLR for various human-computer/robot interaction applications.
We closely collaborate with a sign language linguist, Associate Professor Dr. Vadim Kimmelman from the University of Bergen in Norway.
This project is funded by the NU Faculty Development Program (2019-2021).
Imashev, A., Kydyrbekova, A., Oralbayeva, N., Kenzhekhan, A., & Sandygulova, A. (2024). Learning sign language with mixed reality applications-the exploratory case study with deaf students. Education and Information Technologies, 1-32.
[BIBTEX][PDF]
@article{imashev2024learning,
  author    = {Imashev, Alfarabi and Kydyrbekova, Aigerim and Oralbayeva, Nurziya and Kenzhekhan, Azamat and Sandygulova, Anara},
  title     = {Learning sign language with mixed reality applications-the exploratory case study with deaf students},
  journal   = {Education and Information Technologies},
  pages     = {1--32},
  publisher = {Springer},
  year      = {2024},
}
Imashev, A., Oralbayeva, N., Baizhanova, G., & Sandygulova, A. (2024, May). Comparative Analysis of Sign Language Interpreting Agents Perception: A Study of the Deaf. In Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024) (pp. 3603-3609).
[BIBTEX][PDF]
@inproceedings{imashev2024comparative,
  author    = {Imashev, Alfarabi and Oralbayeva, Nurziya and Baizhanova, Gulmira and Sandygulova, Anara},
  title     = {Comparative Analysis of Sign Language Interpreting Agents Perception: A Study of the Deaf},
  booktitle = {Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)},
  pages     = {3603--3609},
  year      = {2024},
}
Imashev, A., Kydyrbekova, A., Mukushev, M., Sandygulova, A., Islam, S., Israilov, K., ... & Yessenbayev, Z. (2024, May). Retrospective of Kazakh-Russian Sign Language Corpus Formation. In Proceedings of the LREC-COLING 2024 11th Workshop on the Representation and Processing of Sign Languages: Evaluation of Sign Language Resources (pp. 111-122).
[BIBTEX][PDF]
@inproceedings{imashev2024retrospective,
  author    = {Imashev, Alfarabi and Kydyrbekova, Aigerim and Mukushev, Medet and Sandygulova, Anara and Islam, Shynggys and Israilov, Khassan and Makazhanov, Aibek and Yessenbayev, Zhandos},
  title     = {Retrospective of Kazakh-Russian Sign Language Corpus Formation},
  booktitle = {Proceedings of the LREC-COLING 2024 11th Workshop on the Representation and Processing of Sign Languages: Evaluation of Sign Language Resources},
  pages     = {111--122},
  year      = {2024},
}
Kydyrbekova, A., Kenzhekhan, A., Omirbayev, S., Oralbayeva, N., Imashev, A., & Sandygulova, A. (2023). Interaction Design of the Mixed Reality Application for Deaf Children.
[BIBTEX][PDF]
@misc{kydyrbekova2023interaction,
  author        = {Kydyrbekova, Aigerim and Kenzhekhan, Azamat and Omirbayev, Sultan and Oralbayeva, Nurziya and Imashev, Alfarabi and Sandygulova, Anara},
  title         = {Interaction Design of the Mixed Reality Application for Deaf Children},
  year          = {2023},
  internal-note = {NOTE(review): source gives no journal or venue; @article requires a journal field, so typed as @misc -- confirm publication status and restore a specific type when known},
}
Mukushev, M., Ubingazhibov, A., Kydyrbekova, A., Imashev, A., Kimmelman, V., & Sandygulova, A. (2022). FluentSigners-50: A signer independent benchmark dataset for sign language processing. Plos one, 17(9), e0273649.
@article{mukushev2022fluentsigners,
  author    = {Mukushev, Medet and Ubingazhibov, Aidyn and Kydyrbekova, Aigerim and Imashev, Alfarabi and Kimmelman, Vadim and Sandygulova, Anara},
  title     = {{FluentSigners-50}: A signer independent benchmark dataset for sign language processing},
  journal   = {PLOS ONE},
  volume    = {17},
  number    = {9},
  pages     = {e0273649},
  publisher = {Public Library of Science},
  year      = {2022},
}
Mukushev, M., Kydyrbekova, A., Imashev, A., Kimmelman, V., & Sandygulova, A. (2022, June). Crowdsourcing Kazakh-Russian Sign Language: FluentSigners-50. In Proceedings of the Thirteenth Language Resources and Evaluation Conference (pp. 2541-2547).
[BIBTEX][PDF]
@inproceedings{mukushev2022crowdsourcing,
  author    = {Mukushev, Medet and Kydyrbekova, Aigerim and Imashev, Alfarabi and Kimmelman, Vadim and Sandygulova, Anara},
  title     = {Crowdsourcing {Kazakh-Russian} Sign Language: {FluentSigners-50}},
  booktitle = {Proceedings of the Thirteenth Language Resources and Evaluation Conference},
  pages     = {2541--2547},
  year      = {2022},
}
Imashev, A., Oralbayeva, N., Kimmelman, V., & Sandygulova, A. (2022, December). A User-Centered Evaluation of the Data-Driven Sign Language Avatar System: A Pilot Study. In Proceedings of the 10th International Conference on Human-Agent Interaction (pp. 194-202).
[BIBTEX][PDF]
@inproceedings{imashev2022user,
  author    = {Imashev, Alfarabi and Oralbayeva, Nurziya and Kimmelman, Vadim and Sandygulova, Anara},
  title     = {A User-Centered Evaluation of the Data-Driven Sign Language Avatar System: A Pilot Study},
  booktitle = {Proceedings of the 10th International Conference on Human-Agent Interaction},
  pages     = {194--202},
  year      = {2022},
}
Koishybay, K., Mukushev, M., & Sandygulova, A. (2021, January). Continuous Sign Language Recognition with Iterative Spatiotemporal Fine-tuning. In 2020 25th International Conference on Pattern Recognition (ICPR) (pp. 10211-10218). IEEE.
@inproceedings{koishybay2021continuous,
  author       = {Koishybay, Kenessary and Mukushev, Medet and Sandygulova, Anara},
  title        = {Continuous Sign Language Recognition with Iterative Spatiotemporal Fine-tuning},
  booktitle    = {2020 25th International Conference on Pattern Recognition (ICPR)},
  pages        = {10211--10218},
  organization = {IEEE},
  year         = {2021},
}
Kuznetsova, A., Imashev, A., Mukushev, M., Sandygulova, A., & Kimmelman, V. (2021, August). Using Computer Vision to Analyze Non-manual Marking of Questions in KRSL. In Proceedings of the 1st International Workshop on Automatic Translation for Signed and Spoken Languages (AT4SSL) (pp. 49-59).
[BIBTEX][PDF]
@inproceedings{kuznetsova2021using,
  author    = {Kuznetsova, Anna and Imashev, Alfarabi and Mukushev, Medet and Sandygulova, Anara and Kimmelman, Vadim},
  title     = {Using Computer Vision to Analyze Non-manual Marking of Questions in {KRSL}},
  booktitle = {Proceedings of the 1st International Workshop on Automatic Translation for Signed and Spoken Languages (AT4SSL)},
  pages     = {49--59},
  year      = {2021},
}
Kimmelman, V., A. Imashev, M. Mukushev & A. Sandygulova. (2020). Eyebrow position in grammatical and emotional expressions in Kazakh-Russian Sign Language: A quantitative study. PLOS ONE 15(6). https://doi.org/10.1371/journal.pone.0233731 (open access)
[BIBTEX][DOI]
@article{10.1371/journal.pone.0233731,
  author    = {Kimmelman, Vadim and Imashev, Alfarabi and Mukushev, Medet and Sandygulova, Anara},
  title     = {Eyebrow position in grammatical and emotional expressions in {Kazakh-Russian} Sign Language: A quantitative study},
  journal   = {PLOS ONE},
  volume    = {15},
  number    = {6},
  pages     = {1--16},
  month     = jun,
  year      = {2020},
  publisher = {Public Library of Science},
  doi       = {10.1371/journal.pone.0233731},
}
Imashev, A., Mukushev, M., Kimmelman, V., & Sandygulova, A. (2020). K-RSL: a Corpus for Linguistic Understanding, Visual Evaluation, and Recognition of Sign Languages. In Proceedings of the 24th Conference on Computational Natural Language Learning. Association for Computational Linguistics.
[BIBTEX][PDF]
@inproceedings{imashev2020k,
  author       = {Imashev, Alfarabi and Mukushev, Medet and Kimmelman, Vadim and Sandygulova, Anara},
  title        = {{K-RSL}: a Corpus for Linguistic Understanding, Visual Evaluation, and Recognition of Sign Languages},
  booktitle    = {Proceedings of the 24th Conference on Computational Natural Language Learning},
  organization = {Association for Computational Linguistics},
  year         = {2020},
}