{"payload":{"pageCount":1,"repositories":[{"type":"Public","name":"LiMuSE","owner":"aispeech-lab","isFork":false,"description":"PyTorch implementation of LiMuSE","allTopics":["speaker-extraction","pytorch"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":27,"forksCount":9,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-10-11T08:36:36.912Z"}},{"type":"Public","name":"advr-avss","owner":"aispeech-lab","isFork":false,"description":"Pytorch implementation of our paper: Audio-Visual Speech Separation with Visual Features Enhanced by Adversarial Training.","allTopics":["audio-visual-speech-separation"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":2,"starsCount":17,"forksCount":4,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-07-11T10:33:11.266Z"}},{"type":"Public","name":"VisBCI","owner":"aispeech-lab","isFork":false,"description":"Pytorch codes of our paper \"Improving Cross-State and Cross-Subject Visual ERP-based BCI with Temporal Modeling and Adversarial Training\"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":9,"forksCount":3,"license":"Other","participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-02-23T09:43:39.656Z"}},{"type":"Public","name":"WASE_202112","owner":"aispeech-lab","isFork":false,"description":"PyTorch implementation of WASE described in our ICASSP 2021: \"Wase: Learning When to Attend for Speaker Extraction in Cocktail Party Environments\"","allTopics":["speaker-extraction"],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":6,"forksCount":3,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2022-01-11T14:07:27.373Z"}},{"type":"Public","name":"w2v-cif-bert","owner":"aispeech-lab","isFork":false,"description":"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":36,"forksCount":11,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2021-06-28T07:09:28.579Z"}},{"type":"Public","name":"TinyWASE","owner":"aispeech-lab","isFork":false,"description":"PyTorch implementation of TinyWASE described in our paper \"Compressing Speaker Extraction Model with Ultra-low Precision Quantization and Knowledge Distillation\"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":10,"forksCount":4,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2021-06-28T02:46:49.450Z"}},{"type":"Public","name":"HCRN","owner":"aispeech-lab","isFork":false,"description":"PyTorch implementation of HCRN described in our paper \"Towards Modeling Auditory Restoration in Noisy Environments\"","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":0,"starsCount":0,"forksCount":1,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2021-06-22T09:39:29.785Z"}},{"type":"Public","name":"SDNet","owner":"aispeech-lab","isFork":false,"description":"Pytorch implemention of 
SDNet","allTopics":[],"primaryLanguage":{"name":"Python","color":"#3572A5"},"pullRequestCount":0,"issueCount":1,"starsCount":20,"forksCount":4,"license":null,"participation":null,"lastUpdated":{"hasBeenPushedTo":true,"timestamp":"2021-06-01T07:49:49.727Z"}}],"repositoryCount":8,"userInfo":null,"searchable":true,"definitions":[],"typeFilters":[{"id":"all","text":"All"},{"id":"public","text":"Public"},{"id":"source","text":"Sources"},{"id":"fork","text":"Forks"},{"id":"archived","text":"Archived"},{"id":"template","text":"Templates"}],"compactMode":false},"title":"aispeech-lab repositories"}