Results
The gold labels, submissions, and scores for all teams can be found here.
The gold labels inside the test XML can be found here.
The task description paper is here.
@InProceedings{nakov-EtAl:2017:SemEval,
author = {Nakov, Preslav and Hoogeveen, Doris and M\`{a}rquez, Llu\'{i}s and Moschitti, Alessandro and Mubarak, Hamdy and Baldwin, Timothy and Verspoor, Karin},
title = {{SemEval-2017 Task 3}: Community Question Answering},
booktitle = {Proceedings of the 11th International Workshop on Semantic Evaluation},
series = {SemEval~'17},
month = {August},
year = {2017},
address = {Vancouver, Canada},
publisher = {Association for Computational Linguistics},
pages = {27--48},
abstract = {We describe SemEval--2017 Task 3 on Community Question Answering. This year,
we reran the four subtasks from SemEval-2016: (A) Question--Comment Similarity,
(B) Question--Question Similarity, (C) Question--External Comment Similarity,
and (D) Rerank the correct answers for a new question in Arabic, providing all
the data from 2015 and 2016 for training, and fresh data for testing.
Additionally, we added a new subtask E in order to enable experimentation with
Multi-domain Question Duplicate Detection in a larger-scale scenario, using
StackExchange subforums. A total of 23 teams participated in the task, and
submitted a total of 85 runs (36 primary and 49 contrastive) for subtasks A--D.
Unfortunately, no teams participated in subtask E. A variety of approaches and
features were used by the participating systems to address the different
subtasks. The best systems achieved an official score (MAP) of 88.43, 47.22,
15.46, and 61.16 in subtasks A, B, C, and D, respectively. These scores are
better than the baselines, especially for subtasks A--C.},
url = {http://www.aclweb.org/anthology/S17-2003}
}
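
Since the official score reported for subtasks A-D is Mean Average Precision (MAP), the following is a minimal sketch of how such a score can be computed. It is an illustrative Python implementation, not the official task scorer; it assumes each query's candidates are ranked by the system and carry binary gold relevance labels, and all function names are hypothetical.

# Illustrative MAP computation (not the official SemEval-2017 Task 3 scorer).
# Assumes every relevant candidate appears in the ranked list, as in a
# reranking setup where systems order a fixed candidate pool.

def average_precision(ranked_labels):
    """Average precision for one query.
    ranked_labels: 0/1 gold relevance labels in the system's ranked order."""
    hits, precision_sum = 0, 0.0
    for rank, label in enumerate(ranked_labels, start=1):
        if label:
            hits += 1
            precision_sum += hits / rank
    return precision_sum / hits if hits else 0.0

def mean_average_precision(queries):
    """MAP over a list of queries, each given as a ranked list of gold labels."""
    return sum(average_precision(q) for q in queries) / len(queries)

# Example: two questions with their candidates ranked by a system,
# 1 = relevant ("Good"), 0 = not relevant.
print(mean_average_precision([[1, 0, 1, 0], [0, 1, 1]]))  # approx. 0.7083

For the first query the relevant items sit at ranks 1 and 3, giving an average precision of (1/1 + 2/3) / 2 = 0.8333; for the second, ranks 2 and 3 give (1/2 + 2/3) / 2 = 0.5833; the mean of the two is about 0.7083.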