@inproceedings{yao-koller-2022-structural,
title = "Structural generalization is hard for sequence-to-sequence models",
author = "Yao, Yuekun and
Koller, Alexander",
editor = "Goldberg, Yoav and
Kozareva, Zornitsa and
Zhang, Yue",
booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.emnlp-main.337",
doi = "10.18653/v1/2022.emnlp-main.337",
pages = "5048--5062",
abstract = "Sequence-to-sequence (seq2seq) models have been successful across many NLP tasks,including ones that require predicting linguistic structure. However, recent work on compositional generalization has shown that seq2seq models achieve very low accuracy in generalizing to linguistic structures that were not seen in training. We present new evidence that this is a general limitation of seq2seq models that is present not just in semantic parsing, but also in syntactic parsing and in text-to-text tasks, and that this limitation can often be overcome by neurosymbolic models that have linguistic knowledge built in. We further report on some experiments that give initial answers on the reasons for these limitations.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="yao-koller-2022-structural">
    <titleInfo>
      <title>Structural generalization is hard for sequence-to-sequence models</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Yuekun</namePart>
      <namePart type="family">Yao</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Alexander</namePart>
      <namePart type="family">Koller</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2022-12</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Yoav</namePart>
        <namePart type="family">Goldberg</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Zornitsa</namePart>
        <namePart type="family">Kozareva</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Yue</namePart>
        <namePart type="family">Zhang</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Abu Dhabi, United Arab Emirates</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Sequence-to-sequence (seq2seq) models have been successful across many NLP tasks, including ones that require predicting linguistic structure. However, recent work on compositional generalization has shown that seq2seq models achieve very low accuracy in generalizing to linguistic structures that were not seen in training. We present new evidence that this is a general limitation of seq2seq models that is present not just in semantic parsing, but also in syntactic parsing and in text-to-text tasks, and that this limitation can often be overcome by neurosymbolic models that have linguistic knowledge built in. We further report on some experiments that give initial answers on the reasons for these limitations.</abstract>
<identifier type="citekey">yao-koller-2022-structural</identifier>
<identifier type="doi">10.18653/v1/2022.emnlp-main.337</identifier>
<location>
<url>https://aclanthology.org/2022.emnlp-main.337</url>
</location>
<part>
<date>2022-12</date>
<extent unit="page">
<start>5048</start>
<end>5062</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Structural generalization is hard for sequence-to-sequence models
%A Yao, Yuekun
%A Koller, Alexander
%Y Goldberg, Yoav
%Y Kozareva, Zornitsa
%Y Zhang, Yue
%S Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing
%D 2022
%8 December
%I Association for Computational Linguistics
%C Abu Dhabi, United Arab Emirates
%F yao-koller-2022-structural
%X Sequence-to-sequence (seq2seq) models have been successful across many NLP tasks, including ones that require predicting linguistic structure. However, recent work on compositional generalization has shown that seq2seq models achieve very low accuracy in generalizing to linguistic structures that were not seen in training. We present new evidence that this is a general limitation of seq2seq models that is present not just in semantic parsing, but also in syntactic parsing and in text-to-text tasks, and that this limitation can often be overcome by neurosymbolic models that have linguistic knowledge built in. We further report on some experiments that give initial answers on the reasons for these limitations.
%R 10.18653/v1/2022.emnlp-main.337
%U https://aclanthology.org/2022.emnlp-main.337
%U https://doi.org/10.18653/v1/2022.emnlp-main.337
%P 5048-5062
[Structural generalization is hard for sequence-to-sequence models](https://aclanthology.org/2022.emnlp-main.337) (Yao & Koller, EMNLP 2022)