@inproceedings{15e75cb7b5244efb8ebcd0419c67f79a,
  title     = {Ensemble-based Fine-Tuning Strategy for Temporal Relation Extraction from the Clinical Narrative},
  abstract  = {In this paper, we investigate ensemble methods for fine-tuning transformer-based pretrained models for clinical natural language processing tasks, specifically temporal relation extraction from the clinical narrative. Our experimental results on the THYME data show that ensembling as a fine-tuning strategy can further boost model performance over single learners optimized for hyperparameters. Dynamic snapshot ensembling is particularly beneficial as it fine-tunes a wide array of parameters and results in a 2.8\% absolute improvement in F1 over the base single learner.},
  author    = {Wang, Lijing and Miller, Timothy and Bethard, Steven and Savova, Guergana},
  note      = {Funding Information: The study was funded by R01LM013486, R01LM10090 and U24CA248010 from the United States National Institutes of Health. The content is solely the responsibility of the authors and does not necessarily represent the official views of the National Institutes of Health. Publisher Copyright: {\textcopyright} 2022 Association for Computational Linguistics.; 4th Workshop on Clinical Natural Language Processing, ClinicalNLP 2022 ; Conference date: 14-07-2022},
  year      = {2022},
  language  = {English (US)},
  series    = {ClinicalNLP 2022 - 4th Workshop on Clinical Natural Language Processing, Proceedings},
  publisher = {Association for Computational Linguistics (ACL)},
  pages     = {103--108},
  editor    = {Naumann, Tristan and Bethard, Steven and Roberts, Kirk and Rumshisky, Anna},
  booktitle = {ClinicalNLP 2022 - 4th Workshop on Clinical Natural Language Processing, Proceedings},
}