{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2026,4,7]],"date-time":"2026-04-07T16:23:37Z","timestamp":1775579017934,"version":"3.50.1"},"reference-count":46,"publisher":"IEEE","content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2016,6]]},"DOI":"10.1109\/cvpr.2016.502","type":"proceedings-article","created":{"date-parts":[[2016,12,13]],"date-time":"2016-12-13T01:38:49Z","timestamp":1481593129000},"page":"4641-4650","source":"Crossref","is-referenced-by-count":153,"title":["TGIF: A New Dataset and Benchmark on Animated GIF Description"],"prefix":"10.1109","author":[{"given":"Yuncheng","family":"Li","sequence":"first","affiliation":[]},{"given":"Yale","family":"Song","sequence":"additional","affiliation":[]},{"given":"Liangliang","family":"Cao","sequence":"additional","affiliation":[]},{"given":"Joel","family":"Tetreault","sequence":"additional","affiliation":[]},{"given":"Larry","family":"Goldberg","sequence":"additional","affiliation":[]},{"given":"Alejandro","family":"Jaimes","sequence":"additional","affiliation":[]},{"given":"Jiebo","family":"Luo","sequence":"additional","affiliation":[]}],"member":"263","reference":[{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.3115\/v1\/N15-1173"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.515"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-015-0816-y"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2013.61"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298940"},{"key":"ref30","doi-asserted-by":"crossref","DOI":"10.1162\/tacl_a_00207","article-title":"Grounding action descriptions in videos","author":"regneri","year":"2013","journal-title":"TACL"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7299087"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.510"},{"key":"ref35","article-title":"Enriching the knowledge sources used in a maximum entropy part-of-speech tagger","author":"toutanova","year":"2010","journal-title":"ACL"},{"key":"ref34","article-title":"Using descriptive video services to create a large data source for video annotation research","author":"torabi","year":"2015","journal-title":"ArXiv Preprint"},{"key":"ref10","article-title":"Every picture tells a story: Generating sentences from images","author":"farhadi","year":"2010","journal-title":"ECCV"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298935"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2013.337"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1997.9.8.1735"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1145\/2647868.2654889"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298932"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.3115\/1557769.1557821"},{"key":"ref16","article-title":"Imagenet classification with deep convolutional neural networks","author":"krizhevsky","year":"2012","journal-title":"NIPS"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2012.162"},{"key":"ref18","article-title":"Meteor universal: Language specific translation evaluation for any target language","author":"lavie","year":"2014","journal-title":"ACL"},{"key":"ref19","article-title":"Rouge: A package for automatic evaluation of summaries","author":"lin","year":"2004","journal-title":"ACL"},{"key":"ref28","article-title":"Collecting image annotations using amazon's mechanical turk","author":"rashtchian","year":"2010","journal-title":"HLT-NAACL"},{"key":"ref4","doi-asserted-by":"crossref","DOI":"10.21437\/Interspeech.2014-564","article-title":"One billion word benchmark for measuring progress in statistical language modeling","author":"chelba","year":"2014","journal-title":"InterSpeech"},{"key":"ref27","article-title":"Bleu: a method for automatic evaluation of machine translation","author":"papineni","year":"2002","journal-title":"ACL"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1145\/2858036.2858532"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1145\/2506182.2506198"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1007\/s10994-011-5256-5"},{"key":"ref5","article-title":"Collecting highly parallel data for paraphrase evaluation","author":"chen","year":"2011","journal-title":"ACL"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2013.340"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1162\/COLI_a_00163"},{"key":"ref2","article-title":"The berkeley framenet project","author":"baker","year":"1998","journal-title":"ACL"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298754"},{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.279"},{"key":"ref46","article-title":"Learning deep features for scene recognition using places database","author":"zhou","year":"2014","journal-title":"NIPS"},{"key":"ref20","article-title":"Microsoft COCO: common objects in context","author":"lin","year":"2014","journal-title":"ECCV"},{"key":"ref45","author":"zauner","year":"2010","journal-title":"Implementation and Benchmarking of Perceptual Image Hash Functions"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1145\/219717.219748"},{"key":"ref21","article-title":"Deep captioning with multimodal recurrent neural networks (m-rnn)","author":"mao","year":"2014","journal-title":"arXiv preprint arXiv 1412 6632"},{"key":"ref42","article-title":"I2t: Image parsing to text description","author":"yao","year":"1998"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2013.231"},{"key":"ref41","article-title":"Show, attend and tell: Neural image caption generation with visual attention","author":"xu","year":"0"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2012.6248097"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.283"},{"key":"ref26","article-title":"Jointly modeling embedding and translation to bridge video and language","volume":"abs 1505 1861","author":"pan","year":"2015","journal-title":"CoRR"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/ICCV.2015.512"},{"key":"ref25","article-title":"Im2text: Describing images using 1 million captioned photographs","author":"ordonez","year":"2011","journal-title":"NIPS"}],"event":{"name":"2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)","location":"Las Vegas, NV, USA","start":{"date-parts":[[2016,6,27]]},"end":{"date-parts":[[2016,6,30]]}},"container-title":["2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/7776647\/7780329\/07780871.pdf?arnumber=7780871","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,6,13]],"date-time":"2025-06-13T14:32:13Z","timestamp":1749825133000},"score":1,"resource":{"primary":{"URL":"http:\/\/ieeexplore.ieee.org\/document\/7780871\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2016,6]]},"references-count":46,"URL":"https:\/\/doi.org\/10.1109\/cvpr.2016.502","relation":{},"subject":[],"published":{"date-parts":[[2016,6]]}}}