@conference {756, title = {Exploring the Impact of Training Data Bias on Automatic Generation of Video Captions}, booktitle = {MultiMedia Modeling}, year = {2019}, pages = {178{\textendash}190}, publisher = {Springer International Publishing}, organization = {Springer International Publishing}, address = {Cham}, abstract = {

A major issue in machine learning is the availability of training data. While this historically referred to the availability of a sufficient volume of training data, recently this has shifted to the availability of sufficient unbiased training data. In this paper we focus on the effect of training data bias on an emerging multimedia application, the automatic captioning of short video clips. We use subsets of the same training data to generate different models for video captioning using the same machine learning technique, and we evaluate the performance of the different training data subsets using a well-known video caption benchmark, TRECVid. We train using the MSR-VTT video-caption pairs, pruning this set so that the captions describing each video become more homogeneously similar, or more diverse, or pruning at random. We then assess the effectiveness of the caption-generating models trained with these variations using automatic metrics as well as direct assessment by human assessors. Our findings are preliminary and show that randomly pruning captions from the training data yields the worst performance, and that pruning to make the data more homogeneous, or more diverse, improves performance slightly when compared to random pruning. Our work points to the need for more training data: both more video clips and, more importantly, more captions for those videos.

}, isbn = {978-3-030-05710-7}, author = {Smeaton, Alan F. and Graham, Yvette and McGuinness, Kevin and O{\textquoteright}Connor, Noel E. and Quinn, Se{\'a}n and Arazo Sanchez, Eric} }