image: string (15 chars)
id: string (8–12 chars)
conversations: string (79–268 chars)
00000000460.jpg
460_evjvqa
[{"from": "human", "value": "how many people are standing right behind the woman in the green shirt grilling the food?\n<image>"}, {"from": "gpt", "value": "a woman wearing yellow shirt"}]
00000000462.jpg
462_evjvqa
[{"from": "human", "value": "how many people are buying at the stall of the woman sitting on the left?\n<image>"}, {"from": "gpt", "value": "no one is buying"}]
00000000462.jpg
462_evjvqa
[{"from": "human", "value": "what is the woman sitting in the stall on the left doing?\n<image>"}, {"from": "gpt", "value": "scrolling on the phone"}]
00000000463.jpg
463_evjvqa
[{"from": "human", "value": "what items are passersby looking?\n<image>"}, {"from": "gpt", "value": "peach blossom branch"}]
00000000463.jpg
463_evjvqa
[{"from": "human", "value": "what season do passersby buy peach blossom?\n<image>"}, {"from": "gpt", "value": "spring"}]
00000000463.jpg
463_evjvqa
[{"from": "human", "value": "what is the woman in the red jacket wearing a conical hat holding in her hand?\n<image>"}, {"from": "gpt", "value": "a peach blossom branch"}]
00000000465.jpg
465_evjvqa
[{"from": "human", "value": "how are the gardens lit?\n<image>"}, {"from": "gpt", "value": "by using light bulbs"}]
00000000467.jpg
467_evjvqa
[{"from": "human", "value": "where are these people flocking?\n<image>"}, {"from": "gpt", "value": "in front of a church"}]
00000000467.jpg
467_evjvqa
[{"from": "human", "value": "where is this scene recorded?\n<image>"}, {"from": "gpt", "value": "outside a church"}]
00000000473.jpg
473_evjvqa
[{"from": "human", "value": "where are the passersby visiting?\n<image>"}, {"from": "gpt", "value": "2019 spring fair"}]
00000000475.jpg
475_evjvqa
[{"from": "human", "value": "where is this scene recorded?\n<image>"}, {"from": "gpt", "value": "in a supermarket"}]
00000000476.jpg
476_evjvqa
[{"from": "human", "value": "what store is the woman in the black polka dot shirt buying at?\n<image>"}, {"from": "gpt", "value": "fruit and vegetable store"}]
00000000476.jpg
476_evjvqa
[{"from": "human", "value": "how many people are shopping at the fruit and vegetable store?\n<image>"}, {"from": "gpt", "value": "five"}]
00000000476.jpg
476_evjvqa
[{"from": "human", "value": "which store is the man standing in front of?\n<image>"}, {"from": "gpt", "value": "fruit and vegetable store"}]
00000000479.jpg
479_evjvqa
[{"from": "human", "value": "who is the girl in the yellow dress being photographed with?\n<image>"}, {"from": "gpt", "value": "a man wearing a white shirt and black pants"}]
00000000479.jpg
479_evjvqa
[{"from": "human", "value": "how many people is the girl in the yellow dress being photographed with?\n<image>"}, {"from": "gpt", "value": "one"}]
00000000479.jpg
479_evjvqa
[{"from": "human", "value": "how many people is the boy in white shirt and black pants being photographed with?\n<image>"}, {"from": "gpt", "value": "one"}]
00000000479.jpg
479_evjvqa
[{"from": "human", "value": "how many people are crossing the street behind the boy in white shirt and black pants?\n<image>"}, {"from": "gpt", "value": "two"}]
00000000480.jpg
480_evjvqa
[{"from": "human", "value": "where are the two people standing?\n<image>"}, {"from": "gpt", "value": "in the supermarket"}]
00000000480.jpg
480_evjvqa
[{"from": "human", "value": "which counter are the two people standing at?\n<image>"}, {"from": "gpt", "value": "fruit and vegetable"}]
00000000480.jpg
480_evjvqa
[{"from": "human", "value": "how many people are buying at the fruit and vegetable counter?\n<image>"}, {"from": "gpt", "value": "two"}]
00000000480.jpg
480_evjvqa
[{"from": "human", "value": "how many people does the man go to the supermarket with?\n<image>"}, {"from": "gpt", "value": "one"}]
00000000482.jpg
482_evjvqa
[{"from": "human", "value": "what are the students doing?\n<image>"}, {"from": "gpt", "value": "doing an incense offering in a epitaph relic"}]
00000000483.jpg
483_evjvqa
[{"from": "human", "value": "how many people are standing on the podium?\n<image>"}, {"from": "gpt", "value": "a man is standing on the podium"}]
00000000483.jpg
483_evjvqa
[{"from": "human", "value": "on what date is the program held?\n<image>"}, {"from": "gpt", "value": "april 9, 2019"}]
00000000484.jpg
484_evjvqa
[{"from": "human", "value": "where is this scene recorded?\n<image>"}, {"from": "gpt", "value": "at the bridge over a river"}]
00000000485.jpg
485_evjvqa
[{"from": "human", "value": "what is the woman in the orange shirt doing?\n<image>"}, {"from": "gpt", "value": "cleaning the counter"}]
00000000485.jpg
485_evjvqa
[{"from": "human", "value": "where is the woman in the orange shirt standing?\n<image>"}, {"from": "gpt", "value": "in a supermarket"}]
00000000485.jpg
485_evjvqa
[{"from": "human", "value": "what color uniform do supermarket staff wear?\n<image>"}, {"from": "gpt", "value": "orange"}]
00000000485.jpg
485_evjvqa
[{"from": "human", "value": "where is the woman in the orange shirt standing?\n<image>"}, {"from": "gpt", "value": "fruit and vegetable counter in the supermarket"}]
00000000496.jpg
496_evjvqa
[{"from": "human", "value": "where is this scene recorded?\n<image>"}, {"from": "gpt", "value": "in a historical site"}]
00000006100.jpg
6100_evjvqa
[{"from": "human", "value": "what is the dominant color tone of the room?\n<image>"}, {"from": "gpt", "value": "white is the color tone chosen for the interior design of the room. "}]
00000006100.jpg
6100_evjvqa
[{"from": "human", "value": "how many girls are printed on the big picture in the room?\n<image>"}, {"from": "gpt", "value": "there are four girls printed on the big picture in the room."}]
00000006100.jpg
6100_evjvqa
[{"from": "human", "value": "what color is the sofa set in the room?\n<image>"}, {"from": "gpt", "value": "it is white. "}]
00000006102.jpg
6102_evjvqa
[{"from": "human", "value": "what is the expression on her face?\n<image>"}, {"from": "gpt", "value": "she is smiling. "}]
00000006102.jpg
6102_evjvqa
[{"from": "human", "value": "what costume is she wearing?\n<image>"}, {"from": "gpt", "value": "she is wearing the traditional white ao dai costume. "}]
00000006102.jpg
6102_evjvqa
[{"from": "human", "value": "what is in her hands?\n<image>"}, {"from": "gpt", "value": "she is holding a bunch of red flowers in her hands. "}]
00000006108.jpg
6108_evjvqa
[{"from": "human", "value": "what is she doing?\n<image>"}, {"from": "gpt", "value": "she is posing by the sea. "}]
00000006108.jpg
6108_evjvqa
[{"from": "human", "value": "what kind of terrains do the vehicles behind the girl cover?\n<image>"}, {"from": "gpt", "value": "the ones behind the girl are seagoing vessels."}]
00000006108.jpg
6108_evjvqa
[{"from": "human", "value": "what vehicles are there behind the girl's back?\n<image>"}, {"from": "gpt", "value": "there are boats behind her back."}]
00000006109.jpg
6109_evjvqa
[{"from": "human", "value": "what is she doing?\n<image>"}, {"from": "gpt", "value": "she is on the phone. "}]
00000006109.jpg
6109_evjvqa
[{"from": "human", "value": "what does the female shopkeeper sell?\n<image>"}, {"from": "gpt", "value": "the female shopkeeper sells various types of dried fruits. "}]
00000006109.jpg
6109_evjvqa
[{"from": "human", "value": "what is she wearing?\n<image>"}, {"from": "gpt", "value": "she is wearing a black and white polka dot set of clothes."}]
00000006111.jpg
6111_evjvqa
[{"from": "human", "value": "what is the expression on his face?\n<image>"}, {"from": "gpt", "value": "he is smiling. "}]
00000006111.jpg
6111_evjvqa
[{"from": "human", "value": "where is he?\n<image>"}, {"from": "gpt", "value": "he is standing by the pool."}]
00000006111.jpg
6111_evjvqa
[{"from": "human", "value": "what color is his tie?\n<image>"}, {"from": "gpt", "value": "it is black. "}]
00000006112.jpg
6112_evjvqa
[{"from": "human", "value": "how many people are sitting on the bridge?\n<image>"}, {"from": "gpt", "value": "there are ten people sitting on the bridge. "}]
00000006112.jpg
6112_evjvqa
[{"from": "human", "value": "what color is the shirt worn by the security guard sitting on land?\n<image>"}, {"from": "gpt", "value": "it is blue. "}]
00000006115.jpg
6115_evjvqa
[{"from": "human", "value": "what are the two women doing?\n<image>"}, {"from": "gpt", "value": "the two women are picking vegetables. "}]
00000006115.jpg
6115_evjvqa
[{"from": "human", "value": "what kind of vegetable is the woman in black and purple t-shirt checking?\n<image>"}, {"from": "gpt", "value": "the woman in black and purple is checking the tomatoes."}]
00000006115.jpg
6115_evjvqa
[{"from": "human", "value": "how many women are browsing at the vegetable stall?\n<image>"}, {"from": "gpt", "value": "there are two women browsing at the vegetable stall. "}]
00000006116.jpg
6116_evjvqa
[{"from": "human", "value": "what are they doing?\n<image>"}, {"from": "gpt", "value": "they are browsing at a shopping area. "}]
00000006116.jpg
6116_evjvqa
[{"from": "human", "value": "what color is the top worn by the woman checking the t-shirts?\n<image>"}, {"from": "gpt", "value": "it is a black, white and red striped t-shirt. "}]
00000006116.jpg
6116_evjvqa
[{"from": "human", "value": "what color is the tarpaulin covering the roof of the shopping area?\n<image>"}, {"from": "gpt", "value": "it is yellow. "}]
00000006120.jpg
6120_evjvqa
[{"from": "human", "value": "what is the man doing?\n<image>"}, {"from": "gpt", "value": "he is sorting the dried herbs."}]
00000006120.jpg
6120_evjvqa
[{"from": "human", "value": "what color is the man' shirt?\n<image>"}, {"from": "gpt", "value": "the man is wearing a blue shirt. "}]
00000006122.jpg
6122_evjvqa
[{"from": "human", "value": "how many men are driving on the road?\n<image>"}, {"from": "gpt", "value": "there is one man driving on the road. "}]
00000006122.jpg
6122_evjvqa
[{"from": "human", "value": "what color is the motorbike that the man is driving?\n<image>"}, {"from": "gpt", "value": "he is driving a black and white motorbike."}]
00000006127.jpg
6127_evjvqa
[{"from": "human", "value": "what color is the shirt worn by the woman checking the vacuum flask counter?\n<image>"}, {"from": "gpt", "value": "her shirt is moss green."}]
00000006127.jpg
6127_evjvqa
[{"from": "human", "value": "how many women are carrying a white handbag?\n<image>"}, {"from": "gpt", "value": "there is one woman carrying a white handbag."}]
00000006128.jpg
6128_evjvqa
[{"from": "human", "value": "how many headlights does the train have?\n<image>"}, {"from": "gpt", "value": "the train has two yellow headlights. "}]
00000006130.jpg
6130_evjvqa
[{"from": "human", "value": "what is the dominant color of the couple's costume?\n<image>"}, {"from": "gpt", "value": "the couple's costume is pastel blue. "}]
00000006130.jpg
6130_evjvqa
[{"from": "human", "value": "what is the expression on the man and woman's faces?\n<image>"}, {"from": "gpt", "value": "the man and woman are smiling."}]
00000006130.jpg
6130_evjvqa
[{"from": "human", "value": "what is the couple doing?\n<image>"}, {"from": "gpt", "value": "the couple is holding hands. "}]
00000006133.jpg
6133_evjvqa
[{"from": "human", "value": "what is the man in blue shirt doing?\n<image>"}, {"from": "gpt", "value": "he is selling iron supplies."}]
00000006133.jpg
6133_evjvqa
[{"from": "human", "value": "what is the expression on the man sitting in the picture's face?\n<image>"}, {"from": "gpt", "value": "the man seems cheerful. "}]
00000006133.jpg
6133_evjvqa
[{"from": "human", "value": "how many men in pink shirt are there?\n<image>"}, {"from": "gpt", "value": "there is one man. "}]
00000006142.jpg
6142_evjvqa
[{"from": "human", "value": "what is the boy in blue doing?\n<image>"}, {"from": "gpt", "value": "the boy in blue is sitting and writing a document. "}]
00000006142.jpg
6142_evjvqa
[{"from": "human", "value": "how many women in white shirt are greeting guests?\n<image>"}, {"from": "gpt", "value": "there are two women."}]
00000006142.jpg
6142_evjvqa
[{"from": "human", "value": "what is the two women in white shirt doing?\n<image>"}, {"from": "gpt", "value": "they are greeting guests."}]
00000006143.jpg
6143_evjvqa
[{"from": "human", "value": "what color is the female employee's uniform?\n<image>"}, {"from": "gpt", "value": "her uniform is black."}]
00000006143.jpg
6143_evjvqa
[{"from": "human", "value": "how many men in yellow shirt are standing in the agency?\n<image>"}, {"from": "gpt", "value": "there is one man in yellow shirt standing in the agency. "}]
00000006146.jpg
6146_evjvqa
[{"from": "human", "value": "what are they doing?\n<image>"}, {"from": "gpt", "value": "they are conducting trade at a market."}]
00000006146.jpg
6146_evjvqa
[{"from": "human", "value": "what is the expression on the woman in dark green's face?\n<image>"}, {"from": "gpt", "value": "the woman in dark green is smiling."}]
00000006146.jpg
6146_evjvqa
[{"from": "human", "value": "what kind of roof tiles are they?\n<image>"}, {"from": "gpt", "value": "they are plain roof tiles."}]
00000006148.jpg
6148_evjvqa
[{"from": "human", "value": "what is the boy in blue shirt doing?\n<image>"}, {"from": "gpt", "value": "he is reading a notice."}]
00000006148.jpg
6148_evjvqa
[{"from": "human", "value": "how many women in red shirt are standing in front of the house?\n<image>"}, {"from": "gpt", "value": "there is one woman in red shirt standing in front of the house."}]
00000006150.jpg
6150_evjvqa
[{"from": "human", "value": "what is the woman in purple pants doing?\n<image>"}, {"from": "gpt", "value": "the woman in purple pants is shopping."}]
00000006150.jpg
6150_evjvqa
[{"from": "human", "value": "what are they doing?\n<image>"}, {"from": "gpt", "value": "they are conducting trade."}]
00000006150.jpg
6150_evjvqa
[{"from": "human", "value": "what color are the high heels worn by the woman in black shirt?\n<image>"}, {"from": "gpt", "value": "they are black. "}]
00000006153.jpg
6153_evjvqa
[{"from": "human", "value": "what is the boy doing?\n<image>"}, {"from": "gpt", "value": "he is scaling the fish."}]
00000006153.jpg
6153_evjvqa
[{"from": "human", "value": "what color are the boy's rubber boots?\n<image>"}, {"from": "gpt", "value": "the boy is wearing a pair of blue rubber boots."}]
00000006153.jpg
6153_evjvqa
[{"from": "human", "value": "what color is the boy' shirt?\n<image>"}, {"from": "gpt", "value": "it is red."}]
00000006155.jpg
6155_evjvqa
[{"from": "human", "value": "what color are the girls' shirts?\n<image>"}, {"from": "gpt", "value": "the girls' shirts are white."}]
00000006155.jpg
6155_evjvqa
[{"from": "human", "value": "what are the girls doing?\n<image>"}, {"from": "gpt", "value": "they are stacking hands together."}]
00000006155.jpg
6155_evjvqa
[{"from": "human", "value": "what are the girls' expressions?\n<image>"}, {"from": "gpt", "value": "they are smiling broadly."}]
00000006158.jpg
6158_evjvqa
[{"from": "human", "value": "what color is the shirt worn by the girl standing at the middle of the railway?\n<image>"}, {"from": "gpt", "value": "it is black and white striped."}]
00000006158.jpg
6158_evjvqa
[{"from": "human", "value": "what is the girl at the middle of the railway's facial expression?\n<image>"}, {"from": "gpt", "value": "she is smiling."}]
00000006158.jpg
6158_evjvqa
[{"from": "human", "value": "how many girls are dressed in black skirt?\n<image>"}, {"from": "gpt", "value": "there is one girl dressed in black skirt."}]
00000006160.jpg
6160_evjvqa
[{"from": "human", "value": "what are they doing?\n<image>"}, {"from": "gpt", "value": "they are marching into the hung temple."}]
00000006160.jpg
6160_evjvqa
[{"from": "human", "value": "what character are they dressing as?\n<image>"}, {"from": "gpt", "value": "they are dressing as soldiers from ancient dynasties."}]
00000006164.jpg
6164_evjvqa
[{"from": "human", "value": "what are they doing?\n<image>"}, {"from": "gpt", "value": "they are shopping at a fruit booth. "}]
00000006164.jpg
6164_evjvqa
[{"from": "human", "value": "what color is the woman standing in the booth' shirt?\n<image>"}, {"from": "gpt", "value": "it is black."}]
00000006164.jpg
6164_evjvqa
[{"from": "human", "value": "what is the expression on the woman dressed in black in the booth's face?\n<image>"}, {"from": "gpt", "value": "the woman dressed in black in the booth is smiling. "}]
00000006166.jpg
6166_evjvqa
[{"from": "human", "value": "what does she have in her hand?\n<image>"}, {"from": "gpt", "value": "she is holding a blue phone in her hand."}]
00000006166.jpg
6166_evjvqa
[{"from": "human", "value": "what color is her beret?\n<image>"}, {"from": "gpt", "value": "her beret is red-orange. "}]
00000006166.jpg
6166_evjvqa
[{"from": "human", "value": "what color are the flowers that the girl is taking pictures with?\n<image>"}, {"from": "gpt", "value": "they are white. "}]
00000006167.jpg
6167_evjvqa
[{"from": "human", "value": "what are they doing?\n<image>"}, {"from": "gpt", "value": "they are shopping in a supermarket."}]
00000006167.jpg
6167_evjvqa
[{"from": "human", "value": "what do men and women in purple shirts do?\n<image>"}, {"from": "gpt", "value": "they are mall staffs. "}]
00000006167.jpg
6167_evjvqa
[{"from": "human", "value": "what color is the mall staff's uniform shirt?\n<image>"}, {"from": "gpt", "value": "it is purple."}]