| image (string, 15 chars) | id (string, 8–12 chars) | conversations (string, 79–268 chars) |
|---|---|---|
| 00000009552.jpg | 9552_evjvqa | [{"from": "human", "value": "what color is the shopping basket of this supermarket?\n<image>"}, {"from": "gpt", "value": "the shopping basket of this supermarket is red"}] |
| 00000009555.jpg | 9555_evjvqa | [{"from": "human", "value": "what project is this?\n<image>"}, {"from": "gpt", "value": "this is a cable car project"}] |
| 00000009558.jpg | 9558_evjvqa | [{"from": "human", "value": "what color is the shirt that this person wears?\n<image>"}, {"from": "gpt", "value": "this person wears black shirt"}] |
| 00000009559.jpg | 9559_evjvqa | [{"from": "human", "value": "how many boats are there in total?\n<image>"}, {"from": "gpt", "value": "there are four boats in total"}] |
| 00000009560.jpg | 9560_evjvqa | [{"from": "human", "value": "what are these counters used for?\n<image>"}, {"from": "gpt", "value": "these counters are used for displaying products"}] |
| 00000009561.jpg | 9561_evjvqa | [{"from": "human", "value": "what does this store use to advertise its products?\n<image>"}, {"from": "gpt", "value": "this store uses photos of models to advertise its products"}] |
| 00000009564.jpg | 9564_evjvqa | [{"from": "human", "value": "what color is the car?\n<image>"}, {"from": "gpt", "value": "the car is orange"}] |
| 00000009566.jpg | 9566_evjvqa | [{"from": "human", "value": "what is this woman doing?\n<image>"}, {"from": "gpt", "value": "this woman is picking water lily"}] |
| 00000009567.jpg | 9567_evjvqa | [{"from": "human", "value": "what do these three women sell?\n<image>"}, {"from": "gpt", "value": "these three women sell vegetables"}] |
| 00000009568.jpg | 9568_evjvqa | [{"from": "human", "value": "how many ships are offshore?\n<image>"}, {"from": "gpt", "value": "there are four ships offshore"}] |
| 00000009569.jpg | 9569_evjvqa | [{"from": "human", "value": "what color is the shirt that the man at the highest floor of the building wears?\n<image>"}, {"from": "gpt", "value": "the man at the highest floor of the building wears white shirt"}] |
| 00000009570.jpg | 9570_evjvqa | [{"from": "human", "value": "what color are the leaves that this cake dish is wrapped in?\n<image>"}, {"from": "gpt", "value": "this cake dish is wrapped in green leaves"}] |
| 00000009571.jpg | 9571_evjvqa | [{"from": "human", "value": "what animal is the mascot representing this shop?\n<image>"}, {"from": "gpt", "value": "the mascot representing this shop is bee"}] |
| 00000009572.jpg | 9572_evjvqa | [{"from": "human", "value": "what color are the outfits that the employees of this restaurant wear?\n<image>"}, {"from": "gpt", "value": "the employees of this restaurant wear red outfits"}] |
| 00000009573.jpg | 9573_evjvqa | [{"from": "human", "value": "what color is the shirt that the woman in the field wears?\n<image>"}, {"from": "gpt", "value": "the woman in the field wears red shirt"}] |
| 00000009574.jpg | 9574_evjvqa | [{"from": "human", "value": "what is the man doing?\n<image>"}, {"from": "gpt", "value": "the man is considering which product to buy"}] |
| 00000009575.jpg | 9575_evjvqa | [{"from": "human", "value": "what is everyone doing?\n<image>"}, {"from": "gpt", "value": "everyone is selecting products to buy"}] |
| 00000009576.jpg | 9576_evjvqa | [{"from": "human", "value": "what color is the umbrella that the person cycling uses?\n<image>"}, {"from": "gpt", "value": "the person cycling uses red umbrella"}] |
| 00000009578.jpg | 9578_evjvqa | [{"from": "human", "value": "what is the person wearing black face mask doing?\n<image>"}, {"from": "gpt", "value": "the person wearing a black face mask is climbing over the fence"}] |
| 00000009579.jpg | 9579_evjvqa | [{"from": "human", "value": "what is the man doing?\n<image>"}, {"from": "gpt", "value": "the man is using the stick to get the product the woman wants to see"}] |
| 00000009580.jpg | 9580_evjvqa | [{"from": "human", "value": "what is the woman in white shirt doing?\n<image>"}, {"from": "gpt", "value": "the woman in white shirt is arranging goods"}] |
| 00000009583.jpg | 9583_evjvqa | [{"from": "human", "value": "what are these?\n<image>"}, {"from": "gpt", "value": "these are the counters in the supermarket"}] |
| 00000009584.jpg | 9584_evjvqa | [{"from": "human", "value": "what color is this construction?\n<image>"}, {"from": "gpt", "value": "this construction is purple"}] |
| 00000009587.jpg | 9587_evjvqa | [{"from": "human", "value": "what color is the main color of this store?\n<image>"}, {"from": "gpt", "value": "the main color of this store is yellow"}] |
| 00000009588.jpg | 9588_evjvqa | [{"from": "human", "value": "what color is the shirt that the man driving motorbike is wearing?\n<image>"}, {"from": "gpt", "value": "the man driving motorbike is wearing white shirt"}] |
| 00000009589.jpg | 9589_evjvqa | [{"from": "human", "value": "what is the woman looking at?\n<image>"}, {"from": "gpt", "value": "the woman is looking at the airplane on display"}] |
| 00000009593.jpg | 9593_evjvqa | [{"from": "human", "value": "how many people want to buy goods from the woman in orange shirt?\n<image>"}, {"from": "gpt", "value": "four people want to buy goods from the woman in orange shirt"}] |
| 00000009594.jpg | 9594_evjvqa | [{"from": "human", "value": "what color is the handbag that the woman is wearing?\n<image>"}, {"from": "gpt", "value": "the woman is wearing gray handbag"}] |
| 00000009598.jpg | 9598_evjvqa | [{"from": "human", "value": "what is the main means of transportation on this road?\n<image>"}, {"from": "gpt", "value": "the main means of transportation on this road is motorbike"}] |
| 00000003002.jpg | 3002_evjvqa | [{"from": "human", "value": "what events are these people participating in?\n<image>"}, {"from": "gpt", "value": "participating in the opening ceremony of a supermarket"}] |
| 00000003002.jpg | 3002_evjvqa | [{"from": "human", "value": "what event is this supermarket organizing?\n<image>"}, {"from": "gpt", "value": "organizing an opening ceremony"}] |
| 00000003004.jpg | 3004_evjvqa | [{"from": "human", "value": "what outfit is this woman wearing?\n<image>"}, {"from": "gpt", "value": "wearing a violet dress"}] |
| 00000003004.jpg | 3004_evjvqa | [{"from": "human", "value": "where is this lady standing?\n<image>"}, {"from": "gpt", "value": "in an ecotourism centre"}] |
| 00000003004.jpg | 3004_evjvqa | [{"from": "human", "value": "what is this lady doing?\n<image>"}, {"from": "gpt", "value": "crossing a bridge"}] |
| 00000003008.jpg | 3008_evjvqa | [{"from": "human", "value": "by what means of transportation does the girl wearing pink t-shirt move?\n<image>"}, {"from": "gpt", "value": "by motorbike"}] |
| 00000003008.jpg | 3008_evjvqa | [{"from": "human", "value": "what is the helmet girl doing?\n<image>"}, {"from": "gpt", "value": "choosing christmas decorations"}] |
| 00000003012.jpg | 3012_evjvqa | [{"from": "human", "value": "where are these stalls laid out?\n<image>"}, {"from": "gpt", "value": "in the campus in front of a building"}] |
| 00000003015.jpg | 3015_evjvqa | [{"from": "human", "value": "what is the woman in the white shirt doing?\n<image>"}, {"from": "gpt", "value": "standing and posing for the male black shirt friend to take photos"}] |
| 00000003015.jpg | 3015_evjvqa | [{"from": "human", "value": "what is the man in the black shirt doing?\n<image>"}, {"from": "gpt", "value": "taking photos for a girl in white"}] |
| 00000003015.jpg | 3015_evjvqa | [{"from": "human", "value": "where is the white shirt girl standing for taking pictures?\n<image>"}, {"from": "gpt", "value": "next to a booth"}] |
| 00000003015.jpg | 3015_evjvqa | [{"from": "human", "value": "who is the male friend holding the camera to take photos for?\n<image>"}, {"from": "gpt", "value": "for the girl who wears a white t-shirt"}] |
| 00000003015.jpg | 3015_evjvqa | [{"from": "human", "value": "who is the male friend holding the camera going with?\n<image>"}, {"from": "gpt", "value": "with a girl friend who wears a white t-shirt"}] |
| 00000003015.jpg | 3015_evjvqa | [{"from": "human", "value": "who is the female friend in the white shirt with?\n<image>"}, {"from": "gpt", "value": "with a male friend holding a camera"}] |
| 00000003017.jpg | 3017_evjvqa | [{"from": "human", "value": "where are these three girls standing?\n<image>"}, {"from": "gpt", "value": "in a shopping centre"}] |
| 00000003017.jpg | 3017_evjvqa | [{"from": "human", "value": "with whom is the girl friend wearing a pink shirt shopping?\n<image>"}, {"from": "gpt", "value": "with two girls in black"}] |
| 00000003017.jpg | 3017_evjvqa | [{"from": "human", "value": "what are two girls wearing black shirts doing?\n<image>"}, {"from": "gpt", "value": "selecting a product"}] |
| 00000003017.jpg | 3017_evjvqa | [{"from": "human", "value": "what is the girl in pink doing?\n<image>"}, {"from": "gpt", "value": "browsing stuffs"}] |
| 00000003017.jpg | 3017_evjvqa | [{"from": "human", "value": "who is the girl in the pink shirt standing with?\n<image>"}, {"from": "gpt", "value": "with two girls in black"}] |
| 00000003021.jpg | 3021_evjvqa | [{"from": "human", "value": "where is this female friend sitting?\n<image>"}, {"from": "gpt", "value": "in a bookshop"}] |
| 00000003021.jpg | 3021_evjvqa | [{"from": "human", "value": "what is the girl friend doing?\n<image>"}, {"from": "gpt", "value": "holding a book"}] |
| 00000003021.jpg | 3021_evjvqa | [{"from": "human", "value": "where is the girl reading books?\n<image>"}, {"from": "gpt", "value": "in a bookshop"}] |
| 00000003024.jpg | 3024_evjvqa | [{"from": "human", "value": "what events are these people attending?\n<image>"}, {"from": "gpt", "value": "attending a traditional festival"}] |
| 00000003025.jpg | 3025_evjvqa | [{"from": "human", "value": "what scene is the man holding the camera taking photos of?\n<image>"}, {"from": "gpt", "value": "photographing the peach trees"}] |
| 00000003025.jpg | 3025_evjvqa | [{"from": "human", "value": "where is the man with the camera standing?\n<image>"}, {"from": "gpt", "value": "on a street"}] |
| 00000003025.jpg | 3025_evjvqa | [{"from": "human", "value": "what is the man with the camera doing?\n<image>"}, {"from": "gpt", "value": "photographing the peach trees"}] |
| 00000003028.jpg | 3028_evjvqa | [{"from": "human", "value": "where does this plant grow?\n<image>"}, {"from": "gpt", "value": "on the water"}] |
| 00000003035.jpg | 3035_evjvqa | [{"from": "human", "value": "where is this man in black standing?\n<image>"}, {"from": "gpt", "value": "in a gallery room of statues"}] |
| 00000003035.jpg | 3035_evjvqa | [{"from": "human", "value": "what artifacts are displayed in this room?\n<image>"}, {"from": "gpt", "value": "display the statues"}] |
| 00000003035.jpg | 3035_evjvqa | [{"from": "human", "value": "how many people come to visit the gallery room of statues?\n<image>"}, {"from": "gpt", "value": "there are two people who came to visit"}] |
| 00000003035.jpg | 3035_evjvqa | [{"from": "human", "value": "where are these statues exhibited?\n<image>"}, {"from": "gpt", "value": "in the gallery room in a museum"}] |
| 00000003037.jpg | 3037_evjvqa | [{"from": "human", "value": "what products does this market sell?\n<image>"}, {"from": "gpt", "value": "selling fruits and vegetables"}] |
| 00000003037.jpg | 3037_evjvqa | [{"from": "human", "value": "what do the stalls on either side of the road sell?\n<image>"}, {"from": "gpt", "value": "fruits and vegetables goods"}] |
| 00000003037.jpg | 3037_evjvqa | [{"from": "human", "value": "what do people come here mainly to buy?\n<image>"}, {"from": "gpt", "value": "to buy fruits and vegetables"}] |
| 00000003037.jpg | 3037_evjvqa | [{"from": "human", "value": "what do people sell on either side of the aisle?\n<image>"}, {"from": "gpt", "value": "fruits and vegetables"}] |
| 00000003037.jpg | 3037_evjvqa | [{"from": "human", "value": "where are these fruit and vegetable bundles laid out?\n<image>"}, {"from": "gpt", "value": "on the stalls on both sides of the road"}] |
| 00000003041.jpg | 3041_evjvqa | [{"from": "human", "value": "where is this path built?\n<image>"}, {"from": "gpt", "value": "on the sea surface"}] |
| 00000003041.jpg | 3041_evjvqa | [{"from": "human", "value": "where does this path lead to?\n<image>"}, {"from": "gpt", "value": "lead to the sea"}] |
| 00000003042.jpg | 3042_evjvqa | [{"from": "human", "value": "what products are on display in this area?\n<image>"}, {"from": "gpt", "value": "mainly washing machines"}] |
| 00000003042.jpg | 3042_evjvqa | [{"from": "human", "value": "what products are sold in the booth area?\n<image>"}, {"from": "gpt", "value": "sell washing machines"}] |
| 00000003042.jpg | 3042_evjvqa | [{"from": "human", "value": "where are these washing machines sold?\n<image>"}, {"from": "gpt", "value": "in an electronics supermarket"}] |
| 00000003045.jpg | 3045_evjvqa | [{"from": "human", "value": "what is the woman in black doing?\n<image>"}, {"from": "gpt", "value": "choosing fruit"}] |
| 00000003045.jpg | 3045_evjvqa | [{"from": "human", "value": "what stand is the woman in black standing at?\n<image>"}, {"from": "gpt", "value": "the fruit stand"}] |
| 00000003045.jpg | 3045_evjvqa | [{"from": "human", "value": "how many people are standing to choose fruit near the woman in black?\n<image>"}, {"from": "gpt", "value": "there are three people also standing and choosing fruit"}] |
| 00000003049.jpg | 3049_evjvqa | [{"from": "human", "value": "what products does this shelf sell?\n<image>"}, {"from": "gpt", "value": "selling juice"}] |
| 00000003050.jpg | 3050_evjvqa | [{"from": "human", "value": "what products does this shelf sell?\n<image>"}, {"from": "gpt", "value": "fruits, vegetables"}] |
| 00000003050.jpg | 3050_evjvqa | [{"from": "human", "value": "what kind of products are on the trolley?\n<image>"}, {"from": "gpt", "value": "fresh fruits and vegetables"}] |
| 00000003053.jpg | 3053_evjvqa | [{"from": "human", "value": "where are these people going sightseeing?\n<image>"}, {"from": "gpt", "value": "in a stilt house"}] |
| 00000003053.jpg | 3053_evjvqa | [{"from": "human", "value": "where are these tourists going sightseeing?\n<image>"}, {"from": "gpt", "value": "in a stilt house"}] |
| 00000003054.jpg | 3054_evjvqa | [{"from": "human", "value": "what is the woman in the gray shirt doing?\n<image>"}, {"from": "gpt", "value": "choosing to buy a suitcase"}] |
| 00000003054.jpg | 3054_evjvqa | [{"from": "human", "value": "what is the woman in red doing?\n<image>"}, {"from": "gpt", "value": "preparing suitcases for customers to see"}] |
| 00000003054.jpg | 3054_evjvqa | [{"from": "human", "value": "what kind of goods is the place where the woman in the gray shirt is standing selling?\n<image>"}, {"from": "gpt", "value": "selling all kinds of suitcases"}] |
| 00000003054.jpg | 3054_evjvqa | [{"from": "human", "value": "what is the woman going to buy?\n<image>"}, {"from": "gpt", "value": "buy suitcases"}] |
| 00000003054.jpg | 3054_evjvqa | [{"from": "human", "value": "what items are displayed where the woman in the red shirt is sitting?\n<image>"}, {"from": "gpt", "value": "display suitcases"}] |
| 00000003054.jpg | 3054_evjvqa | [{"from": "human", "value": "what products does this shop sell?\n<image>"}, {"from": "gpt", "value": "this store mainly sells suitcases"}] |
| 00000003055.jpg | 3055_evjvqa | [{"from": "human", "value": "what's wrong with this road?\n<image>"}, {"from": "gpt", "value": "being flooded"}] |
| 00000003055.jpg | 3055_evjvqa | [{"from": "human", "value": "what is the man in green raincoat doing?\n<image>"}, {"from": "gpt", "value": "walking with the motorbike"}] |
| 00000003055.jpg | 3055_evjvqa | [{"from": "human", "value": "what vehicle does the man in the green raincoat use?\n<image>"}, {"from": "gpt", "value": "by motorbike"}] |
| 00000003055.jpg | 3055_evjvqa | [{"from": "human", "value": "why does the man in the green raincoat have to lead the motorbike?\n<image>"}, {"from": "gpt", "value": "because the road is flooded, motorbikes cannot run"}] |
| 00000003057.jpg | 3057_evjvqa | [{"from": "human", "value": "what are these women doing?\n<image>"}, {"from": "gpt", "value": "picking fruit from the tree"}] |
| 00000003057.jpg | 3057_evjvqa | [{"from": "human", "value": "how many people are standing picking fruit?\n<image>"}, {"from": "gpt", "value": "there are four ladies standing picking fruit"}] |
| 00000003057.jpg | 3057_evjvqa | [{"from": "human", "value": "what fruit are these four women picking?\n<image>"}, {"from": "gpt", "value": "picking oranges"}] |
| 00000003057.jpg | 3057_evjvqa | [{"from": "human", "value": "where are these four women standing?\n<image>"}, {"from": "gpt", "value": "in a fruit garden"}] |
| 00000003057.jpg | 3057_evjvqa | [{"from": "human", "value": "what are these women wearing?\n<image>"}, {"from": "gpt", "value": "wearing south vietnamese pajama"}] |
| 00000003062.jpg | 3062_evjvqa | [{"from": "human", "value": "what products does this stall sell?\n<image>"}, {"from": "gpt", "value": "sell clothes"}] |
| 00000003062.jpg | 3062_evjvqa | [{"from": "human", "value": "what products does this stall display?\n<image>"}, {"from": "gpt", "value": "display clothes"}] |
| 00000003062.jpg | 3062_evjvqa | [{"from": "human", "value": "what do the sitting women come here to buy?\n<image>"}, {"from": "gpt", "value": "come to buy clothes"}] |
| 00000003062.jpg | 3062_evjvqa | [{"from": "human", "value": "how many young boys come to buy clothes?\n<image>"}, {"from": "gpt", "value": "there is a guy who came to buy clothes"}] |
| 00000003062.jpg | 3062_evjvqa | [{"from": "human", "value": "where is this clothing stall set up?\n<image>"}, {"from": "gpt", "value": "in a market"}] |
| 00000003063.jpg | 3063_evjvqa | [{"from": "human", "value": "where are these people sitting?\n<image>"}, {"from": "gpt", "value": "sitting on a mountainside"}] |
| 00000003064.jpg | 3064_evjvqa | [{"from": "human", "value": "what is the man in black doing?\n<image>"}, {"from": "gpt", "value": "taking a picture of a little girl"}] |
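
Each `conversations` cell is a JSON-encoded list of LLaVA-style turns that alternate between a human question (carrying an `<image>` placeholder) and a gpt answer. Below is a minimal parsing sketch, assuming the cell arrives as the raw JSON string shown in the preview; a dataset loader may already hand you a parsed list, in which case the `json.loads` step can be dropped.

```python
import json

# One record copied verbatim from the preview table above.
row = {
    "image": "00000009552.jpg",
    "id": "9552_evjvqa",
    "conversations": (
        '[{"from": "human", "value": "what color is the shopping basket of '
        'this supermarket?\\n<image>"}, '
        '{"from": "gpt", "value": "the shopping basket of this supermarket is red"}]'
    ),
}

# Decode the JSON-encoded conversation list.
turns = json.loads(row["conversations"])

# In every preview row the turns alternate human question / gpt answer,
# so pairing consecutive turns recovers the QA pairs.
for question, answer in zip(turns[::2], turns[1::2]):
    assert question["from"] == "human" and answer["from"] == "gpt"
    # "<image>" marks where the image is spliced into the prompt.
    print("Q:", question["value"].replace("<image>", "").strip())
    print("A:", answer["value"])
```

The strict question/answer alternation holds for every row shown here; if the full dataset contains longer or unpaired conversations, iterating over `turns` and branching on the `from` field is the safer pattern.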