| image (string, length 15) | id (string, length 8-12) | conversations (string, length 79-268) |
|---|---|---|
| 00000000209.jpg | 209_evjvqa | [{"from": "human", "value": "how many towers are captured?\n<image>"}, {"from": "gpt", "value": "one tower"}] |
| 00000000209.jpg | 209_evjvqa | [{"from": "human", "value": "how many people are visiting the tower?\n<image>"}, {"from": "gpt", "value": "one person"}] |
| 00000000209.jpg | 209_evjvqa | [{"from": "human", "value": "how many pillars are erected outside the tower?\n<image>"}, {"from": "gpt", "value": "two pillars"}] |
| 00000000209.jpg | 209_evjvqa | [{"from": "human", "value": "what is the outfit of the girl standing at the foot of the tower?\n<image>"}, {"from": "gpt", "value": "wearing non la and a white dress"}] |
| 00000000210.jpg | 210_evjvqa | [{"from": "human", "value": "how many people are sitting on the boats?\n<image>"}, {"from": "gpt", "value": "two people"}] |
| 00000000210.jpg | 210_evjvqa | [{"from": "human", "value": "how many boats are carrying people?\n<image>"}, {"from": "gpt", "value": "one"}] |
| 00000000210.jpg | 210_evjvqa | [{"from": "human", "value": "how many running boats are there?\n<image>"}, {"from": "gpt", "value": "one"}] |
| 00000000215.jpg | 215_evjvqa | [{"from": "human", "value": "how many paintings are hanging on the wall?\n<image>"}, {"from": "gpt", "value": "three"}] |
| 00000000215.jpg | 215_evjvqa | [{"from": "human", "value": "how many people come to view the paintings?\n<image>"}, {"from": "gpt", "value": "one person"}] |
| 00000000215.jpg | 215_evjvqa | [{"from": "human", "value": "how many men come to view the paintings?\n<image>"}, {"from": "gpt", "value": "nobody comes to view the paintings"}] |
| 00000000215.jpg | 215_evjvqa | [{"from": "human", "value": "how many women come to view the paintings?\n<image>"}, {"from": "gpt", "value": "one person"}] |
| 00000000216.jpg | 216_evjvqa | [{"from": "human", "value": "how many people are visiting the temple?\n<image>"}, {"from": "gpt", "value": "nobody"}] |
| 00000000219.jpg | 219_evjvqa | [{"from": "human", "value": "how many children are in the stroller?\n<image>"}, {"from": "gpt", "value": "one"}] |
| 00000000219.jpg | 219_evjvqa | [{"from": "human", "value": "what items does this stall area sell?\n<image>"}, {"from": "gpt", "value": "fast food"}] |
| 00000000219.jpg | 219_evjvqa | [{"from": "human", "value": "how many families use trolleys to buy things?\n<image>"}, {"from": "gpt", "value": "three families"}] |
| 00000000222.jpg | 222_evjvqa | [{"from": "human", "value": "how many people are standing to buy things in the store?\n<image>"}, {"from": "gpt", "value": "two people"}] |
| 00000000222.jpg | 222_evjvqa | [{"from": "human", "value": "how many people are in the shop?\n<image>"}, {"from": "gpt", "value": "three people"}] |
| 00000000222.jpg | 222_evjvqa | [{"from": "human", "value": "what is the person in the blue shirt doing?\n<image>"}, {"from": "gpt", "value": "choosing goods"}] |
| 00000000222.jpg | 222_evjvqa | [{"from": "human", "value": "where is the shopkeeper sitting?\n<image>"}, {"from": "gpt", "value": "next to the person wearing a white shirt dotted with black stars"}] |
| 00000000223.jpg | 223_evjvqa | [{"from": "human", "value": "how many yellow shirt staff are standing near the dining table in front of the rock?\n<image>"}, {"from": "gpt", "value": "one person"}] |
| 00000000224.jpg | 224_evjvqa | [{"from": "human", "value": "what is the man in an orange hat wearing a striped shirt doing?\n<image>"}, {"from": "gpt", "value": "buying a bread"}] |
| 00000000226.jpg | 226_evjvqa | [{"from": "human", "value": "where is this scene recorded?\n<image>"}, {"from": "gpt", "value": "at a historic site"}] |
| 00000000226.jpg | 226_evjvqa | [{"from": "human", "value": "where are these people standing?\n<image>"}, {"from": "gpt", "value": "standing at a historical site"}] |
| 00000000229.jpg | 229_evjvqa | [{"from": "human", "value": "how many balloons are being used to decorate the shelves?\n<image>"}, {"from": "gpt", "value": "four"}] |
| 00000000229.jpg | 229_evjvqa | [{"from": "human", "value": "what products are mainly displayed at the opposite shelves?\n<image>"}, {"from": "gpt", "value": "shower gels"}] |
| 00000000231.jpg | 231_evjvqa | [{"from": "human", "value": "how many people are moving in the alley?\n<image>"}, {"from": "gpt", "value": "one"}] |
| 00000000232.jpg | 232_evjvqa | [{"from": "human", "value": "where is the girl wearing a white dress?\n<image>"}, {"from": "gpt", "value": "on a rooftop"}] |
| 00000000234.jpg | 234_evjvqa | [{"from": "human", "value": "where is this scene recorded?\n<image>"}, {"from": "gpt", "value": "in front of ben thanh market"}] |
| 00000000234.jpg | 234_evjvqa | [{"from": "human", "value": "what location was this scene recorded at?\n<image>"}, {"from": "gpt", "value": "in front of ben thanh market"}] |
| 00000000236.jpg | 236_evjvqa | [{"from": "human", "value": "where is the woman going sightseeing?\n<image>"}, {"from": "gpt", "value": "in a mangrove forest"}] |
| 00000000236.jpg | 236_evjvqa | [{"from": "human", "value": "what means of transport is the woman going sightseeing by?\n<image>"}, {"from": "gpt", "value": "the woman is traveling by canoe"}] |
| 00000000236.jpg | 236_evjvqa | [{"from": "human", "value": "where is the lady sitting?\n<image>"}, {"from": "gpt", "value": "on a canoe"}] |
| 00000000237.jpg | 237_evjvqa | [{"from": "human", "value": "how many people are buying vegetables?\n<image>"}, {"from": "gpt", "value": "one person"}] |
| 00000000237.jpg | 237_evjvqa | [{"from": "human", "value": "what stalls are these?\n<image>"}, {"from": "gpt", "value": "fruits and vegetables stall"}] |
| 00000000237.jpg | 237_evjvqa | [{"from": "human", "value": "how many fruit and vegetable stall owners are selling to the customers?\n<image>"}, {"from": "gpt", "value": "one"}] |
| 00000000238.jpg | 238_evjvqa | [{"from": "human", "value": "where is the woman standing?\n<image>"}, {"from": "gpt", "value": "at a historic site"}] |
| 00000000238.jpg | 238_evjvqa | [{"from": "human", "value": "where is the woman going sightseeing?\n<image>"}, {"from": "gpt", "value": "inside a historic site"}] |
| 00000000238.jpg | 238_evjvqa | [{"from": "human", "value": "how does the woman standing in the historical site dress?\n<image>"}, {"from": "gpt", "value": "a purple ao dai, wearing non la"}] |
| 00000000241.jpg | 241_evjvqa | [{"from": "human", "value": "what is the woman in the black hat doing?\n<image>"}, {"from": "gpt", "value": "picking clothes"}] |
| 00000000241.jpg | 241_evjvqa | [{"from": "human", "value": "what color of the hat is the woman picking clothes wearing?\n<image>"}, {"from": "gpt", "value": "black"}] |
| 00000000241.jpg | 241_evjvqa | [{"from": "human", "value": "what is the woman carrying a white handbag doing?\n<image>"}, {"from": "gpt", "value": "picking clothes"}] |
| 00000000241.jpg | 241_evjvqa | [{"from": "human", "value": "how many people are making choice of clothes with the woman in the black hat?\n<image>"}, {"from": "gpt", "value": "no one else"}] |
| 00000000241.jpg | 241_evjvqa | [{"from": "human", "value": "how many people are standing picking clothes?\n<image>"}, {"from": "gpt", "value": "one person"}] |
| 00000000241.jpg | 241_evjvqa | [{"from": "human", "value": "what commodities does the woman standing to choose clothes sale?\n<image>"}, {"from": "gpt", "value": "trousers, shirts, fashion hats"}] |
| 00000000242.jpg | 242_evjvqa | [{"from": "human", "value": "how many people are standing to narrate the statue?\n<image>"}, {"from": "gpt", "value": "one person"}] |
| 00000000242.jpg | 242_evjvqa | [{"from": "human", "value": "what is the woman in white shirt and black pants with microphone doing?\n<image>"}, {"from": "gpt", "value": "narrating the statue"}] |
| 00000000242.jpg | 242_evjvqa | [{"from": "human", "value": "how many people are holding the phone?\n<image>"}, {"from": "gpt", "value": "one person"}] |
| 00000000242.jpg | 242_evjvqa | [{"from": "human", "value": "how many statues are being shown?\n<image>"}, {"from": "gpt", "value": "one"}] |
| 00000000242.jpg | 242_evjvqa | [{"from": "human", "value": "what are the people standing in the gallery doing?\n<image>"}, {"from": "gpt", "value": "listening to the narration for the statue"}] |
| 00000000246.jpg | 246_evjvqa | [{"from": "human", "value": "how many people are bathing in the pool?\n<image>"}, {"from": "gpt", "value": "no one"}] |
| 00000000246.jpg | 246_evjvqa | [{"from": "human", "value": "what is this man doing?\n<image>"}, {"from": "gpt", "value": "walking around the pool"}] |
| 00000000246.jpg | 246_evjvqa | [{"from": "human", "value": "what is the man wearing the hat doing?\n<image>"}, {"from": "gpt", "value": "walking around the pool"}] |
| 00000000248.jpg | 248_evjvqa | [{"from": "human", "value": "where is the girl standing?\n<image>"}, {"from": "gpt", "value": "mid-autumn festival lantern shop"}] |
| 00000000251.jpg | 251_evjvqa | [{"from": "human", "value": "how many children are sitting at the table?\n<image>"}, {"from": "gpt", "value": "four"}] |
| 00000000251.jpg | 251_evjvqa | [{"from": "human", "value": "what things does the child wearing a purple shirt reach out to?\n<image>"}, {"from": "gpt", "value": "bananas"}] |
| 00000000251.jpg | 251_evjvqa | [{"from": "human", "value": "what is the girl standing next to the child who is touching the banana doing?\n<image>"}, {"from": "gpt", "value": "standing to see the banh tet cakes"}] |
| 00000000251.jpg | 251_evjvqa | [{"from": "human", "value": "how many children are wearing orange shirts?\n<image>"}, {"from": "gpt", "value": "three"}] |
| 00000000252.jpg | 252_evjvqa | [{"from": "human", "value": "where is this scene recorded?\n<image>"}, {"from": "gpt", "value": "on a stretch of river"}] |
| 00000000252.jpg | 252_evjvqa | [{"from": "human", "value": "where are these people going sightseeing?\n<image>"}, {"from": "gpt", "value": "in a river in an eco-tourism area"}] |
| 00000000253.jpg | 253_evjvqa | [{"from": "human", "value": "how many people are in the clothing shop?\n<image>"}, {"from": "gpt", "value": "four people"}] |
| 00000000253.jpg | 253_evjvqa | [{"from": "human", "value": "where is the woman in the orange shirt coming out from?\n<image>"}, {"from": "gpt", "value": "from a clothing shop"}] |
| 00000000254.jpg | 254_evjvqa | [{"from": "human", "value": "what is the scale weighing?\n<image>"}, {"from": "gpt", "value": "rambutan"}] |
| 00000000254.jpg | 254_evjvqa | [{"from": "human", "value": "how many knives are in the fruit stand?\n<image>"}, {"from": "gpt", "value": "one"}] |
| 00000000254.jpg | 254_evjvqa | [{"from": "human", "value": "how many scales are in the fruit stand?\n<image>"}, {"from": "gpt", "value": "one"}] |
| 00000000254.jpg | 254_evjvqa | [{"from": "human", "value": "what kind of products does this stall sell?\n<image>"}, {"from": "gpt", "value": "fruits"}] |
| 00000000255.jpg | 255_evjvqa | [{"from": "human", "value": "how many people are standing right next to the tank?\n<image>"}, {"from": "gpt", "value": "one person"}] |
| 00000000255.jpg | 255_evjvqa | [{"from": "human", "value": "what things are currently on display in the exhibition area?\n<image>"}, {"from": "gpt", "value": "the tank"}] |
| 00000000257.jpg | 257_evjvqa | [{"from": "human", "value": "how many people are on the glass panel?\n<image>"}, {"from": "gpt", "value": "one person"}] |
| 00000000257.jpg | 257_evjvqa | [{"from": "human", "value": "where is the girl lying?\n<image>"}, {"from": "gpt", "value": "on the glass panel of a high-rise building"}] |
| 00000000257.jpg | 257_evjvqa | [{"from": "human", "value": "how many people are going with the girl?\n<image>"}, {"from": "gpt", "value": "the girl is walking alone"}] |
| 00000000261.jpg | 261_evjvqa | [{"from": "human", "value": "how many people are standing to pick goods at the confectionery shelves?\n<image>"}, {"from": "gpt", "value": "three people"}] |
| 00000000261.jpg | 261_evjvqa | [{"from": "human", "value": "how many people are coming to view the confectionery shelves right now?\n<image>"}, {"from": "gpt", "value": "three people"}] |
| 00000000265.jpg | 265_evjvqa | [{"from": "human", "value": "where is the young man standing?\n<image>"}, {"from": "gpt", "value": "under the tree"}] |
| 00000000265.jpg | 265_evjvqa | [{"from": "human", "value": "how many people are standing at the foot of the tree?\n<image>"}, {"from": "gpt", "value": "one"}] |
| 00000000267.jpg | 267_evjvqa | [{"from": "human", "value": "how many people are sitting in the house?\n<image>"}, {"from": "gpt", "value": "five people"}] |
| 00000000267.jpg | 267_evjvqa | [{"from": "human", "value": "how many people are entering the house?\n<image>"}, {"from": "gpt", "value": "one person"}] |
| 00000000267.jpg | 267_evjvqa | [{"from": "human", "value": "what is the outermost guest in the black shirt doing?\n<image>"}, {"from": "gpt", "value": "drinking water"}] |
| 00000000267.jpg | 267_evjvqa | [{"from": "human", "value": "where in the house is the man sitting?\n<image>"}, {"from": "gpt", "value": "between two women"}] |
| 00000000267.jpg | 267_evjvqa | [{"from": "human", "value": "how many people are standing in the yard?\n<image>"}, {"from": "gpt", "value": "two people"}] |
| 00000000270.jpg | 270_evjvqa | [{"from": "human", "value": "where is the girl in white sitting?\n<image>"}, {"from": "gpt", "value": "outside a bookshop"}] |
| 00000000270.jpg | 270_evjvqa | [{"from": "human", "value": "how many people are there in the bookstore?\n<image>"}, {"from": "gpt", "value": "two people"}] |
| 00000000270.jpg | 270_evjvqa | [{"from": "human", "value": "how many people is the girl sitting outside the bookstore with?\n<image>"}, {"from": "gpt", "value": "going with no one"}] |
| 00000000272.jpg | 272_evjvqa | [{"from": "human", "value": "what is the man in the white shirt doing?\n<image>"}, {"from": "gpt", "value": "going jogging"}] |
| 00000000272.jpg | 272_evjvqa | [{"from": "human", "value": "what is the boy in the red striped shirt doing?\n<image>"}, {"from": "gpt", "value": "exercising"}] |
| 00000000272.jpg | 272_evjvqa | [{"from": "human", "value": "how many women are exercising?\n<image>"}, {"from": "gpt", "value": "four women"}] |
| 00000000272.jpg | 272_evjvqa | [{"from": "human", "value": "what is the man in the white shirt and shorts doing?\n<image>"}, {"from": "gpt", "value": "jogging"}] |
| 00000000272.jpg | 272_evjvqa | [{"from": "human", "value": "what color of the shirt is the man jogging wearing?\n<image>"}, {"from": "gpt", "value": "white"}] |
| 00000000273.jpg | 273_evjvqa | [{"from": "human", "value": "what do the stalls beside the right pillar sell?\n<image>"}, {"from": "gpt", "value": "tet ornaments"}] |
| 00000000274.jpg | 274_evjvqa | [{"from": "human", "value": "what is in the left of the ho chi minh statue?\n<image>"}, {"from": "gpt", "value": "the flag"}] |
| 00000000274.jpg | 274_evjvqa | [{"from": "human", "value": "how many people are standing on the podium?\n<image>"}, {"from": "gpt", "value": "two people"}] |
| 00000000274.jpg | 274_evjvqa | [{"from": "human", "value": "what color of the shirt are the two people standing on the podium wearing?\n<image>"}, {"from": "gpt", "value": "white"}] |
| 00000000282.jpg | 282_evjvqa | [{"from": "human", "value": "where is this scene recorded?\n<image>"}, {"from": "gpt", "value": "in an airport"}] |
| 00000000290.jpg | 290_evjvqa | [{"from": "human", "value": "where is the lady sitting?\n<image>"}, {"from": "gpt", "value": "on a tree branch in a forest"}] |
| 00000000290.jpg | 290_evjvqa | [{"from": "human", "value": "how do the woman wear clothes?\n<image>"}, {"from": "gpt", "value": "a blue south vietnamese pajama shirt, south vietnamese pajama pants"}] |
| 00000000290.jpg | 290_evjvqa | [{"from": "human", "value": "where is the woman?\n<image>"}, {"from": "gpt", "value": "sitting in a forest"}] |
| 00000000290.jpg | 290_evjvqa | [{"from": "human", "value": "where is the woman going sightseeing?\n<image>"}, {"from": "gpt", "value": "in a tropical jungle"}] |
| 00000000293.jpg | 293_evjvqa | [{"from": "human", "value": "how many vehicles are running on the road?\n<image>"}, {"from": "gpt", "value": "there is a motorbike running on the street"}] |
| 00000000295.jpg | 295_evjvqa | [{"from": "human", "value": "what color of the shirt does the woman wearing a non la wear?\n<image>"}, {"from": "gpt", "value": "the woman in a non la is wearing a green shirt"}] |
| 00000000295.jpg | 295_evjvqa | [{"from": "human", "value": "how many people wearing non la are moving on the street?\n<image>"}, {"from": "gpt", "value": "there is a person who is wearing non la and moving on the street"}] |
| 00000000298.jpg | 298_evjvqa | [{"from": "human", "value": "where are the houses built?\n<image>"}, {"from": "gpt", "value": "on the water"}] |
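The sketch below shows one way to consume a row like the ones in the preview above. It is a minimal example, not an official loader for this dataset: the `record` literal is copied from the first preview row, and it assumes (as the preview suggests) that each `conversations` cell is a JSON-encoded list of `{"from", "value"}` turns in which `<image>` marks where the image named in the `image` column is injected.

```python
import json

# One record, copied from the first row of the preview above.
record = {
    "image": "00000000209.jpg",
    "id": "209_evjvqa",
    "conversations": '[{"from": "human", "value": "how many towers are captured?\\n<image>"}, {"from": "gpt", "value": "one tower"}]',
}

# The conversations cell is a JSON string; decode it into a list of turns.
turns = json.loads(record["conversations"])

for turn in turns:
    # Human turns may embed an <image> placeholder; substitute the file name
    # from the image column so the dialogue reads standalone.
    speaker = turn["from"]
    text = turn["value"].replace("<image>", f'[{record["image"]}]')
    print(f"{speaker}: {text.strip()}")
```

Run on the first row, this prints the human question (with the image placeholder resolved to `00000000209.jpg`) followed by the gpt answer "one tower"; the same loop applies unchanged to every row in the table.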