| image (string, 15 chars) | id (string, 8–12 chars) | conversations (string, 79–268 chars) |
|---|---|---|
| 00000004883.jpg | 4883_evjvqa | [{"from": "human", "value": "what color is the car parked by the street?\n<image>"}, {"from": "gpt", "value": "the car is white"}] |
| 00000004887.jpg | 4887_evjvqa | [{"from": "human", "value": "on which hand is the girl wearing the watch?\n<image>"}, {"from": "gpt", "value": "the girl is wearing the watch on her left hand"}] |
| 00000004887.jpg | 4887_evjvqa | [{"from": "human", "value": "what is the man leaning against?\n<image>"}, {"from": "gpt", "value": "leaning against the wall"}] |
| 00000004888.jpg | 4888_evjvqa | [{"from": "human", "value": "what is this area?\n<image>"}, {"from": "gpt", "value": "this is a seaport"}] |
| 00000004888.jpg | 4888_evjvqa | [{"from": "human", "value": "how many big ships are docked in the seaport?\n<image>"}, {"from": "gpt", "value": "there are two big ship docked in the seaport"}] |
| 00000004888.jpg | 4888_evjvqa | [{"from": "human", "value": "what color are the cranes?\n<image>"}, {"from": "gpt", "value": "the cranes are yellow"}] |
| 00000004889.jpg | 4889_evjvqa | [{"from": "human", "value": "how many christmas trees are in the mall lobby?\n<image>"}, {"from": "gpt", "value": "there is one christmas tree in the mall lobby"}] |
| 00000004889.jpg | 4889_evjvqa | [{"from": "human", "value": "how many reindeer figures are hanged on the mall?\n<image>"}, {"from": "gpt", "value": "there are two reindeer figures"}] |
| 00000004889.jpg | 4889_evjvqa | [{"from": "human", "value": "what color suit is santa claus wearing?\n<image>"}, {"from": "gpt", "value": "wearing red suit"}] |
| 00000004890.jpg | 4890_evjvqa | [{"from": "human", "value": "whose hand is the kid holding?\n<image>"}, {"from": "gpt", "value": "holding the hand of the man in red shirt"}] |
| 00000004890.jpg | 4890_evjvqa | [{"from": "human", "value": "by which hand is the kid holding the bag?\n<image>"}, {"from": "gpt", "value": "the kid is holding the bag by right hand"}] |
| 00000004892.jpg | 4892_evjvqa | [{"from": "human", "value": "what are the children doing?\n<image>"}, {"from": "gpt", "value": "lining up to get home"}] |
| 00000004892.jpg | 4892_evjvqa | [{"from": "human", "value": "how many lines are the children lining up?\n<image>"}, {"from": "gpt", "value": "lining up in two lines"}] |
| 00000004893.jpg | 4893_evjvqa | [{"from": "human", "value": "how many bikes are running in the street?\n<image>"}, {"from": "gpt", "value": "there are three bikes running in the street?"}] |
| 00000004893.jpg | 4893_evjvqa | [{"from": "human", "value": "where is the bike carrying two people running?\n<image>"}, {"from": "gpt", "value": "running between the other two bikes"}] |
| 00000004893.jpg | 4893_evjvqa | [{"from": "human", "value": "what color is the roadside building?\n<image>"}, {"from": "gpt", "value": "the building is white"}] |
| 00000004896.jpg | 4896_evjvqa | [{"from": "human", "value": "what is this place?\n<image>"}, {"from": "gpt", "value": "this is a walking street"}] |
| 00000004896.jpg | 4896_evjvqa | [{"from": "human", "value": "what color shirt is the man holding a camera wearing?\n<image>"}, {"from": "gpt", "value": "wearing a blue shirt"}] |
| 00000004896.jpg | 4896_evjvqa | [{"from": "human", "value": "what color shirt is the girl holding a phone to take a picture wearing?\n<image>"}, {"from": "gpt", "value": "wearing a black shirt"}] |
| 00000004897.jpg | 4897_evjvqa | [{"from": "human", "value": "how many ferrys are docking?\n<image>"}, {"from": "gpt", "value": "there are two ferrys docking"}] |
| 00000004898.jpg | 4898_evjvqa | [{"from": "human", "value": "what is this place?\n<image>"}, {"from": "gpt", "value": "this is a gallery"}] |
| 00000004898.jpg | 4898_evjvqa | [{"from": "human", "value": "how many types of book are placed on the table?\n<image>"}, {"from": "gpt", "value": "there are three types of book placed on the table"}] |
| 00000004898.jpg | 4898_evjvqa | [{"from": "human", "value": "what is displayed on the wall?\n<image>"}, {"from": "gpt", "value": "displayed on the wall are the pictures"}] |
| 00000004899.jpg | 4899_evjvqa | [{"from": "human", "value": "how many people are on the canoe?\n<image>"}, {"from": "gpt", "value": "there are three people on the canoe"}] |
| 00000004899.jpg | 4899_evjvqa | [{"from": "human", "value": "which side is the boatman paddling?\n<image>"}, {"from": "gpt", "value": "paddling on the left"}] |
| 00000004899.jpg | 4899_evjvqa | [{"from": "human", "value": "where is canoe going?\n<image>"}, {"from": "gpt", "value": "going on a river in the jungle"}] |
| 00000000400.jpg | 400_evjvqa | [{"from": "human", "value": "how many people are posing for a photo?\n<image>"}, {"from": "gpt", "value": "one"}] |
| 00000000400.jpg | 400_evjvqa | [{"from": "human", "value": "what color dress is the sitting woman wearing?\n<image>"}, {"from": "gpt", "value": "black"}] |
| 00000000401.jpg | 401_evjvqa | [{"from": "human", "value": "how many people are standing on the bridge?\n<image>"}, {"from": "gpt", "value": "no one is standing on the bridge"}] |
| 00000000401.jpg | 401_evjvqa | [{"from": "human", "value": "how many people are on the boat?\n<image>"}, {"from": "gpt", "value": "no one is on the boat"}] |
| 00000000401.jpg | 401_evjvqa | [{"from": "human", "value": "how many people are posing for a photo on the bridge?\n<image>"}, {"from": "gpt", "value": "no one is posing for a photo"}] |
| 00000000402.jpg | 402_evjvqa | [{"from": "human", "value": "how many bowls of fish sauce are there?\n<image>"}, {"from": "gpt", "value": "two bowls"}] |
| 00000000402.jpg | 402_evjvqa | [{"from": "human", "value": "how many dishes are on the table?\n<image>"}, {"from": "gpt", "value": "two dishes"}] |
| 00000000402.jpg | 402_evjvqa | [{"from": "human", "value": "how many bowls of fish sauce with chili are there?\n<image>"}, {"from": "gpt", "value": "two bowls"}] |
| 00000000405.jpg | 405_evjvqa | [{"from": "human", "value": "what is the girl wearing conical hat sitting on the roof doing?\n<image>"}, {"from": "gpt", "value": "posing for a photo"}] |
| 00000000405.jpg | 405_evjvqa | [{"from": "human", "value": "how many people wearing conical hat are in the upper area?\n<image>"}, {"from": "gpt", "value": "one"}] |
| 00000000405.jpg | 405_evjvqa | [{"from": "human", "value": "how many bags does the girl wearing conical hat carry?\n<image>"}, {"from": "gpt", "value": "one"}] |
| 00000000405.jpg | 405_evjvqa | [{"from": "human", "value": "what color shirt is the girl wearing conical hat sitting in the upper area?\n<image>"}, {"from": "gpt", "value": "blue"}] |
| 00000000406.jpg | 406_evjvqa | [{"from": "human", "value": "how many people are buying food?\n<image>"}, {"from": "gpt", "value": "no one"}] |
| 00000000406.jpg | 406_evjvqa | [{"from": "human", "value": "how many kids are playing at the food court?\n<image>"}, {"from": "gpt", "value": "no one"}] |
| 00000000406.jpg | 406_evjvqa | [{"from": "human", "value": "how many women come to buy food?\n<image>"}, {"from": "gpt", "value": "no woman comes to buy food"}] |
| 00000000409.jpg | 409_evjvqa | [{"from": "human", "value": "how many people are coming to burn incense?\n<image>"}, {"from": "gpt", "value": "three"}] |
| 00000000409.jpg | 409_evjvqa | [{"from": "human", "value": "how many people are bowing in front of the censer?\n<image>"}, {"from": "gpt", "value": "two"}] |
| 00000000410.jpg | 410_evjvqa | [{"from": "human", "value": "how many vehicles are parked on the roadside?\n<image>"}, {"from": "gpt", "value": "two vehicles are parked on the roadside"}] |
| 00000000411.jpg | 411_evjvqa | [{"from": "human", "value": "how many people does the girl visit with?\n<image>"}, {"from": "gpt", "value": "she visits alone"}] |
| 00000000411.jpg | 411_evjvqa | [{"from": "human", "value": "how many people does the girl pose for a photo with?\n<image>"}, {"from": "gpt", "value": "she poses alone"}] |
| 00000000413.jpg | 413_evjvqa | [{"from": "human", "value": "how many people are going into the clinic?\n<image>"}, {"from": "gpt", "value": "one"}] |
| 00000000416.jpg | 416_evjvqa | [{"from": "human", "value": "what is the supermarket staff doing?\n<image>"}, {"from": "gpt", "value": "rearranging goods"}] |
| 00000000416.jpg | 416_evjvqa | [{"from": "human", "value": "how many staffs are standing at the counter?\n<image>"}, {"from": "gpt", "value": "one"}] |
| 00000000416.jpg | 416_evjvqa | [{"from": "human", "value": "what products does this counter sell?\n<image>"}, {"from": "gpt", "value": "shampoo, shower gell"}] |
| 00000000416.jpg | 416_evjvqa | [{"from": "human", "value": "what color uniform do the staff here wear?\n<image>"}, {"from": "gpt", "value": "pink"}] |
| 00000000417.jpg | 417_evjvqa | [{"from": "human", "value": "where are these stores located?\n<image>"}, {"from": "gpt", "value": "ba dinh - ha noi"}] |
| 00000000418.jpg | 418_evjvqa | [{"from": "human", "value": "where is the girl sitting?\n<image>"}, {"from": "gpt", "value": "in a room in a tower"}] |
| 00000000418.jpg | 418_evjvqa | [{"from": "human", "value": "what is the sitting girl doing?\n<image>"}, {"from": "gpt", "value": "scrolling on her phone"}] |
| 00000000423.jpg | 423_evjvqa | [{"from": "human", "value": "what items does this store sell?\n<image>"}, {"from": "gpt", "value": "christmas decorations"}] |
| 00000000423.jpg | 423_evjvqa | [{"from": "human", "value": "what is this store hanging in the front?\n<image>"}, {"from": "gpt", "value": "christmas decorations"}] |
| 00000000426.jpg | 426_evjvqa | [{"from": "human", "value": "how many people are cheering on the table?\n<image>"}, {"from": "gpt", "value": "three"}] |
| 00000000426.jpg | 426_evjvqa | [{"from": "human", "value": "how many people are sitting at the same table and cheering?\n<image>"}, {"from": "gpt", "value": "three"}] |
| 00000000426.jpg | 426_evjvqa | [{"from": "human", "value": "ưhat are the three men at the same table doing?\n<image>"}, {"from": "gpt", "value": "cheering"}] |
| 00000000430.jpg | 430_evjvqa | [{"from": "human", "value": "how many stalls are there?\n<image>"}, {"from": "gpt", "value": "three"}] |
| 00000000430.jpg | 430_evjvqa | [{"from": "human", "value": "how many stalls are having customers?\n<image>"}, {"from": "gpt", "value": "two"}] |
| 00000000430.jpg | 430_evjvqa | [{"from": "human", "value": "what items do the stalls on the right sell?\n<image>"}, {"from": "gpt", "value": "clothes"}] |
| 00000000432.jpg | 432_evjvqa | [{"from": "human", "value": "where are the people sitting and eating?\n<image>"}, {"from": "gpt", "value": "railwayside"}] |
| 00000000432.jpg | 432_evjvqa | [{"from": "human", "value": "how many people are walking on the railway?\n<image>"}, {"from": "gpt", "value": "one"}] |
| 00000000432.jpg | 432_evjvqa | [{"from": "human", "value": "where are these restaurants located?\n<image>"}, {"from": "gpt", "value": "both sides of the railway"}] |
| 00000000434.jpg | 434_evjvqa | [{"from": "human", "value": "which direction are the coast guard looking?\n<image>"}, {"from": "gpt", "value": "towards the direction of two far away ships"}] |
| 00000000438.jpg | 438_evjvqa | [{"from": "human", "value": "what does this painting depict?\n<image>"}, {"from": "gpt", "value": "a scene on a rural street corner"}] |
| 00000000438.jpg | 438_evjvqa | [{"from": "human", "value": "how many people are riding a cyclo in the paining?\n<image>"}, {"from": "gpt", "value": "one person is riding a cyclo"}] |
| 00000000443.jpg | 443_evjvqa | [{"from": "human", "value": "how many bowls of porridge are there?\n<image>"}, {"from": "gpt", "value": "three"}] |
| 00000000443.jpg | 443_evjvqa | [{"from": "human", "value": "how many bowls of fish sauce are used?\n<image>"}, {"from": "gpt", "value": "one"}] |
| 00000000443.jpg | 443_evjvqa | [{"from": "human", "value": "how many bowls of porridge are there for children?\n<image>"}, {"from": "gpt", "value": "one"}] |
| 00000000443.jpg | 443_evjvqa | [{"from": "human", "value": "how many bowls of porridge are there for aldults?\n<image>"}, {"from": "gpt", "value": "two"}] |
| 00000000445.jpg | 445_evjvqa | [{"from": "human", "value": "where is this scene recorded?\n<image>"}, {"from": "gpt", "value": "in a supermarket"}] |
| 00000000446.jpg | 446_evjvqa | [{"from": "human", "value": "how many people are on the boat?\n<image>"}, {"from": "gpt", "value": "two"}] |
| 00000000446.jpg | 446_evjvqa | [{"from": "human", "value": "what is the man doing?\n<image>"}, {"from": "gpt", "value": "sailing"}] |
| 00000000446.jpg | 446_evjvqa | [{"from": "human", "value": "how is the woman dressed?\n<image>"}, {"from": "gpt", "value": "blouse and bandana"}] |
| 00000000446.jpg | 446_evjvqa | [{"from": "human", "value": "how is the man dressed?\n<image>"}, {"from": "gpt", "value": "blue blouse"}] |
| 00000000446.jpg | 446_evjvqa | [{"from": "human", "value": "where are these two sitting?\n<image>"}, {"from": "gpt", "value": "on a boat on the water"}] |
| 00000000447.jpg | 447_evjvqa | [{"from": "human", "value": "how many people are on the boat carrying watermelons?\n<image>"}, {"from": "gpt", "value": "two"}] |
| 00000000447.jpg | 447_evjvqa | [{"from": "human", "value": "what is the boat with two people on the right side carrying?\n<image>"}, {"from": "gpt", "value": "watermelons"}] |
| 00000000447.jpg | 447_evjvqa | [{"from": "human", "value": "how many boats are on the river?\n<image>"}, {"from": "gpt", "value": "two"}] |
| 00000000448.jpg | 448_evjvqa | [{"from": "human", "value": "how long does the promotion last?\n<image>"}, {"from": "gpt", "value": "seven days"}] |
| 00000000448.jpg | 448_evjvqa | [{"from": "human", "value": "what promotion is currently running at the supermarket?\n<image>"}, {"from": "gpt", "value": "7 golden days"}] |
| 00000000448.jpg | 448_evjvqa | [{"from": "human", "value": "what color shirt uniform does the security guard of the supermarket wear?\n<image>"}, {"from": "gpt", "value": "white"}] |
| 00000000448.jpg | 448_evjvqa | [{"from": "human", "value": "how many people are shopping in the supermarket?\n<image>"}, {"from": "gpt", "value": "no one is shopping"}] |
| 00000000453.jpg | 453_evjvqa | [{"from": "human", "value": "what items are the passersby stopping by to buy?\n<image>"}, {"from": "gpt", "value": "flowers"}] |
| 00000000453.jpg | 453_evjvqa | [{"from": "human", "value": "what do these stalls sell?\n<image>"}, {"from": "gpt", "value": "flowers"}] |
| 00000000453.jpg | 453_evjvqa | [{"from": "human", "value": "what is the flower seller in the gray jacket doing?\n<image>"}, {"from": "gpt", "value": "giving flowers to the customer"}] |
| 00000000455.jpg | 455_evjvqa | [{"from": "human", "value": "where is the girl in the white dress being photographed?\n<image>"}, {"from": "gpt", "value": "by the railway line"}] |
| 00000000455.jpg | 455_evjvqa | [{"from": "human", "value": "how many people is the girl in the white dress being photographed with?\n<image>"}, {"from": "gpt", "value": "she is being photographed alone"}] |
| 00000000455.jpg | 455_evjvqa | [{"from": "human", "value": "how is the girl standing by the railway line dressed?\n<image>"}, {"from": "gpt", "value": "white dress"}] |
| 00000000456.jpg | 456_evjvqa | [{"from": "human", "value": "where are the men standing?\n<image>"}, {"from": "gpt", "value": "in the supermarket"}] |
| 00000000456.jpg | 456_evjvqa | [{"from": "human", "value": "what color shirt is the woman behind the man in the dark blue shirt wearing?\n<image>"}, {"from": "gpt", "value": "yellow"}] |
| 00000000456.jpg | 456_evjvqa | [{"from": "human", "value": "how many people wear light blue uniforms?\n<image>"}, {"from": "gpt", "value": "four"}] |
| 00000000456.jpg | 456_evjvqa | [{"from": "human", "value": "what are the people in light blue uniforms doing?\n<image>"}, {"from": "gpt", "value": "viewing goods"}] |
| 00000000457.jpg | 457_evjvqa | [{"from": "human", "value": "where is the group of students sitting?\n<image>"}, {"from": "gpt", "value": "in the bus"}] |
| 00000000457.jpg | 457_evjvqa | [{"from": "human", "value": "what does the group of students wear on their hands?\n<image>"}, {"from": "gpt", "value": "green ribbon"}] |
| 00000000457.jpg | 457_evjvqa | [{"from": "human", "value": "what color is the uniform of the group of students?\n<image>"}, {"from": "gpt", "value": "orange"}] |
| 00000000458.jpg | 458_evjvqa | [{"from": "human", "value": "where is this scene recorded?\n<image>"}, {"from": "gpt", "value": "in a rock cave"}] |
| 00000000460.jpg | 460_evjvqa | [{"from": "human", "value": "what is the woman in the green shirt right behind the group of people doing?\n<image>"}, {"from": "gpt", "value": "grilling the food"}] |
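
Each `conversations` cell stores a JSON-encoded list of `{"from", "value"}` turns; the `<image>` placeholder inside a human turn marks where the picture named in the `image` column is injected, and the following `gpt` turn holds the answer. The snippet below is a minimal sketch (not an official loader for this dataset) of how one such row could be decoded with the Python standard library; the `row` dict is a hypothetical stand-in that simply mirrors the first preview row above.

```python
import json

# Hypothetical row dict mirroring the preview columns: image, id, conversations.
# The "conversations" value is kept as the raw JSON string shown in the table.
row = {
    "image": "00000004883.jpg",
    "id": "4883_evjvqa",
    "conversations": '[{"from": "human", "value": "what color is the car parked by the street?\\n<image>"}, '
                     '{"from": "gpt", "value": "the car is white"}]',
}

# Decode the JSON string into a list of turn dicts.
turns = json.loads(row["conversations"])

for turn in turns:
    # Human turns contain the question plus the "<image>" placeholder;
    # gpt turns contain the answer text.
    print(f'{turn["from"]}: {turn["value"]}')
```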