image (string, length 15) | id (string, length 8–12) | conversations (string, length 79–268) |
---|---|---|
00000002770.jpg | 2770_evjvqa | [{"from": "human", "value": "how many potted plants are placed on the window?\n<image>"}, {"from": "gpt", "value": "five"}] |
00000002770.jpg | 2770_evjvqa | [{"from": "human", "value": "what color are the walls of the store painted?\n<image>"}, {"from": "gpt", "value": "white"}] |
00000002770.jpg | 2770_evjvqa | [{"from": "human", "value": "where are five same potted plants placed side by side at this bar?\n<image>"}, {"from": "gpt", "value": "on the window"}] |
00000002771.jpg | 2771_evjvqa | [{"from": "human", "value": "what color materials are the decorations hanging high on the road made with?\n<image>"}, {"from": "gpt", "value": "yellow"}] |
00000002771.jpg | 2771_evjvqa | [{"from": "human", "value": "what color t-shirt is the person riding the bicycle on the road?\n<image>"}, {"from": "gpt", "value": "black"}] |
00000002774.jpg | 2774_evjvqa | [{"from": "human", "value": "what kind of vehicles are parked on the side of the road?\n<image>"}, {"from": "gpt", "value": "the car"}] |
00000002777.jpg | 2777_evjvqa | [{"from": "human", "value": "how do two young men wear the same clothes?\n<image>"}, {"from": "gpt", "value": "they are also wearing black clothes"}] |
00000002777.jpg | 2777_evjvqa | [{"from": "human", "value": "wh are two young men in black shirts walking on the street?\n<image>"}, {"from": "gpt", "value": "they are walking by foot"}] |
00000002777.jpg | 2777_evjvqa | [{"from": "human", "value": "which of the two young people walking with the hat?\n<image>"}, {"from": "gpt", "value": "the young man is on the left is wearing a hat"}] |
00000002779.jpg | 2779_evjvqa | [{"from": "human", "value": "how many female customers are sitting in the coffee store?\n<image>"}, {"from": "gpt", "value": "two"}] |
00000002779.jpg | 2779_evjvqa | [{"from": "human", "value": "what color is the wall of the cafe painted?\n<image>"}, {"from": "gpt", "value": "yellow"}] |
00000002779.jpg | 2779_evjvqa | [{"from": "human", "value": "what are the red things hung on the wall in front of the cafe?\n<image>"}, {"from": "gpt", "value": "the lanterns"}] |
00000002785.jpg | 2785_evjvqa | [{"from": "human", "value": "what items are on the wrist of young men?\n<image>"}, {"from": "gpt", "value": "the watch"}] |
00000002785.jpg | 2785_evjvqa | [{"from": "human", "value": "what is the boy wearing the glasses doing?\n<image>"}, {"from": "gpt", "value": "he is shopping in the market"}] |
00000002785.jpg | 2785_evjvqa | [{"from": "human", "value": "what color frame are the glasses the young man is wearing?\n<image>"}, {"from": "gpt", "value": "black"}] |
00000002786.jpg | 2786_evjvqa | [{"from": "human", "value": "what is the woman standing there for?\n<image>"}, {"from": "gpt", "value": "she is giving a speech"}] |
00000002786.jpg | 2786_evjvqa | [{"from": "human", "value": "how many media mics are used at this podium?\n<image>"}, {"from": "gpt", "value": "five"}] |
00000002789.jpg | 2789_evjvqa | [{"from": "human", "value": "what kind of hat is the man standing wearing?\n<image>"}, {"from": "gpt", "value": "the cap"}] |
00000002789.jpg | 2789_evjvqa | [{"from": "human", "value": "what kind of hats do the women at the market wear?\n<image>"}, {"from": "gpt", "value": "the conical hats"}] |
00000002789.jpg | 2789_evjvqa | [{"from": "human", "value": "what is the pose of the man in the cream beige cap?\n<image>"}, {"from": "gpt", "value": "he is standing"}] |
00000002790.jpg | 2790_evjvqa | [{"from": "human", "value": "what is the girl standing next to?\n<image>"}, {"from": "gpt", "value": "the chirstmas tree"}] |
00000002790.jpg | 2790_evjvqa | [{"from": "human", "value": "what kind of costume is the girl standing next to the christmas tree wearing?\n<image>"}, {"from": "gpt", "value": "she is wearing the pink t-shirt and the black skirt"}] |
00000002790.jpg | 2790_evjvqa | [{"from": "human", "value": "who is standing next to the chirstmas tree?\n<image>"}, {"from": "gpt", "value": "the girl is in the pink t-shirt anf the black skirt"}] |
00000002799.jpg | 2799_evjvqa | [{"from": "human", "value": "what are these men wearing headphones for?\n<image>"}, {"from": "gpt", "value": "to listen to explanations of the works of art"}] |
00000002799.jpg | 2799_evjvqa | [{"from": "human", "value": "which are the men's eyes with the headphones on?\n<image>"}, {"from": "gpt", "value": "the arts is on the wall"}] |
00000002799.jpg | 2799_evjvqa | [{"from": "human", "value": "what equipment are the men with their eyes on the wall art wearing on their heads?\n<image>"}, {"from": "gpt", "value": "the headphones"}] |
00000006301.jpg | 6301_evjvqa | [{"from": "human", "value": "how many people in blue are driving on the street?\n<image>"}, {"from": "gpt", "value": "there is one person in blue driving on the street."}] |
00000006301.jpg | 6301_evjvqa | [{"from": "human", "value": "how many bicycles are parked on the side of the street?\n<image>"}, {"from": "gpt", "value": "there is one bicycle parked on the side of the street."}] |
00000006301.jpg | 6301_evjvqa | [{"from": "human", "value": "where is the bicycle parked?\n<image>"}, {"from": "gpt", "value": "on the side of the street."}] |
00000006306.jpg | 6306_evjvqa | [{"from": "human", "value": "what is he doing?\n<image>"}, {"from": "gpt", "value": "he is painting."}] |
00000006306.jpg | 6306_evjvqa | [{"from": "human", "value": "what color is his shirt?\n<image>"}, {"from": "gpt", "value": "it is black."}] |
00000006306.jpg | 6306_evjvqa | [{"from": "human", "value": "what color are his sneakers?\n<image>"}, {"from": "gpt", "value": "they are black."}] |
00000006307.jpg | 6307_evjvqa | [{"from": "human", "value": "what color is the house that they are standing next to?\n<image>"}, {"from": "gpt", "value": "the house is yellow."}] |
00000006307.jpg | 6307_evjvqa | [{"from": "human", "value": "what color is the lantern?\n<image>"}, {"from": "gpt", "value": "it is grey."}] |
00000006307.jpg | 6307_evjvqa | [{"from": "human", "value": "how many women in white dresses and pink hats are there?\n<image>"}, {"from": "gpt", "value": "there is one woman in white dress and pink hat."}] |
00000006308.jpg | 6308_evjvqa | [{"from": "human", "value": "what color is her bracelet?\n<image>"}, {"from": "gpt", "value": "it is black."}] |
00000006308.jpg | 6308_evjvqa | [{"from": "human", "value": "what color is her hat?\n<image>"}, {"from": "gpt", "value": "it is beige."}] |
00000006308.jpg | 6308_evjvqa | [{"from": "human", "value": "what color is her shirt?\n<image>"}, {"from": "gpt", "value": "it is pink."}] |
00000006314.jpg | 6314_evjvqa | [{"from": "human", "value": "what color is her umbrella?\n<image>"}, {"from": "gpt", "value": "it is pink."}] |
00000006314.jpg | 6314_evjvqa | [{"from": "human", "value": "how many young girls are walking on the street?\n<image>"}, {"from": "gpt", "value": "there are two young girls walking on the street."}] |
00000006314.jpg | 6314_evjvqa | [{"from": "human", "value": "what color are the bricks of the temple?\n<image>"}, {"from": "gpt", "value": "they are pink."}] |
00000006317.jpg | 6317_evjvqa | [{"from": "human", "value": "what color are the staffs' uniforms?\n<image>"}, {"from": "gpt", "value": "they are red."}] |
00000006317.jpg | 6317_evjvqa | [{"from": "human", "value": "what color is the customer's handbag?\n<image>"}, {"from": "gpt", "value": "it is black."}] |
00000006317.jpg | 6317_evjvqa | [{"from": "human", "value": "what are they doing?\n<image>"}, {"from": "gpt", "value": "they are conducting trade at the shopping center."}] |
00000006318.jpg | 6318_evjvqa | [{"from": "human", "value": "what are they doing?\n<image>"}, {"from": "gpt", "value": "they are paddling."}] |
00000006318.jpg | 6318_evjvqa | [{"from": "human", "value": "what is the girl in white's facial expression?\n<image>"}, {"from": "gpt", "value": "she is smiling."}] |
00000006318.jpg | 6318_evjvqa | [{"from": "human", "value": "what color are the canoes?\n<image>"}, {"from": "gpt", "value": "they are yellow."}] |
00000006323.jpg | 6323_evjvqa | [{"from": "human", "value": "what is the woman in white doing?\n<image>"}, {"from": "gpt", "value": "she is selling rice noodles."}] |
00000006323.jpg | 6323_evjvqa | [{"from": "human", "value": "how many people are wearing raincoats?\n<image>"}, {"from": "gpt", "value": "there is one person wearing a raincoat."}] |
00000006323.jpg | 6323_evjvqa | [{"from": "human", "value": "what color is the woman in white's mask?\n<image>"}, {"from": "gpt", "value": "it is pink."}] |
00000006324.jpg | 6324_evjvqa | [{"from": "human", "value": "what is he doing?\n<image>"}, {"from": "gpt", "value": "he is making a billboard."}] |
00000006324.jpg | 6324_evjvqa | [{"from": "human", "value": "what color is the hat that he is wearing?\n<image>"}, {"from": "gpt", "value": "it is black."}] |
00000006324.jpg | 6324_evjvqa | [{"from": "human", "value": "what color is his hat?\n<image>"}, {"from": "gpt", "value": "it is black."}] |
00000006325.jpg | 6325_evjvqa | [{"from": "human", "value": "what kind of clothing is the woman delivering the speech wearing?\n<image>"}, {"from": "gpt", "value": "she is wearing the traditional ao dai dress."}] |
00000006325.jpg | 6325_evjvqa | [{"from": "human", "value": "what color is the woman giving the speech's outfit?\n<image>"}, {"from": "gpt", "value": "it is red. "}] |
00000006325.jpg | 6325_evjvqa | [{"from": "human", "value": "what color is the female speaker's ao dai?\n<image>"}, {"from": "gpt", "value": "it is red."}] |
00000006329.jpg | 6329_evjvqa | [{"from": "human", "value": "what is the woman in orange holding in her hands?\n<image>"}, {"from": "gpt", "value": "she is holding two fish packages."}] |
00000006330.jpg | 6330_evjvqa | [{"from": "human", "value": "what is the man in white doing?\n<image>"}, {"from": "gpt", "value": "he is lighting the incense."}] |
00000006330.jpg | 6330_evjvqa | [{"from": "human", "value": "how many girls in blue are there?\n<image>"}, {"from": "gpt", "value": "there is one girl in blue."}] |
00000006330.jpg | 6330_evjvqa | [{"from": "human", "value": "what color is the man lighting the incense' shirt?\n<image>"}, {"from": "gpt", "value": "it is white. "}] |
00000006336.jpg | 6336_evjvqa | [{"from": "human", "value": "how many men in black shirt are there?\n<image>"}, {"from": "gpt", "value": "there is one man in black shirt."}] |
00000006336.jpg | 6336_evjvqa | [{"from": "human", "value": "what color is the coat that the woman in red is wearing?\n<image>"}, {"from": "gpt", "value": "her coat is red."}] |
00000006336.jpg | 6336_evjvqa | [{"from": "human", "value": "how many men in white are there?\n<image>"}, {"from": "gpt", "value": "there is one man in white. "}] |
00000006337.jpg | 6337_evjvqa | [{"from": "human", "value": "what is she doing?\n<image>"}, {"from": "gpt", "value": "she is carrying the twin baskets."}] |
00000006337.jpg | 6337_evjvqa | [{"from": "human", "value": "what is his facial expression?\n<image>"}, {"from": "gpt", "value": "he is smiling."}] |
00000006337.jpg | 6337_evjvqa | [{"from": "human", "value": "what color is her ao dai dress?\n<image>"}, {"from": "gpt", "value": "it is white."}] |
00000006340.jpg | 6340_evjvqa | [{"from": "human", "value": "what color is her shirt?\n<image>"}, {"from": "gpt", "value": "she is wearing a brown shirt."}] |
00000006340.jpg | 6340_evjvqa | [{"from": "human", "value": "what type of shirt is she wearing?\n<image>"}, {"from": "gpt", "value": "she is wearing a brown vietnamese silk ensemble."}] |
00000006340.jpg | 6340_evjvqa | [{"from": "human", "value": "what is she holding in her hand?\n<image>"}, {"from": "gpt", "value": "she is holding a conical hat."}] |
00000006345.jpg | 6345_evjvqa | [{"from": "human", "value": "what is she doing?\n<image>"}, {"from": "gpt", "value": "she is pitching products to clients."}] |
00000006345.jpg | 6345_evjvqa | [{"from": "human", "value": "what color is the sneakers worn by the man in black shirt?\n<image>"}, {"from": "gpt", "value": "they are black."}] |
00000006345.jpg | 6345_evjvqa | [{"from": "human", "value": "how many men in black are there?\n<image>"}, {"from": "gpt", "value": "there is one man in black."}] |
00000006353.jpg | 6353_evjvqa | [{"from": "human", "value": "what are they doing?\n<image>"}, {"from": "gpt", "value": "they are gazing at the statue."}] |
00000006353.jpg | 6353_evjvqa | [{"from": "human", "value": "what color is the shirt that the boy at the front of the line is wearing?\n<image>"}, {"from": "gpt", "value": "it is red."}] |
00000006353.jpg | 6353_evjvqa | [{"from": "human", "value": "how many girls are smiling with hand over their mouths?\n<image>"}, {"from": "gpt", "value": "there is one girl smiling with hand over her mouth."}] |
00000006355.jpg | 6355_evjvqa | [{"from": "human", "value": "how many boys are there?\n<image>"}, {"from": "gpt", "value": "there are four boys."}] |
00000006355.jpg | 6355_evjvqa | [{"from": "human", "value": "what is the color of the boy standing on the bricks' shirt?\n<image>"}, {"from": "gpt", "value": "it is brown."}] |
00000006355.jpg | 6355_evjvqa | [{"from": "human", "value": "what is the boy in white shirt and black pants doing?\n<image>"}, {"from": "gpt", "value": "he is climbing up the pile of bricks."}] |
00000006362.jpg | 6362_evjvqa | [{"from": "human", "value": "what color is the parked car?\n<image>"}, {"from": "gpt", "value": "it is black. "}] |
00000006362.jpg | 6362_evjvqa | [{"from": "human", "value": "how many black cars are there?\n<image>"}, {"from": "gpt", "value": "there is one black car."}] |
00000006362.jpg | 6362_evjvqa | [{"from": "human", "value": "how many motorbikes are parked in front of the door?\n<image>"}, {"from": "gpt", "value": "there is one motorbike parked in front of the door. "}] |
00000006365.jpg | 6365_evjvqa | [{"from": "human", "value": "what are they doing?\n<image>"}, {"from": "gpt", "value": "they are standing in front of the statues."}] |
00000006365.jpg | 6365_evjvqa | [{"from": "human", "value": "what color is the shirt that the woman standing at the front of the line is wearing?\n<image>"}, {"from": "gpt", "value": "it is pink."}] |
00000006365.jpg | 6365_evjvqa | [{"from": "human", "value": "what color are the flowers that the girls are holding?\n<image>"}, {"from": "gpt", "value": "they are yellow."}] |
00000006367.jpg | 6367_evjvqa | [{"from": "human", "value": "what color is the old woman' shirt?\n<image>"}, {"from": "gpt", "value": "it is white."}] |
00000006367.jpg | 6367_evjvqa | [{"from": "human", "value": "what color is her bag?\n<image>"}, {"from": "gpt", "value": "it is green."}] |
00000006367.jpg | 6367_evjvqa | [{"from": "human", "value": "what color are her pants?\n<image>"}, {"from": "gpt", "value": "they are blue. "}] |
00000006369.jpg | 6369_evjvqa | [{"from": "human", "value": "where are they sitting on?\n<image>"}, {"from": "gpt", "value": "they are sitting on the red boat."}] |
00000006369.jpg | 6369_evjvqa | [{"from": "human", "value": "what color is the boat on which they are sitting?\n<image>"}, {"from": "gpt", "value": "it is red."}] |
00000006369.jpg | 6369_evjvqa | [{"from": "human", "value": "how many boys in white are there?\n<image>"}, {"from": "gpt", "value": "there is one boy in white."}] |
00000006378.jpg | 6378_evjvqa | [{"from": "human", "value": "how many men are sitting?\n<image>"}, {"from": "gpt", "value": "there are four men sitting."}] |
00000006378.jpg | 6378_evjvqa | [{"from": "human", "value": "how many plant pots are being displayed?\n<image>"}, {"from": "gpt", "value": "there is one plant pot being displayed. "}] |
00000006378.jpg | 6378_evjvqa | [{"from": "human", "value": "what color is the flowers in the plant pot?\n<image>"}, {"from": "gpt", "value": "they are purple. "}] |
00000006381.jpg | 6381_evjvqa | [{"from": "human", "value": "how many male drivers are wearing red helmets?\n<image>"}, {"from": "gpt", "value": "there is one man wearing a red helmet. "}] |
00000006381.jpg | 6381_evjvqa | [{"from": "human", "value": "how many boys are checking their phones?\n<image>"}, {"from": "gpt", "value": "there is one boy checking his phone. "}] |
00000006383.jpg | 6383_evjvqa | [{"from": "human", "value": "what is the two girls' expression?\n<image>"}, {"from": "gpt", "value": "the two girls are smiling."}] |
00000006383.jpg | 6383_evjvqa | [{"from": "human", "value": "what color are the girl in white' shoes?\n<image>"}, {"from": "gpt", "value": "they are grey."}] |
00000006383.jpg | 6383_evjvqa | [{"from": "human", "value": "what color are the girl in yellow' shoes?\n<image>"}, {"from": "gpt", "value": "they are red."}] |
00000006396.jpg | 6396_evjvqa | [{"from": "human", "value": "what is he doing?\n<image>"}, {"from": "gpt", "value": "he is feeding fish."}] |
00000006396.jpg | 6396_evjvqa | [{"from": "human", "value": "what color are his flip-flops?\n<image>"}, {"from": "gpt", "value": "he is wearing a pair of black flip-flops."}] |
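
Each row's `conversations` cell is a JSON-encoded list of chat turns in the LLaVA-style `{"from": ..., "value": ...}` layout, with the `<image>` token marking where the picture is injected into the human turn. The snippet below is a minimal parsing sketch, not part of the dataset's official tooling; it assumes the column arrives as a JSON string (as rendered in this preview) and that turns strictly alternate human/gpt, as in every row shown here.

```python
import json

# One raw row, copied verbatim from the first table row above.
row = {
    "image": "00000002770.jpg",
    "id": "2770_evjvqa",
    "conversations": (
        '[{"from": "human", "value": "how many potted plants are placed '
        'on the window?\\n<image>"}, {"from": "gpt", "value": "five"}]'
    ),
}

# The conversations column is serialized JSON; decode it into a list of turns.
turns = json.loads(row["conversations"])

# Pair alternating human/gpt turns into (question, answer) tuples,
# dropping the <image> placeholder and stray whitespace (some answers
# in this preview carry trailing spaces).
qa_pairs = [
    (q["value"].replace("<image>", "").strip(), a["value"].strip())
    for q, a in zip(turns[::2], turns[1::2])
    if q["from"] == "human" and a["from"] == "gpt"
]

print(qa_pairs)
# [('how many potted plants are placed on the window?', 'five')]
```

If a loader has already decoded the column into a list of dicts rather than a string, skip the `json.loads` step; the pairing logic is unchanged.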