Columns (all string-typed): problem_id, source, task_type, in_source_id, prompt, golden_diff, verification_info

problem_id: gh_patches_debug_36408
source: rasdani/github-patches
task_type: git_diff
in_source_id: mne-tools__mne-bids-74

prompt:
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Remove .gz extension for fif: It's no longer part of the validator Throughout the current state of MNE BIDS, the file ending `.gz` is used as an identifier for FIF files: https://github.com/mne-tools/mne-bids/blob/c73ce744d30be87645e1648754b488f7572307f3/mne_bids/meg_bids.py#L33-L34 Can we change this to `fif.gz`? I am just concerned, because other files can be `.gz` and have nothing to do with FIF. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mne_bids/io.py` Content: ``` 1 """Check whether a file format is supported by BIDS and then load it.""" 2 # Authors: Mainak Jas <[email protected]> 3 # Alexandre Gramfort <[email protected]> 4 # Teon Brooks <[email protected]> 5 # Chris Holdgraf <[email protected]> 6 # Stefan Appelhoff <[email protected]> 7 # 8 # License: BSD (3-clause) 9 from mne import io 10 import os 11 12 ALLOWED_EXTENSIONS = ['.con', '.sqd', '.fif', '.gz', '.pdf', '.ds'] 13 14 15 def _parse_ext(raw_fname, verbose=False): 16 """Split a filename into its name and extension.""" 17 fname, ext = os.path.splitext(raw_fname) 18 # BTi data is the only file format that does not have a file extension 19 if ext == '': 20 if verbose is True: 21 print('Found no extension for raw file, assuming "BTi" format and ' 22 'appending extension .pdf') 23 ext = '.pdf' 24 return fname, ext 25 26 27 def _read_raw(raw_fname, electrode=None, hsp=None, hpi=None, config=None, 28 verbose=None): 29 """Read a raw file into MNE, making inferences based on extension.""" 30 fname, ext = _parse_ext(raw_fname) 31 32 # MEG File Types 33 # -------------- 34 # KIT systems 35 if ext in ['.con', '.sqd']: 36 raw = io.read_raw_kit(raw_fname, elp=electrode, hsp=hsp, 37 mrk=hpi, preload=False) 38 39 # Neuromag or converted-to-fif systems 40 elif ext in ['.fif', '.gz']: 41 raw = io.read_raw_fif(raw_fname, preload=False) 42 43 # BTi systems 44 elif ext == '.pdf': 45 if os.path.isfile(raw_fname): 46 raw = io.read_raw_bti(raw_fname, config_fname=config, 47 head_shape_fname=hsp, 48 preload=False, verbose=verbose) 49 50 # CTF systems 51 elif ext == '.ds': 52 raw = io.read_raw_ctf(raw_fname) 53 54 # No supported data found ... 
55 # --------------------------- 56 else: 57 raise ValueError("Raw file name extension must be one of %\n" 58 "Got %" % (ALLOWED_EXTENSIONS, ext)) 59 return raw 60 ``` Path: `mne_bids/mne_bids.py` Content: ``` 1 """Make BIDS compatible directory structures and infer meta data from MNE.""" 2 # Authors: Mainak Jas <[email protected]> 3 # Alexandre Gramfort <[email protected]> 4 # Teon Brooks <[email protected]> 5 # Chris Holdgraf <[email protected]> 6 # Stefan Appelhoff <[email protected]> 7 # 8 # License: BSD (3-clause) 9 10 import os 11 import shutil as sh 12 import pandas as pd 13 from collections import defaultdict, OrderedDict 14 15 import numpy as np 16 from mne.io.constants import FIFF 17 from mne.io.pick import channel_type 18 from mne.io import BaseRaw 19 from mne.channels.channels import _unit2human 20 from mne.externals.six import string_types 21 22 from datetime import datetime 23 from warnings import warn 24 25 from .utils import (make_bids_filename, make_bids_folders, 26 make_dataset_description, _write_json, 27 _read_events, _mkdir_p) 28 from .io import (_parse_ext, _read_raw, ALLOWED_EXTENSIONS) 29 30 31 ALLOWED_KINDS = ['meg', 'ieeg'] 32 orientation = {'.sqd': 'ALS', '.con': 'ALS', '.fif': 'RAS', '.gz': 'RAS', 33 '.pdf': 'ALS', '.ds': 'ALS'} 34 35 units = {'.sqd': 'm', '.con': 'm', '.fif': 'm', '.gz': 'm', '.pdf': 'm', 36 '.ds': 'cm'} 37 38 manufacturers = {'.sqd': 'KIT/Yokogawa', '.con': 'KIT/Yokogawa', 39 '.fif': 'Elekta', '.gz': 'Elekta', '.pdf': '4D Magnes', 40 '.ds': 'CTF'} 41 42 43 def _channels_tsv(raw, fname, verbose): 44 """Create a channels.tsv file and save it. 45 46 Parameters 47 ---------- 48 raw : instance of Raw 49 The data as MNE-Python Raw object. 50 fname : str 51 Filename to save the channels.tsv to. 52 verbose : bool 53 Set verbose output to true or false. 54 55 """ 56 map_chs = defaultdict(lambda: 'OTHER') 57 map_chs.update(grad='MEGGRAD', mag='MEGMAG', stim='TRIG', eeg='EEG', 58 ecog='ECOG', seeg='SEEG', eog='EOG', ecg='ECG', misc='MISC', 59 resp='RESPONSE', ref_meg='REFMEG') 60 map_desc = defaultdict(lambda: 'Other type of channel') 61 map_desc.update(grad='Gradiometer', mag='Magnetometer', 62 stim='Trigger', 63 eeg='ElectroEncephaloGram', 64 ecog='Electrocorticography', 65 seeg='StereoEEG', 66 ecg='ElectroCardioGram', 67 eog='ElectrOculoGram', misc='Miscellaneous', 68 ref_meg='Reference channel') 69 70 status, ch_type, description = list(), list(), list() 71 for idx, ch in enumerate(raw.info['ch_names']): 72 status.append('bad' if ch in raw.info['bads'] else 'good') 73 ch_type.append(map_chs[channel_type(raw.info, idx)]) 74 description.append(map_desc[channel_type(raw.info, idx)]) 75 low_cutoff, high_cutoff = (raw.info['highpass'], raw.info['lowpass']) 76 units = [_unit2human.get(ch_i['unit'], 'n/a') for ch_i in raw.info['chs']] 77 units = [u if u not in ['NA'] else 'n/a' for u in units] 78 n_channels = raw.info['nchan'] 79 sfreq = raw.info['sfreq'] 80 81 df = pd.DataFrame(OrderedDict([ 82 ('name', raw.info['ch_names']), 83 ('type', ch_type), 84 ('units', units), 85 ('description', description), 86 ('sampling_frequency', np.full((n_channels), sfreq)), 87 ('low_cutoff', np.full((n_channels), low_cutoff)), 88 ('high_cutoff', np.full((n_channels), high_cutoff)), 89 ('status', status)])) 90 df.to_csv(fname, sep='\t', index=False, na_rep='n/a') 91 92 if verbose: 93 print(os.linesep + "Writing '%s'..." 
% fname + os.linesep) 94 print(df.head()) 95 96 return fname 97 98 99 def _events_tsv(events, raw, fname, trial_type, verbose): 100 """Create an events.tsv file and save it. 101 102 This function will write the mandatory 'onset', and 'duration' columns as 103 well as the optional 'event_value' and 'event_sample'. The 'event_value' 104 corresponds to the marker value as found in the TRIG channel of the 105 recording. In addition, the 'trial_type' field can be written. 106 107 Parameters 108 ---------- 109 events : array, shape = (n_events, 3) 110 The first column contains the event time in samples and the third 111 column contains the event id. The second column is ignored for now but 112 typically contains the value of the trigger channel either immediately 113 before the event or immediately after. 114 raw : instance of Raw 115 The data as MNE-Python Raw object. 116 fname : str 117 Filename to save the events.tsv to. 118 event_id : dict | None 119 Dictionary mapping a brief description key to an event id (value). For 120 example {'Go': 1, 'No Go': 2}. 121 verbose : bool 122 Set verbose output to true or false. 123 124 Notes 125 ----- 126 The function writes durations of zero for each event. 127 128 """ 129 # Start by filling all data that we know into a df 130 first_samp = raw.first_samp 131 sfreq = raw.info['sfreq'] 132 events[:, 0] -= first_samp 133 134 data = OrderedDict([('onset', events[:, 0]), 135 ('duration', np.zeros(events.shape[0])), 136 ('trial_type', events[:, 2]), 137 ('event_value', events[:, 2]), 138 ('event_sample', events[:, 0])]) 139 140 df = pd.DataFrame.from_dict(data) 141 142 # Now check if trial_type is specified or should be removed 143 if trial_type: 144 trial_type_map = {v: k for k, v in trial_type.items()} 145 df.trial_type = df.trial_type.map(trial_type_map) 146 else: 147 df.drop(labels=['trial_type'], axis=1, inplace=True) 148 149 # Onset column needs to be specified in seconds 150 df.onset /= sfreq 151 152 # Save to file 153 df.to_csv(fname, sep='\t', index=False, na_rep='n/a') 154 if verbose: 155 print(os.linesep + "Writing '%s'..." % fname + os.linesep) 156 print(df.head()) 157 158 return fname 159 160 161 def _scans_tsv(raw, raw_fname, fname, verbose): 162 """Create a scans.tsv file and save it. 163 164 Parameters 165 ---------- 166 raw : instance of Raw 167 The data as MNE-Python Raw object. 168 raw_fname : str 169 Relative path to the raw data file. 170 fname : str 171 Filename to save the scans.tsv to. 172 verbose : bool 173 Set verbose output to true or false. 174 175 """ 176 # get MEASurement date from the data info 177 meas_date = raw.info['meas_date'] 178 if isinstance(meas_date, (np.ndarray, list)): 179 meas_date = meas_date[0] 180 181 if meas_date is None: 182 acq_time = 'n/a' 183 else: 184 acq_time = datetime.fromtimestamp( 185 meas_date).strftime('%Y-%m-%dT%H:%M:%S') 186 187 df = pd.DataFrame(data={'filename': ['%s' % raw_fname], 188 'acq_time': [acq_time]}, 189 columns=['filename', 'acq_time']) 190 191 df.to_csv(fname, sep='\t', index=False, na_rep='n/a') 192 193 if verbose: 194 print(os.linesep + "Writing '%s'..." % fname + os.linesep) 195 print(df.head()) 196 197 return fname 198 199 200 def _coordsystem_json(raw, unit, orient, manufacturer, fname, verbose): 201 """Create a coordsystem.json file and save it. 202 203 Parameters 204 ---------- 205 raw : instance of Raw 206 The data as MNE-Python Raw object. 207 unit : str 208 Units to be used in the coordsystem specification. 
209 orient : str 210 Used to define the coordinate system for the head coils. 211 manufacturer : str 212 Used to define the coordinate system for the MEG sensors. 213 fname : str 214 Filename to save the coordsystem.json to. 215 verbose : bool 216 Set verbose output to true or false. 217 218 """ 219 dig = raw.info['dig'] 220 coords = dict() 221 fids = {d['ident']: d for d in dig if d['kind'] == 222 FIFF.FIFFV_POINT_CARDINAL} 223 if fids: 224 if FIFF.FIFFV_POINT_NASION in fids: 225 coords['NAS'] = fids[FIFF.FIFFV_POINT_NASION]['r'].tolist() 226 if FIFF.FIFFV_POINT_LPA in fids: 227 coords['LPA'] = fids[FIFF.FIFFV_POINT_LPA]['r'].tolist() 228 if FIFF.FIFFV_POINT_RPA in fids: 229 coords['RPA'] = fids[FIFF.FIFFV_POINT_RPA]['r'].tolist() 230 231 hpi = {d['ident']: d for d in dig if d['kind'] == FIFF.FIFFV_POINT_HPI} 232 if hpi: 233 for ident in hpi.keys(): 234 coords['coil%d' % ident] = hpi[ident]['r'].tolist() 235 236 coord_frame = set([dig[ii]['coord_frame'] for ii in range(len(dig))]) 237 if len(coord_frame) > 1: 238 err = 'All HPI and Fiducials must be in the same coordinate frame.' 239 raise ValueError(err) 240 241 fid_json = {'MEGCoordinateSystem': manufacturer, 242 'MEGCoordinateUnits': unit, # XXX validate this 243 'HeadCoilCoordinates': coords, 244 'HeadCoilCoordinateSystem': orient, 245 'HeadCoilCoordinateUnits': unit # XXX validate this 246 } 247 _write_json(fid_json, fname) 248 249 return fname 250 251 252 def _sidecar_json(raw, task, manufacturer, fname, kind, 253 verbose=True): 254 """Create a sidecar json file depending on the kind and save it. 255 256 The sidecar json file provides meta data about the data of a certain kind. 257 258 Parameters 259 ---------- 260 raw : instance of Raw 261 The data as MNE-Python Raw object. 262 task : str 263 Name of the task the data is based on. 264 manufacturer : str 265 Manufacturer of the acquisition system. For MEG also used to define the 266 coordinate system for the MEG sensors. 267 fname : str 268 Filename to save the sidecar json to. 269 kind : str 270 Type of the data as in ALLOWED_KINDS. 271 verbose : bool 272 Set verbose output to true or false. Defaults to true. 
273 274 """ 275 sfreq = raw.info['sfreq'] 276 powerlinefrequency = raw.info.get('line_freq', None) 277 if powerlinefrequency is None: 278 warn('No line frequency found, defaulting to 50 Hz') 279 powerlinefrequency = 50 280 281 n_megchan = len([ch for ch in raw.info['chs'] 282 if ch['kind'] == FIFF.FIFFV_MEG_CH]) 283 n_megrefchan = len([ch for ch in raw.info['chs'] 284 if ch['kind'] == FIFF.FIFFV_REF_MEG_CH]) 285 n_eegchan = len([ch for ch in raw.info['chs'] 286 if ch['kind'] == FIFF.FIFFV_EEG_CH]) 287 n_ecogchan = len([ch for ch in raw.info['chs'] 288 if ch['kind'] == FIFF.FIFFV_ECOG_CH]) 289 n_seegchan = len([ch for ch in raw.info['chs'] 290 if ch['kind'] == FIFF.FIFFV_SEEG_CH]) 291 n_eogchan = len([ch for ch in raw.info['chs'] 292 if ch['kind'] == FIFF.FIFFV_EOG_CH]) 293 n_ecgchan = len([ch for ch in raw.info['chs'] 294 if ch['kind'] == FIFF.FIFFV_ECG_CH]) 295 n_emgchan = len([ch for ch in raw.info['chs'] 296 if ch['kind'] == FIFF.FIFFV_EMG_CH]) 297 n_miscchan = len([ch for ch in raw.info['chs'] 298 if ch['kind'] == FIFF.FIFFV_MISC_CH]) 299 n_stimchan = len([ch for ch in raw.info['chs'] 300 if ch['kind'] == FIFF.FIFFV_STIM_CH]) 301 302 # Define modality-specific JSON dictionaries 303 ch_info_json_common = [ 304 ('TaskName', task), 305 ('Manufacturer', manufacturer), 306 ('PowerLineFrequency', powerlinefrequency)] 307 ch_info_json_meg = [ 308 ('SamplingFrequency', sfreq), 309 ("DewarPosition", "XXX"), 310 ("DigitizedLandmarks", False), 311 ("DigitizedHeadPoints", False), 312 ("SoftwareFilters", "n/a"), 313 ('MEGChannelCount', n_megchan), 314 ('MEGREFChannelCount', n_megrefchan)] 315 ch_info_json_ieeg = [ 316 ('ECOGChannelCount', n_ecogchan), 317 ('SEEGChannelCount', n_seegchan)] 318 ch_info_ch_counts = [ 319 ('EEGChannelCount', n_eegchan), 320 ('EOGChannelCount', n_eogchan), 321 ('ECGChannelCount', n_ecgchan), 322 ('EMGChannelCount', n_emgchan), 323 ('MiscChannelCount', n_miscchan), 324 ('TriggerChannelCount', n_stimchan)] 325 326 # Stitch together the complete JSON dictionary 327 ch_info_json = ch_info_json_common 328 if kind == 'meg': 329 append_kind_json = ch_info_json_meg 330 elif kind == 'ieeg': 331 append_kind_json = ch_info_json_ieeg 332 else: 333 raise ValueError('Unexpected "kind": {}' 334 ' Use one of: {}'.format(kind, ALLOWED_KINDS)) 335 336 ch_info_json += append_kind_json 337 ch_info_json += ch_info_ch_counts 338 ch_info_json = OrderedDict(ch_info_json) 339 340 _write_json(ch_info_json, fname, verbose=verbose) 341 return fname 342 343 344 def raw_to_bids(subject_id, task, raw_file, output_path, session_id=None, 345 run=None, kind='meg', events_data=None, event_id=None, 346 hpi=None, electrode=None, hsp=None, config=None, 347 overwrite=True, verbose=True): 348 """Walk over a folder of files and create BIDS compatible folder. 349 350 Parameters 351 ---------- 352 subject_id : str 353 The subject name in BIDS compatible format ('01', '02', etc.) 354 task : str 355 Name of the task the data is based on. 356 raw_file : str | instance of mne.Raw 357 The raw data. If a string, it is assumed to be the path to the raw data 358 file. Otherwise it must be an instance of mne.Raw 359 output_path : str 360 The path of the BIDS compatible folder 361 session_id : str | None 362 The session name in BIDS compatible format. 363 run : int | None 364 The run number for this dataset. 365 kind : str, one of ('meg', 'ieeg') 366 The kind of data being converted. Defaults to "meg". 367 events_data : str | array | None 368 The events file. If a string, a path to the events file. 
If an array, 369 the MNE events array (shape n_events, 3). If None, events will be 370 inferred from the stim channel using `mne.find_events`. 371 event_id : dict | None 372 The event id dict used to create a 'trial_type' column in events.tsv 373 hpi : None | str | list of str 374 Marker points representing the location of the marker coils with 375 respect to the MEG Sensors, or path to a marker file. 376 If list, all of the markers will be averaged together. 377 electrode : None | str 378 Digitizer points representing the location of the fiducials and the 379 marker coils with respect to the digitized head shape, or path to a 380 file containing these points. 381 hsp : None | str | array, shape = (n_points, 3) 382 Digitizer head shape points, or path to head shape file. If more than 383 10`000 points are in the head shape, they are automatically decimated. 384 config : str | None 385 A path to the configuration file to use if the data is from a BTi 386 system. 387 overwrite : bool 388 If the file already exists, whether to overwrite it. 389 verbose : bool 390 If verbose is True, this will print a snippet of the sidecar files. If 391 False, no content will be printed. 392 393 """ 394 if isinstance(raw_file, string_types): 395 # We must read in the raw data 396 raw = _read_raw(raw_file, electrode=electrode, hsp=hsp, hpi=hpi, 397 config=config, verbose=verbose) 398 _, ext = _parse_ext(raw_file, verbose=verbose) 399 raw_fname = raw_file 400 elif isinstance(raw_file, BaseRaw): 401 # We got a raw mne object, get back the filename if possible 402 # Assume that if no filename attr exists, it's a fif file. 403 raw = raw_file.copy() 404 if hasattr(raw, 'filenames'): 405 _, ext = _parse_ext(raw.filenames[0], verbose=verbose) 406 raw_fname = raw.filenames[0] 407 else: 408 # FIXME: How to get the filename if no filenames attribute? 
409 raw_fname = 'unknown_file_name' 410 ext = '.fif' 411 else: 412 raise ValueError('raw_file must be an instance of str or BaseRaw, ' 413 'got %s' % type(raw_file)) 414 data_path = make_bids_folders(subject=subject_id, session=session_id, 415 kind=kind, root=output_path, 416 overwrite=overwrite, 417 verbose=verbose) 418 if session_id is None: 419 ses_path = data_path 420 else: 421 ses_path = make_bids_folders(subject=subject_id, session=session_id, 422 root=output_path, 423 overwrite=False, 424 verbose=verbose) 425 426 # create filenames 427 scans_fname = make_bids_filename( 428 subject=subject_id, session=session_id, suffix='scans.tsv', 429 prefix=ses_path) 430 431 coordsystem_fname = make_bids_filename( 432 subject=subject_id, session=session_id, 433 suffix='coordsystem.json', prefix=data_path) 434 data_meta_fname = make_bids_filename( 435 subject=subject_id, session=session_id, task=task, run=run, 436 suffix='%s.json' % kind, prefix=data_path) 437 if ext in ['.fif', '.gz', '.ds']: 438 raw_file_bids = make_bids_filename( 439 subject=subject_id, session=session_id, task=task, run=run, 440 suffix='%s%s' % (kind, ext)) 441 else: 442 raw_folder = make_bids_filename( 443 subject=subject_id, session=session_id, task=task, run=run, 444 suffix='%s' % kind) 445 raw_file_bids = make_bids_filename( 446 subject=subject_id, session=session_id, task=task, run=run, 447 suffix='%s%s' % (kind, ext), prefix=raw_folder) 448 events_tsv_fname = make_bids_filename( 449 subject=subject_id, session=session_id, task=task, 450 run=run, suffix='events.tsv', prefix=data_path) 451 channels_fname = make_bids_filename( 452 subject=subject_id, session=session_id, task=task, run=run, 453 suffix='channels.tsv', prefix=data_path) 454 455 # Read in Raw object and extract metadata from Raw object if needed 456 if kind == 'meg': 457 orient = orientation[ext] 458 unit = units[ext] 459 manufacturer = manufacturers[ext] 460 else: 461 orient = 'n/a' 462 unit = 'n/a' 463 manufacturer = 'n/a' 464 465 # save stuff 466 if kind == 'meg': 467 _scans_tsv(raw, os.path.join(kind, raw_file_bids), scans_fname, 468 verbose) 469 _coordsystem_json(raw, unit, orient, manufacturer, coordsystem_fname, 470 verbose) 471 472 make_dataset_description(output_path, name=" ", 473 verbose=verbose) 474 _sidecar_json(raw, task, manufacturer, data_meta_fname, kind, 475 verbose) 476 _channels_tsv(raw, channels_fname, verbose) 477 478 events = _read_events(events_data, raw) 479 if len(events) > 0: 480 _events_tsv(events, raw, events_tsv_fname, event_id, verbose) 481 482 # set the raw file name to now be the absolute path to ensure the files 483 # are placed in the right location 484 raw_file_bids = os.path.join(data_path, raw_file_bids) 485 if os.path.exists(raw_file_bids) and not overwrite: 486 raise ValueError('"%s" already exists. Please set' 487 ' overwrite to True.' 
% raw_file_bids) 488 _mkdir_p(os.path.dirname(raw_file_bids)) 489 490 if verbose: 491 print('Writing data files to %s' % raw_file_bids) 492 493 if ext not in ALLOWED_EXTENSIONS: 494 raise ValueError('ext must be in %s, got %s' 495 % (''.join(ALLOWED_EXTENSIONS), ext)) 496 497 # for FIF, we need to re-save the file to fix the file pointer 498 # for files with multiple parts 499 if ext in ['.fif', '.gz']: 500 raw.save(raw_file_bids, overwrite=overwrite) 501 elif ext == '.ds': 502 sh.copytree(raw_fname, raw_file_bids) 503 else: 504 sh.copyfile(raw_fname, raw_file_bids) 505 506 return output_path 507 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
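Before the reference patch, here is a minimal sketch (using hypothetical file names, not taken from the repository) of the ambiguity the issue describes: `os.path.splitext` only strips the final suffix, so a compressed FIF recording and an unrelated gzipped file both report `.gz`.

```python
import os

# Hypothetical file names used only for illustration; they are not part of
# the dataset row or the repository under discussion.
fif_compressed = 'sub-01_task-rest_meg.fif.gz'  # compressed Neuromag/FIF recording
unrelated_gz = 'sub-01_T1w.nii.gz'              # gzipped NIfTI image, unrelated to FIF

# os.path.splitext only removes the last suffix, so both files report '.gz'.
print(os.path.splitext(fif_compressed)[1])  # -> '.gz'
print(os.path.splitext(unrelated_gz)[1])    # -> '.gz'

# Checking the combined suffix disambiguates the two, which is the
# alternative the issue floats ('fif.gz') instead of dropping '.gz' entirely.
print(fif_compressed.endswith('.fif.gz'))  # -> True
print(unrelated_gz.endswith('.fif.gz'))    # -> False
```

golden_diff: the reference patch for this row follows. It resolves the issue by removing `.gz` from the recognized extensions and from the FIF-specific branches, rather than switching to a combined `fif.gz` check.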
diff --git a/mne_bids/io.py b/mne_bids/io.py --- a/mne_bids/io.py +++ b/mne_bids/io.py @@ -9,7 +9,7 @@ from mne import io import os -ALLOWED_EXTENSIONS = ['.con', '.sqd', '.fif', '.gz', '.pdf', '.ds'] +ALLOWED_EXTENSIONS = ['.con', '.sqd', '.fif', '.pdf', '.ds'] def _parse_ext(raw_fname, verbose=False): @@ -37,7 +37,7 @@ mrk=hpi, preload=False) # Neuromag or converted-to-fif systems - elif ext in ['.fif', '.gz']: + elif ext in ['.fif']: raw = io.read_raw_fif(raw_fname, preload=False) # BTi systems diff --git a/mne_bids/mne_bids.py b/mne_bids/mne_bids.py --- a/mne_bids/mne_bids.py +++ b/mne_bids/mne_bids.py @@ -29,15 +29,13 @@ ALLOWED_KINDS = ['meg', 'ieeg'] -orientation = {'.sqd': 'ALS', '.con': 'ALS', '.fif': 'RAS', '.gz': 'RAS', - '.pdf': 'ALS', '.ds': 'ALS'} +orientation = {'.sqd': 'ALS', '.con': 'ALS', '.fif': 'RAS', '.pdf': 'ALS', + '.ds': 'ALS'} -units = {'.sqd': 'm', '.con': 'm', '.fif': 'm', '.gz': 'm', '.pdf': 'm', - '.ds': 'cm'} +units = {'.sqd': 'm', '.con': 'm', '.fif': 'm', '.pdf': 'm', '.ds': 'cm'} manufacturers = {'.sqd': 'KIT/Yokogawa', '.con': 'KIT/Yokogawa', - '.fif': 'Elekta', '.gz': 'Elekta', '.pdf': '4D Magnes', - '.ds': 'CTF'} + '.fif': 'Elekta', '.pdf': '4D Magnes', '.ds': 'CTF'} def _channels_tsv(raw, fname, verbose): @@ -434,7 +432,7 @@ data_meta_fname = make_bids_filename( subject=subject_id, session=session_id, task=task, run=run, suffix='%s.json' % kind, prefix=data_path) - if ext in ['.fif', '.gz', '.ds']: + if ext in ['.fif', '.ds']: raw_file_bids = make_bids_filename( subject=subject_id, session=session_id, task=task, run=run, suffix='%s%s' % (kind, ext)) @@ -496,7 +494,7 @@ # for FIF, we need to re-save the file to fix the file pointer # for files with multiple parts - if ext in ['.fif', '.gz']: + if ext in ['.fif']: raw.save(raw_file_bids, overwrite=overwrite) elif ext == '.ds': sh.copytree(raw_fname, raw_file_bids)
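To see the practical effect of the patch, the following standalone sketch mirrors the extension handling of the patched `io.py`. It re-implements `_parse_ext` locally so it runs without MNE installed; the sample file names are hypothetical.

```python
import os

# ALLOWED_EXTENSIONS as it stands after the patch (no bare '.gz').
ALLOWED_EXTENSIONS = ['.con', '.sqd', '.fif', '.pdf', '.ds']

def parse_ext(raw_fname):
    # Local re-implementation of mne_bids.io._parse_ext: split off the final
    # suffix only; extension-less files are assumed to be BTi ('.pdf').
    fname, ext = os.path.splitext(raw_fname)
    if ext == '':
        ext = '.pdf'
    return fname, ext

# Hypothetical file names, for illustration only.
for name in ['rec_raw.fif', 'rec_raw.fif.gz', 'rec.ds']:
    _, ext = parse_ext(name)
    print(name, '->', ext, 'allowed:', ext in ALLOWED_EXTENSIONS)

# 'rec_raw.fif.gz' resolves to '.gz', which is no longer an allowed
# extension, so both _read_raw and raw_to_bids would raise a ValueError
# for such a file after the patch.
```

verification_info: the JSON string below stores the same information in structured form — the golden diff, the issue text, and the full before/after contents of the touched files.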
{"golden_diff": "diff --git a/mne_bids/io.py b/mne_bids/io.py\n--- a/mne_bids/io.py\n+++ b/mne_bids/io.py\n@@ -9,7 +9,7 @@\n from mne import io\n import os\n \n-ALLOWED_EXTENSIONS = ['.con', '.sqd', '.fif', '.gz', '.pdf', '.ds']\n+ALLOWED_EXTENSIONS = ['.con', '.sqd', '.fif', '.pdf', '.ds']\n \n \n def _parse_ext(raw_fname, verbose=False):\n@@ -37,7 +37,7 @@\n mrk=hpi, preload=False)\n \n # Neuromag or converted-to-fif systems\n- elif ext in ['.fif', '.gz']:\n+ elif ext in ['.fif']:\n raw = io.read_raw_fif(raw_fname, preload=False)\n \n # BTi systems\ndiff --git a/mne_bids/mne_bids.py b/mne_bids/mne_bids.py\n--- a/mne_bids/mne_bids.py\n+++ b/mne_bids/mne_bids.py\n@@ -29,15 +29,13 @@\n \n \n ALLOWED_KINDS = ['meg', 'ieeg']\n-orientation = {'.sqd': 'ALS', '.con': 'ALS', '.fif': 'RAS', '.gz': 'RAS',\n- '.pdf': 'ALS', '.ds': 'ALS'}\n+orientation = {'.sqd': 'ALS', '.con': 'ALS', '.fif': 'RAS', '.pdf': 'ALS',\n+ '.ds': 'ALS'}\n \n-units = {'.sqd': 'm', '.con': 'm', '.fif': 'm', '.gz': 'm', '.pdf': 'm',\n- '.ds': 'cm'}\n+units = {'.sqd': 'm', '.con': 'm', '.fif': 'm', '.pdf': 'm', '.ds': 'cm'}\n \n manufacturers = {'.sqd': 'KIT/Yokogawa', '.con': 'KIT/Yokogawa',\n- '.fif': 'Elekta', '.gz': 'Elekta', '.pdf': '4D Magnes',\n- '.ds': 'CTF'}\n+ '.fif': 'Elekta', '.pdf': '4D Magnes', '.ds': 'CTF'}\n \n \n def _channels_tsv(raw, fname, verbose):\n@@ -434,7 +432,7 @@\n data_meta_fname = make_bids_filename(\n subject=subject_id, session=session_id, task=task, run=run,\n suffix='%s.json' % kind, prefix=data_path)\n- if ext in ['.fif', '.gz', '.ds']:\n+ if ext in ['.fif', '.ds']:\n raw_file_bids = make_bids_filename(\n subject=subject_id, session=session_id, task=task, run=run,\n suffix='%s%s' % (kind, ext))\n@@ -496,7 +494,7 @@\n \n # for FIF, we need to re-save the file to fix the file pointer\n # for files with multiple parts\n- if ext in ['.fif', '.gz']:\n+ if ext in ['.fif']:\n raw.save(raw_file_bids, overwrite=overwrite)\n elif ext == '.ds':\n sh.copytree(raw_fname, raw_file_bids)\n", "issue": "Remove .gz extension for fif: It's no longer part of the validator\nThroughout the current state of MNE BIDS, the file ending `.gz` is used as an identifier for FIF files:\r\n\r\nhttps://github.com/mne-tools/mne-bids/blob/c73ce744d30be87645e1648754b488f7572307f3/mne_bids/meg_bids.py#L33-L34\r\n\r\nCan we change this to `fif.gz`? 
I am just concerned, because other files can be `.gz` and have nothing to do with FIF.\n", "before_files": [{"content": "\"\"\"Check whether a file format is supported by BIDS and then load it.\"\"\"\n# Authors: Mainak Jas <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# Teon Brooks <[email protected]>\n# Chris Holdgraf <[email protected]>\n# Stefan Appelhoff <[email protected]>\n#\n# License: BSD (3-clause)\nfrom mne import io\nimport os\n\nALLOWED_EXTENSIONS = ['.con', '.sqd', '.fif', '.gz', '.pdf', '.ds']\n\n\ndef _parse_ext(raw_fname, verbose=False):\n \"\"\"Split a filename into its name and extension.\"\"\"\n fname, ext = os.path.splitext(raw_fname)\n # BTi data is the only file format that does not have a file extension\n if ext == '':\n if verbose is True:\n print('Found no extension for raw file, assuming \"BTi\" format and '\n 'appending extension .pdf')\n ext = '.pdf'\n return fname, ext\n\n\ndef _read_raw(raw_fname, electrode=None, hsp=None, hpi=None, config=None,\n verbose=None):\n \"\"\"Read a raw file into MNE, making inferences based on extension.\"\"\"\n fname, ext = _parse_ext(raw_fname)\n\n # MEG File Types\n # --------------\n # KIT systems\n if ext in ['.con', '.sqd']:\n raw = io.read_raw_kit(raw_fname, elp=electrode, hsp=hsp,\n mrk=hpi, preload=False)\n\n # Neuromag or converted-to-fif systems\n elif ext in ['.fif', '.gz']:\n raw = io.read_raw_fif(raw_fname, preload=False)\n\n # BTi systems\n elif ext == '.pdf':\n if os.path.isfile(raw_fname):\n raw = io.read_raw_bti(raw_fname, config_fname=config,\n head_shape_fname=hsp,\n preload=False, verbose=verbose)\n\n # CTF systems\n elif ext == '.ds':\n raw = io.read_raw_ctf(raw_fname)\n\n # No supported data found ...\n # ---------------------------\n else:\n raise ValueError(\"Raw file name extension must be one of %\\n\"\n \"Got %\" % (ALLOWED_EXTENSIONS, ext))\n return raw\n", "path": "mne_bids/io.py"}, {"content": "\"\"\"Make BIDS compatible directory structures and infer meta data from MNE.\"\"\"\n# Authors: Mainak Jas <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# Teon Brooks <[email protected]>\n# Chris Holdgraf <[email protected]>\n# Stefan Appelhoff <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os\nimport shutil as sh\nimport pandas as pd\nfrom collections import defaultdict, OrderedDict\n\nimport numpy as np\nfrom mne.io.constants import FIFF\nfrom mne.io.pick import channel_type\nfrom mne.io import BaseRaw\nfrom mne.channels.channels import _unit2human\nfrom mne.externals.six import string_types\n\nfrom datetime import datetime\nfrom warnings import warn\n\nfrom .utils import (make_bids_filename, make_bids_folders,\n make_dataset_description, _write_json,\n _read_events, _mkdir_p)\nfrom .io import (_parse_ext, _read_raw, ALLOWED_EXTENSIONS)\n\n\nALLOWED_KINDS = ['meg', 'ieeg']\norientation = {'.sqd': 'ALS', '.con': 'ALS', '.fif': 'RAS', '.gz': 'RAS',\n '.pdf': 'ALS', '.ds': 'ALS'}\n\nunits = {'.sqd': 'm', '.con': 'm', '.fif': 'm', '.gz': 'm', '.pdf': 'm',\n '.ds': 'cm'}\n\nmanufacturers = {'.sqd': 'KIT/Yokogawa', '.con': 'KIT/Yokogawa',\n '.fif': 'Elekta', '.gz': 'Elekta', '.pdf': '4D Magnes',\n '.ds': 'CTF'}\n\n\ndef _channels_tsv(raw, fname, verbose):\n \"\"\"Create a channels.tsv file and save it.\n\n Parameters\n ----------\n raw : instance of Raw\n The data as MNE-Python Raw object.\n fname : str\n Filename to save the channels.tsv to.\n verbose : bool\n Set verbose output to true or false.\n\n \"\"\"\n map_chs = defaultdict(lambda: 'OTHER')\n 
map_chs.update(grad='MEGGRAD', mag='MEGMAG', stim='TRIG', eeg='EEG',\n ecog='ECOG', seeg='SEEG', eog='EOG', ecg='ECG', misc='MISC',\n resp='RESPONSE', ref_meg='REFMEG')\n map_desc = defaultdict(lambda: 'Other type of channel')\n map_desc.update(grad='Gradiometer', mag='Magnetometer',\n stim='Trigger',\n eeg='ElectroEncephaloGram',\n ecog='Electrocorticography',\n seeg='StereoEEG',\n ecg='ElectroCardioGram',\n eog='ElectrOculoGram', misc='Miscellaneous',\n ref_meg='Reference channel')\n\n status, ch_type, description = list(), list(), list()\n for idx, ch in enumerate(raw.info['ch_names']):\n status.append('bad' if ch in raw.info['bads'] else 'good')\n ch_type.append(map_chs[channel_type(raw.info, idx)])\n description.append(map_desc[channel_type(raw.info, idx)])\n low_cutoff, high_cutoff = (raw.info['highpass'], raw.info['lowpass'])\n units = [_unit2human.get(ch_i['unit'], 'n/a') for ch_i in raw.info['chs']]\n units = [u if u not in ['NA'] else 'n/a' for u in units]\n n_channels = raw.info['nchan']\n sfreq = raw.info['sfreq']\n\n df = pd.DataFrame(OrderedDict([\n ('name', raw.info['ch_names']),\n ('type', ch_type),\n ('units', units),\n ('description', description),\n ('sampling_frequency', np.full((n_channels), sfreq)),\n ('low_cutoff', np.full((n_channels), low_cutoff)),\n ('high_cutoff', np.full((n_channels), high_cutoff)),\n ('status', status)]))\n df.to_csv(fname, sep='\\t', index=False, na_rep='n/a')\n\n if verbose:\n print(os.linesep + \"Writing '%s'...\" % fname + os.linesep)\n print(df.head())\n\n return fname\n\n\ndef _events_tsv(events, raw, fname, trial_type, verbose):\n \"\"\"Create an events.tsv file and save it.\n\n This function will write the mandatory 'onset', and 'duration' columns as\n well as the optional 'event_value' and 'event_sample'. The 'event_value'\n corresponds to the marker value as found in the TRIG channel of the\n recording. In addition, the 'trial_type' field can be written.\n\n Parameters\n ----------\n events : array, shape = (n_events, 3)\n The first column contains the event time in samples and the third\n column contains the event id. The second column is ignored for now but\n typically contains the value of the trigger channel either immediately\n before the event or immediately after.\n raw : instance of Raw\n The data as MNE-Python Raw object.\n fname : str\n Filename to save the events.tsv to.\n event_id : dict | None\n Dictionary mapping a brief description key to an event id (value). 
For\n example {'Go': 1, 'No Go': 2}.\n verbose : bool\n Set verbose output to true or false.\n\n Notes\n -----\n The function writes durations of zero for each event.\n\n \"\"\"\n # Start by filling all data that we know into a df\n first_samp = raw.first_samp\n sfreq = raw.info['sfreq']\n events[:, 0] -= first_samp\n\n data = OrderedDict([('onset', events[:, 0]),\n ('duration', np.zeros(events.shape[0])),\n ('trial_type', events[:, 2]),\n ('event_value', events[:, 2]),\n ('event_sample', events[:, 0])])\n\n df = pd.DataFrame.from_dict(data)\n\n # Now check if trial_type is specified or should be removed\n if trial_type:\n trial_type_map = {v: k for k, v in trial_type.items()}\n df.trial_type = df.trial_type.map(trial_type_map)\n else:\n df.drop(labels=['trial_type'], axis=1, inplace=True)\n\n # Onset column needs to be specified in seconds\n df.onset /= sfreq\n\n # Save to file\n df.to_csv(fname, sep='\\t', index=False, na_rep='n/a')\n if verbose:\n print(os.linesep + \"Writing '%s'...\" % fname + os.linesep)\n print(df.head())\n\n return fname\n\n\ndef _scans_tsv(raw, raw_fname, fname, verbose):\n \"\"\"Create a scans.tsv file and save it.\n\n Parameters\n ----------\n raw : instance of Raw\n The data as MNE-Python Raw object.\n raw_fname : str\n Relative path to the raw data file.\n fname : str\n Filename to save the scans.tsv to.\n verbose : bool\n Set verbose output to true or false.\n\n \"\"\"\n # get MEASurement date from the data info\n meas_date = raw.info['meas_date']\n if isinstance(meas_date, (np.ndarray, list)):\n meas_date = meas_date[0]\n\n if meas_date is None:\n acq_time = 'n/a'\n else:\n acq_time = datetime.fromtimestamp(\n meas_date).strftime('%Y-%m-%dT%H:%M:%S')\n\n df = pd.DataFrame(data={'filename': ['%s' % raw_fname],\n 'acq_time': [acq_time]},\n columns=['filename', 'acq_time'])\n\n df.to_csv(fname, sep='\\t', index=False, na_rep='n/a')\n\n if verbose:\n print(os.linesep + \"Writing '%s'...\" % fname + os.linesep)\n print(df.head())\n\n return fname\n\n\ndef _coordsystem_json(raw, unit, orient, manufacturer, fname, verbose):\n \"\"\"Create a coordsystem.json file and save it.\n\n Parameters\n ----------\n raw : instance of Raw\n The data as MNE-Python Raw object.\n unit : str\n Units to be used in the coordsystem specification.\n orient : str\n Used to define the coordinate system for the head coils.\n manufacturer : str\n Used to define the coordinate system for the MEG sensors.\n fname : str\n Filename to save the coordsystem.json to.\n verbose : bool\n Set verbose output to true or false.\n\n \"\"\"\n dig = raw.info['dig']\n coords = dict()\n fids = {d['ident']: d for d in dig if d['kind'] ==\n FIFF.FIFFV_POINT_CARDINAL}\n if fids:\n if FIFF.FIFFV_POINT_NASION in fids:\n coords['NAS'] = fids[FIFF.FIFFV_POINT_NASION]['r'].tolist()\n if FIFF.FIFFV_POINT_LPA in fids:\n coords['LPA'] = fids[FIFF.FIFFV_POINT_LPA]['r'].tolist()\n if FIFF.FIFFV_POINT_RPA in fids:\n coords['RPA'] = fids[FIFF.FIFFV_POINT_RPA]['r'].tolist()\n\n hpi = {d['ident']: d for d in dig if d['kind'] == FIFF.FIFFV_POINT_HPI}\n if hpi:\n for ident in hpi.keys():\n coords['coil%d' % ident] = hpi[ident]['r'].tolist()\n\n coord_frame = set([dig[ii]['coord_frame'] for ii in range(len(dig))])\n if len(coord_frame) > 1:\n err = 'All HPI and Fiducials must be in the same coordinate frame.'\n raise ValueError(err)\n\n fid_json = {'MEGCoordinateSystem': manufacturer,\n 'MEGCoordinateUnits': unit, # XXX validate this\n 'HeadCoilCoordinates': coords,\n 'HeadCoilCoordinateSystem': orient,\n 
'HeadCoilCoordinateUnits': unit # XXX validate this\n }\n _write_json(fid_json, fname)\n\n return fname\n\n\ndef _sidecar_json(raw, task, manufacturer, fname, kind,\n verbose=True):\n \"\"\"Create a sidecar json file depending on the kind and save it.\n\n The sidecar json file provides meta data about the data of a certain kind.\n\n Parameters\n ----------\n raw : instance of Raw\n The data as MNE-Python Raw object.\n task : str\n Name of the task the data is based on.\n manufacturer : str\n Manufacturer of the acquisition system. For MEG also used to define the\n coordinate system for the MEG sensors.\n fname : str\n Filename to save the sidecar json to.\n kind : str\n Type of the data as in ALLOWED_KINDS.\n verbose : bool\n Set verbose output to true or false. Defaults to true.\n\n \"\"\"\n sfreq = raw.info['sfreq']\n powerlinefrequency = raw.info.get('line_freq', None)\n if powerlinefrequency is None:\n warn('No line frequency found, defaulting to 50 Hz')\n powerlinefrequency = 50\n\n n_megchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_MEG_CH])\n n_megrefchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_REF_MEG_CH])\n n_eegchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_EEG_CH])\n n_ecogchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_ECOG_CH])\n n_seegchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_SEEG_CH])\n n_eogchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_EOG_CH])\n n_ecgchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_ECG_CH])\n n_emgchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_EMG_CH])\n n_miscchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_MISC_CH])\n n_stimchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_STIM_CH])\n\n # Define modality-specific JSON dictionaries\n ch_info_json_common = [\n ('TaskName', task),\n ('Manufacturer', manufacturer),\n ('PowerLineFrequency', powerlinefrequency)]\n ch_info_json_meg = [\n ('SamplingFrequency', sfreq),\n (\"DewarPosition\", \"XXX\"),\n (\"DigitizedLandmarks\", False),\n (\"DigitizedHeadPoints\", False),\n (\"SoftwareFilters\", \"n/a\"),\n ('MEGChannelCount', n_megchan),\n ('MEGREFChannelCount', n_megrefchan)]\n ch_info_json_ieeg = [\n ('ECOGChannelCount', n_ecogchan),\n ('SEEGChannelCount', n_seegchan)]\n ch_info_ch_counts = [\n ('EEGChannelCount', n_eegchan),\n ('EOGChannelCount', n_eogchan),\n ('ECGChannelCount', n_ecgchan),\n ('EMGChannelCount', n_emgchan),\n ('MiscChannelCount', n_miscchan),\n ('TriggerChannelCount', n_stimchan)]\n\n # Stitch together the complete JSON dictionary\n ch_info_json = ch_info_json_common\n if kind == 'meg':\n append_kind_json = ch_info_json_meg\n elif kind == 'ieeg':\n append_kind_json = ch_info_json_ieeg\n else:\n raise ValueError('Unexpected \"kind\": {}'\n ' Use one of: {}'.format(kind, ALLOWED_KINDS))\n\n ch_info_json += append_kind_json\n ch_info_json += ch_info_ch_counts\n ch_info_json = OrderedDict(ch_info_json)\n\n _write_json(ch_info_json, fname, verbose=verbose)\n return fname\n\n\ndef raw_to_bids(subject_id, task, raw_file, output_path, session_id=None,\n run=None, kind='meg', events_data=None, event_id=None,\n hpi=None, electrode=None, hsp=None, config=None,\n overwrite=True, verbose=True):\n \"\"\"Walk over a folder of files and create BIDS compatible folder.\n\n Parameters\n ----------\n subject_id : str\n The subject name in BIDS compatible format ('01', '02', 
etc.)\n task : str\n Name of the task the data is based on.\n raw_file : str | instance of mne.Raw\n The raw data. If a string, it is assumed to be the path to the raw data\n file. Otherwise it must be an instance of mne.Raw\n output_path : str\n The path of the BIDS compatible folder\n session_id : str | None\n The session name in BIDS compatible format.\n run : int | None\n The run number for this dataset.\n kind : str, one of ('meg', 'ieeg')\n The kind of data being converted. Defaults to \"meg\".\n events_data : str | array | None\n The events file. If a string, a path to the events file. If an array,\n the MNE events array (shape n_events, 3). If None, events will be\n inferred from the stim channel using `mne.find_events`.\n event_id : dict | None\n The event id dict used to create a 'trial_type' column in events.tsv\n hpi : None | str | list of str\n Marker points representing the location of the marker coils with\n respect to the MEG Sensors, or path to a marker file.\n If list, all of the markers will be averaged together.\n electrode : None | str\n Digitizer points representing the location of the fiducials and the\n marker coils with respect to the digitized head shape, or path to a\n file containing these points.\n hsp : None | str | array, shape = (n_points, 3)\n Digitizer head shape points, or path to head shape file. If more than\n 10`000 points are in the head shape, they are automatically decimated.\n config : str | None\n A path to the configuration file to use if the data is from a BTi\n system.\n overwrite : bool\n If the file already exists, whether to overwrite it.\n verbose : bool\n If verbose is True, this will print a snippet of the sidecar files. If\n False, no content will be printed.\n\n \"\"\"\n if isinstance(raw_file, string_types):\n # We must read in the raw data\n raw = _read_raw(raw_file, electrode=electrode, hsp=hsp, hpi=hpi,\n config=config, verbose=verbose)\n _, ext = _parse_ext(raw_file, verbose=verbose)\n raw_fname = raw_file\n elif isinstance(raw_file, BaseRaw):\n # We got a raw mne object, get back the filename if possible\n # Assume that if no filename attr exists, it's a fif file.\n raw = raw_file.copy()\n if hasattr(raw, 'filenames'):\n _, ext = _parse_ext(raw.filenames[0], verbose=verbose)\n raw_fname = raw.filenames[0]\n else:\n # FIXME: How to get the filename if no filenames attribute?\n raw_fname = 'unknown_file_name'\n ext = '.fif'\n else:\n raise ValueError('raw_file must be an instance of str or BaseRaw, '\n 'got %s' % type(raw_file))\n data_path = make_bids_folders(subject=subject_id, session=session_id,\n kind=kind, root=output_path,\n overwrite=overwrite,\n verbose=verbose)\n if session_id is None:\n ses_path = data_path\n else:\n ses_path = make_bids_folders(subject=subject_id, session=session_id,\n root=output_path,\n overwrite=False,\n verbose=verbose)\n\n # create filenames\n scans_fname = make_bids_filename(\n subject=subject_id, session=session_id, suffix='scans.tsv',\n prefix=ses_path)\n\n coordsystem_fname = make_bids_filename(\n subject=subject_id, session=session_id,\n suffix='coordsystem.json', prefix=data_path)\n data_meta_fname = make_bids_filename(\n subject=subject_id, session=session_id, task=task, run=run,\n suffix='%s.json' % kind, prefix=data_path)\n if ext in ['.fif', '.gz', '.ds']:\n raw_file_bids = make_bids_filename(\n subject=subject_id, session=session_id, task=task, run=run,\n suffix='%s%s' % (kind, ext))\n else:\n raw_folder = make_bids_filename(\n subject=subject_id, session=session_id, task=task, run=run,\n 
suffix='%s' % kind)\n raw_file_bids = make_bids_filename(\n subject=subject_id, session=session_id, task=task, run=run,\n suffix='%s%s' % (kind, ext), prefix=raw_folder)\n events_tsv_fname = make_bids_filename(\n subject=subject_id, session=session_id, task=task,\n run=run, suffix='events.tsv', prefix=data_path)\n channels_fname = make_bids_filename(\n subject=subject_id, session=session_id, task=task, run=run,\n suffix='channels.tsv', prefix=data_path)\n\n # Read in Raw object and extract metadata from Raw object if needed\n if kind == 'meg':\n orient = orientation[ext]\n unit = units[ext]\n manufacturer = manufacturers[ext]\n else:\n orient = 'n/a'\n unit = 'n/a'\n manufacturer = 'n/a'\n\n # save stuff\n if kind == 'meg':\n _scans_tsv(raw, os.path.join(kind, raw_file_bids), scans_fname,\n verbose)\n _coordsystem_json(raw, unit, orient, manufacturer, coordsystem_fname,\n verbose)\n\n make_dataset_description(output_path, name=\" \",\n verbose=verbose)\n _sidecar_json(raw, task, manufacturer, data_meta_fname, kind,\n verbose)\n _channels_tsv(raw, channels_fname, verbose)\n\n events = _read_events(events_data, raw)\n if len(events) > 0:\n _events_tsv(events, raw, events_tsv_fname, event_id, verbose)\n\n # set the raw file name to now be the absolute path to ensure the files\n # are placed in the right location\n raw_file_bids = os.path.join(data_path, raw_file_bids)\n if os.path.exists(raw_file_bids) and not overwrite:\n raise ValueError('\"%s\" already exists. Please set'\n ' overwrite to True.' % raw_file_bids)\n _mkdir_p(os.path.dirname(raw_file_bids))\n\n if verbose:\n print('Writing data files to %s' % raw_file_bids)\n\n if ext not in ALLOWED_EXTENSIONS:\n raise ValueError('ext must be in %s, got %s'\n % (''.join(ALLOWED_EXTENSIONS), ext))\n\n # for FIF, we need to re-save the file to fix the file pointer\n # for files with multiple parts\n if ext in ['.fif', '.gz']:\n raw.save(raw_file_bids, overwrite=overwrite)\n elif ext == '.ds':\n sh.copytree(raw_fname, raw_file_bids)\n else:\n sh.copyfile(raw_fname, raw_file_bids)\n\n return output_path\n", "path": "mne_bids/mne_bids.py"}], "after_files": [{"content": "\"\"\"Check whether a file format is supported by BIDS and then load it.\"\"\"\n# Authors: Mainak Jas <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# Teon Brooks <[email protected]>\n# Chris Holdgraf <[email protected]>\n# Stefan Appelhoff <[email protected]>\n#\n# License: BSD (3-clause)\nfrom mne import io\nimport os\n\nALLOWED_EXTENSIONS = ['.con', '.sqd', '.fif', '.pdf', '.ds']\n\n\ndef _parse_ext(raw_fname, verbose=False):\n \"\"\"Split a filename into its name and extension.\"\"\"\n fname, ext = os.path.splitext(raw_fname)\n # BTi data is the only file format that does not have a file extension\n if ext == '':\n if verbose is True:\n print('Found no extension for raw file, assuming \"BTi\" format and '\n 'appending extension .pdf')\n ext = '.pdf'\n return fname, ext\n\n\ndef _read_raw(raw_fname, electrode=None, hsp=None, hpi=None, config=None,\n verbose=None):\n \"\"\"Read a raw file into MNE, making inferences based on extension.\"\"\"\n fname, ext = _parse_ext(raw_fname)\n\n # MEG File Types\n # --------------\n # KIT systems\n if ext in ['.con', '.sqd']:\n raw = io.read_raw_kit(raw_fname, elp=electrode, hsp=hsp,\n mrk=hpi, preload=False)\n\n # Neuromag or converted-to-fif systems\n elif ext in ['.fif']:\n raw = io.read_raw_fif(raw_fname, preload=False)\n\n # BTi systems\n elif ext == '.pdf':\n if os.path.isfile(raw_fname):\n raw = 
io.read_raw_bti(raw_fname, config_fname=config,\n head_shape_fname=hsp,\n preload=False, verbose=verbose)\n\n # CTF systems\n elif ext == '.ds':\n raw = io.read_raw_ctf(raw_fname)\n\n # No supported data found ...\n # ---------------------------\n else:\n raise ValueError(\"Raw file name extension must be one of %\\n\"\n \"Got %\" % (ALLOWED_EXTENSIONS, ext))\n return raw\n", "path": "mne_bids/io.py"}, {"content": "\"\"\"Make BIDS compatible directory structures and infer meta data from MNE.\"\"\"\n# Authors: Mainak Jas <[email protected]>\n# Alexandre Gramfort <[email protected]>\n# Teon Brooks <[email protected]>\n# Chris Holdgraf <[email protected]>\n# Stefan Appelhoff <[email protected]>\n#\n# License: BSD (3-clause)\n\nimport os\nimport shutil as sh\nimport pandas as pd\nfrom collections import defaultdict, OrderedDict\n\nimport numpy as np\nfrom mne.io.constants import FIFF\nfrom mne.io.pick import channel_type\nfrom mne.io import BaseRaw\nfrom mne.channels.channels import _unit2human\nfrom mne.externals.six import string_types\n\nfrom datetime import datetime\nfrom warnings import warn\n\nfrom .utils import (make_bids_filename, make_bids_folders,\n make_dataset_description, _write_json,\n _read_events, _mkdir_p)\nfrom .io import (_parse_ext, _read_raw, ALLOWED_EXTENSIONS)\n\n\nALLOWED_KINDS = ['meg', 'ieeg']\norientation = {'.sqd': 'ALS', '.con': 'ALS', '.fif': 'RAS', '.pdf': 'ALS',\n '.ds': 'ALS'}\n\nunits = {'.sqd': 'm', '.con': 'm', '.fif': 'm', '.pdf': 'm', '.ds': 'cm'}\n\nmanufacturers = {'.sqd': 'KIT/Yokogawa', '.con': 'KIT/Yokogawa',\n '.fif': 'Elekta', '.pdf': '4D Magnes', '.ds': 'CTF'}\n\n\ndef _channels_tsv(raw, fname, verbose):\n \"\"\"Create a channels.tsv file and save it.\n\n Parameters\n ----------\n raw : instance of Raw\n The data as MNE-Python Raw object.\n fname : str\n Filename to save the channels.tsv to.\n verbose : bool\n Set verbose output to true or false.\n\n \"\"\"\n map_chs = defaultdict(lambda: 'OTHER')\n map_chs.update(grad='MEGGRAD', mag='MEGMAG', stim='TRIG', eeg='EEG',\n ecog='ECOG', seeg='SEEG', eog='EOG', ecg='ECG', misc='MISC',\n resp='RESPONSE', ref_meg='REFMEG')\n map_desc = defaultdict(lambda: 'Other type of channel')\n map_desc.update(grad='Gradiometer', mag='Magnetometer',\n stim='Trigger',\n eeg='ElectroEncephaloGram',\n ecog='Electrocorticography',\n seeg='StereoEEG',\n ecg='ElectroCardioGram',\n eog='ElectrOculoGram', misc='Miscellaneous',\n ref_meg='Reference channel')\n\n status, ch_type, description = list(), list(), list()\n for idx, ch in enumerate(raw.info['ch_names']):\n status.append('bad' if ch in raw.info['bads'] else 'good')\n ch_type.append(map_chs[channel_type(raw.info, idx)])\n description.append(map_desc[channel_type(raw.info, idx)])\n low_cutoff, high_cutoff = (raw.info['highpass'], raw.info['lowpass'])\n units = [_unit2human.get(ch_i['unit'], 'n/a') for ch_i in raw.info['chs']]\n units = [u if u not in ['NA'] else 'n/a' for u in units]\n n_channels = raw.info['nchan']\n sfreq = raw.info['sfreq']\n\n df = pd.DataFrame(OrderedDict([\n ('name', raw.info['ch_names']),\n ('type', ch_type),\n ('units', units),\n ('description', description),\n ('sampling_frequency', np.full((n_channels), sfreq)),\n ('low_cutoff', np.full((n_channels), low_cutoff)),\n ('high_cutoff', np.full((n_channels), high_cutoff)),\n ('status', status)]))\n df.to_csv(fname, sep='\\t', index=False, na_rep='n/a')\n\n if verbose:\n print(os.linesep + \"Writing '%s'...\" % fname + os.linesep)\n print(df.head())\n\n return fname\n\n\ndef _events_tsv(events, raw, 
fname, trial_type, verbose):\n \"\"\"Create an events.tsv file and save it.\n\n This function will write the mandatory 'onset', and 'duration' columns as\n well as the optional 'event_value' and 'event_sample'. The 'event_value'\n corresponds to the marker value as found in the TRIG channel of the\n recording. In addition, the 'trial_type' field can be written.\n\n Parameters\n ----------\n events : array, shape = (n_events, 3)\n The first column contains the event time in samples and the third\n column contains the event id. The second column is ignored for now but\n typically contains the value of the trigger channel either immediately\n before the event or immediately after.\n raw : instance of Raw\n The data as MNE-Python Raw object.\n fname : str\n Filename to save the events.tsv to.\n event_id : dict | None\n Dictionary mapping a brief description key to an event id (value). For\n example {'Go': 1, 'No Go': 2}.\n verbose : bool\n Set verbose output to true or false.\n\n Notes\n -----\n The function writes durations of zero for each event.\n\n \"\"\"\n # Start by filling all data that we know into a df\n first_samp = raw.first_samp\n sfreq = raw.info['sfreq']\n events[:, 0] -= first_samp\n\n data = OrderedDict([('onset', events[:, 0]),\n ('duration', np.zeros(events.shape[0])),\n ('trial_type', events[:, 2]),\n ('event_value', events[:, 2]),\n ('event_sample', events[:, 0])])\n\n df = pd.DataFrame.from_dict(data)\n\n # Now check if trial_type is specified or should be removed\n if trial_type:\n trial_type_map = {v: k for k, v in trial_type.items()}\n df.trial_type = df.trial_type.map(trial_type_map)\n else:\n df.drop(labels=['trial_type'], axis=1, inplace=True)\n\n # Onset column needs to be specified in seconds\n df.onset /= sfreq\n\n # Save to file\n df.to_csv(fname, sep='\\t', index=False, na_rep='n/a')\n if verbose:\n print(os.linesep + \"Writing '%s'...\" % fname + os.linesep)\n print(df.head())\n\n return fname\n\n\ndef _scans_tsv(raw, raw_fname, fname, verbose):\n \"\"\"Create a scans.tsv file and save it.\n\n Parameters\n ----------\n raw : instance of Raw\n The data as MNE-Python Raw object.\n raw_fname : str\n Relative path to the raw data file.\n fname : str\n Filename to save the scans.tsv to.\n verbose : bool\n Set verbose output to true or false.\n\n \"\"\"\n # get MEASurement date from the data info\n meas_date = raw.info['meas_date']\n if isinstance(meas_date, (np.ndarray, list)):\n meas_date = meas_date[0]\n\n if meas_date is None:\n acq_time = 'n/a'\n else:\n acq_time = datetime.fromtimestamp(\n meas_date).strftime('%Y-%m-%dT%H:%M:%S')\n\n df = pd.DataFrame(data={'filename': ['%s' % raw_fname],\n 'acq_time': [acq_time]},\n columns=['filename', 'acq_time'])\n\n df.to_csv(fname, sep='\\t', index=False, na_rep='n/a')\n\n if verbose:\n print(os.linesep + \"Writing '%s'...\" % fname + os.linesep)\n print(df.head())\n\n return fname\n\n\ndef _coordsystem_json(raw, unit, orient, manufacturer, fname, verbose):\n \"\"\"Create a coordsystem.json file and save it.\n\n Parameters\n ----------\n raw : instance of Raw\n The data as MNE-Python Raw object.\n unit : str\n Units to be used in the coordsystem specification.\n orient : str\n Used to define the coordinate system for the head coils.\n manufacturer : str\n Used to define the coordinate system for the MEG sensors.\n fname : str\n Filename to save the coordsystem.json to.\n verbose : bool\n Set verbose output to true or false.\n\n \"\"\"\n dig = raw.info['dig']\n coords = dict()\n fids = {d['ident']: d for d in dig if 
d['kind'] ==\n FIFF.FIFFV_POINT_CARDINAL}\n if fids:\n if FIFF.FIFFV_POINT_NASION in fids:\n coords['NAS'] = fids[FIFF.FIFFV_POINT_NASION]['r'].tolist()\n if FIFF.FIFFV_POINT_LPA in fids:\n coords['LPA'] = fids[FIFF.FIFFV_POINT_LPA]['r'].tolist()\n if FIFF.FIFFV_POINT_RPA in fids:\n coords['RPA'] = fids[FIFF.FIFFV_POINT_RPA]['r'].tolist()\n\n hpi = {d['ident']: d for d in dig if d['kind'] == FIFF.FIFFV_POINT_HPI}\n if hpi:\n for ident in hpi.keys():\n coords['coil%d' % ident] = hpi[ident]['r'].tolist()\n\n coord_frame = set([dig[ii]['coord_frame'] for ii in range(len(dig))])\n if len(coord_frame) > 1:\n err = 'All HPI and Fiducials must be in the same coordinate frame.'\n raise ValueError(err)\n\n fid_json = {'MEGCoordinateSystem': manufacturer,\n 'MEGCoordinateUnits': unit, # XXX validate this\n 'HeadCoilCoordinates': coords,\n 'HeadCoilCoordinateSystem': orient,\n 'HeadCoilCoordinateUnits': unit # XXX validate this\n }\n _write_json(fid_json, fname)\n\n return fname\n\n\ndef _sidecar_json(raw, task, manufacturer, fname, kind,\n verbose=True):\n \"\"\"Create a sidecar json file depending on the kind and save it.\n\n The sidecar json file provides meta data about the data of a certain kind.\n\n Parameters\n ----------\n raw : instance of Raw\n The data as MNE-Python Raw object.\n task : str\n Name of the task the data is based on.\n manufacturer : str\n Manufacturer of the acquisition system. For MEG also used to define the\n coordinate system for the MEG sensors.\n fname : str\n Filename to save the sidecar json to.\n kind : str\n Type of the data as in ALLOWED_KINDS.\n verbose : bool\n Set verbose output to true or false. Defaults to true.\n\n \"\"\"\n sfreq = raw.info['sfreq']\n powerlinefrequency = raw.info.get('line_freq', None)\n if powerlinefrequency is None:\n warn('No line frequency found, defaulting to 50 Hz')\n powerlinefrequency = 50\n\n n_megchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_MEG_CH])\n n_megrefchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_REF_MEG_CH])\n n_eegchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_EEG_CH])\n n_ecogchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_ECOG_CH])\n n_seegchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_SEEG_CH])\n n_eogchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_EOG_CH])\n n_ecgchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_ECG_CH])\n n_emgchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_EMG_CH])\n n_miscchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_MISC_CH])\n n_stimchan = len([ch for ch in raw.info['chs']\n if ch['kind'] == FIFF.FIFFV_STIM_CH])\n\n # Define modality-specific JSON dictionaries\n ch_info_json_common = [\n ('TaskName', task),\n ('Manufacturer', manufacturer),\n ('PowerLineFrequency', powerlinefrequency)]\n ch_info_json_meg = [\n ('SamplingFrequency', sfreq),\n (\"DewarPosition\", \"XXX\"),\n (\"DigitizedLandmarks\", False),\n (\"DigitizedHeadPoints\", False),\n (\"SoftwareFilters\", \"n/a\"),\n ('MEGChannelCount', n_megchan),\n ('MEGREFChannelCount', n_megrefchan)]\n ch_info_json_ieeg = [\n ('ECOGChannelCount', n_ecogchan),\n ('SEEGChannelCount', n_seegchan)]\n ch_info_ch_counts = [\n ('EEGChannelCount', n_eegchan),\n ('EOGChannelCount', n_eogchan),\n ('ECGChannelCount', n_ecgchan),\n ('EMGChannelCount', n_emgchan),\n ('MiscChannelCount', n_miscchan),\n ('TriggerChannelCount', n_stimchan)]\n\n # Stitch 
together the complete JSON dictionary\n ch_info_json = ch_info_json_common\n if kind == 'meg':\n append_kind_json = ch_info_json_meg\n elif kind == 'ieeg':\n append_kind_json = ch_info_json_ieeg\n else:\n raise ValueError('Unexpected \"kind\": {}'\n ' Use one of: {}'.format(kind, ALLOWED_KINDS))\n\n ch_info_json += append_kind_json\n ch_info_json += ch_info_ch_counts\n ch_info_json = OrderedDict(ch_info_json)\n\n _write_json(ch_info_json, fname, verbose=verbose)\n return fname\n\n\ndef raw_to_bids(subject_id, task, raw_file, output_path, session_id=None,\n run=None, kind='meg', events_data=None, event_id=None,\n hpi=None, electrode=None, hsp=None, config=None,\n overwrite=True, verbose=True):\n \"\"\"Walk over a folder of files and create BIDS compatible folder.\n\n Parameters\n ----------\n subject_id : str\n The subject name in BIDS compatible format ('01', '02', etc.)\n task : str\n Name of the task the data is based on.\n raw_file : str | instance of mne.Raw\n The raw data. If a string, it is assumed to be the path to the raw data\n file. Otherwise it must be an instance of mne.Raw\n output_path : str\n The path of the BIDS compatible folder\n session_id : str | None\n The session name in BIDS compatible format.\n run : int | None\n The run number for this dataset.\n kind : str, one of ('meg', 'ieeg')\n The kind of data being converted. Defaults to \"meg\".\n events_data : str | array | None\n The events file. If a string, a path to the events file. If an array,\n the MNE events array (shape n_events, 3). If None, events will be\n inferred from the stim channel using `mne.find_events`.\n event_id : dict | None\n The event id dict used to create a 'trial_type' column in events.tsv\n hpi : None | str | list of str\n Marker points representing the location of the marker coils with\n respect to the MEG Sensors, or path to a marker file.\n If list, all of the markers will be averaged together.\n electrode : None | str\n Digitizer points representing the location of the fiducials and the\n marker coils with respect to the digitized head shape, or path to a\n file containing these points.\n hsp : None | str | array, shape = (n_points, 3)\n Digitizer head shape points, or path to head shape file. If more than\n 10`000 points are in the head shape, they are automatically decimated.\n config : str | None\n A path to the configuration file to use if the data is from a BTi\n system.\n overwrite : bool\n If the file already exists, whether to overwrite it.\n verbose : bool\n If verbose is True, this will print a snippet of the sidecar files. 
If\n False, no content will be printed.\n\n \"\"\"\n if isinstance(raw_file, string_types):\n # We must read in the raw data\n raw = _read_raw(raw_file, electrode=electrode, hsp=hsp, hpi=hpi,\n config=config, verbose=verbose)\n _, ext = _parse_ext(raw_file, verbose=verbose)\n raw_fname = raw_file\n elif isinstance(raw_file, BaseRaw):\n # We got a raw mne object, get back the filename if possible\n # Assume that if no filename attr exists, it's a fif file.\n raw = raw_file.copy()\n if hasattr(raw, 'filenames'):\n _, ext = _parse_ext(raw.filenames[0], verbose=verbose)\n raw_fname = raw.filenames[0]\n else:\n # FIXME: How to get the filename if no filenames attribute?\n raw_fname = 'unknown_file_name'\n ext = '.fif'\n else:\n raise ValueError('raw_file must be an instance of str or BaseRaw, '\n 'got %s' % type(raw_file))\n data_path = make_bids_folders(subject=subject_id, session=session_id,\n kind=kind, root=output_path,\n overwrite=overwrite,\n verbose=verbose)\n if session_id is None:\n ses_path = data_path\n else:\n ses_path = make_bids_folders(subject=subject_id, session=session_id,\n root=output_path,\n overwrite=False,\n verbose=verbose)\n\n # create filenames\n scans_fname = make_bids_filename(\n subject=subject_id, session=session_id, suffix='scans.tsv',\n prefix=ses_path)\n\n coordsystem_fname = make_bids_filename(\n subject=subject_id, session=session_id,\n suffix='coordsystem.json', prefix=data_path)\n data_meta_fname = make_bids_filename(\n subject=subject_id, session=session_id, task=task, run=run,\n suffix='%s.json' % kind, prefix=data_path)\n if ext in ['.fif', '.ds']:\n raw_file_bids = make_bids_filename(\n subject=subject_id, session=session_id, task=task, run=run,\n suffix='%s%s' % (kind, ext))\n else:\n raw_folder = make_bids_filename(\n subject=subject_id, session=session_id, task=task, run=run,\n suffix='%s' % kind)\n raw_file_bids = make_bids_filename(\n subject=subject_id, session=session_id, task=task, run=run,\n suffix='%s%s' % (kind, ext), prefix=raw_folder)\n events_tsv_fname = make_bids_filename(\n subject=subject_id, session=session_id, task=task,\n run=run, suffix='events.tsv', prefix=data_path)\n channels_fname = make_bids_filename(\n subject=subject_id, session=session_id, task=task, run=run,\n suffix='channels.tsv', prefix=data_path)\n\n # Read in Raw object and extract metadata from Raw object if needed\n if kind == 'meg':\n orient = orientation[ext]\n unit = units[ext]\n manufacturer = manufacturers[ext]\n else:\n orient = 'n/a'\n unit = 'n/a'\n manufacturer = 'n/a'\n\n # save stuff\n if kind == 'meg':\n _scans_tsv(raw, os.path.join(kind, raw_file_bids), scans_fname,\n verbose)\n _coordsystem_json(raw, unit, orient, manufacturer, coordsystem_fname,\n verbose)\n\n make_dataset_description(output_path, name=\" \",\n verbose=verbose)\n _sidecar_json(raw, task, manufacturer, data_meta_fname, kind,\n verbose)\n _channels_tsv(raw, channels_fname, verbose)\n\n events = _read_events(events_data, raw)\n if len(events) > 0:\n _events_tsv(events, raw, events_tsv_fname, event_id, verbose)\n\n # set the raw file name to now be the absolute path to ensure the files\n # are placed in the right location\n raw_file_bids = os.path.join(data_path, raw_file_bids)\n if os.path.exists(raw_file_bids) and not overwrite:\n raise ValueError('\"%s\" already exists. Please set'\n ' overwrite to True.' 
% raw_file_bids)\n _mkdir_p(os.path.dirname(raw_file_bids))\n\n if verbose:\n print('Writing data files to %s' % raw_file_bids)\n\n if ext not in ALLOWED_EXTENSIONS:\n raise ValueError('ext must be in %s, got %s'\n % (''.join(ALLOWED_EXTENSIONS), ext))\n\n # for FIF, we need to re-save the file to fix the file pointer\n # for files with multiple parts\n if ext in ['.fif']:\n raw.save(raw_file_bids, overwrite=overwrite)\n elif ext == '.ds':\n sh.copytree(raw_fname, raw_file_bids)\n else:\n sh.copyfile(raw_fname, raw_file_bids)\n\n return output_path\n", "path": "mne_bids/mne_bids.py"}]}
gh_patches_debug_32737
rasdani/github-patches
git_diff
dask__dask-586
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Additional improvement for da.image.imread I think following 2 improvements make the function better. #### 1. Allow custom `imread` function. Some prefer `opencv` which reads color in BGR order, otherwise `skimage` reads in RGB order. Adding `dialect` option (like `dialect='skimage'` or `dialect='cv'`) or accept different file read function may better. #### 2. Allow custom preprocessing function. In image processing, input images may have different sizes. In these case, preprocessing is needed between image read and array creation. This preprocessing function must return the same size of array on user's responsibility. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dask/array/image.py` Content: ``` 1 from glob import glob 2 import os 3 4 try: 5 from skimage.io import imread as sk_imread 6 except ImportError: 7 pass 8 9 from .core import Array 10 from ..base import tokenize 11 12 def add_leading_dimension(x): 13 return x[None, ...] 14 15 16 def imread(filename): 17 """ Read a stack of images into a dask array 18 19 Parameters 20 ---------- 21 22 filename: string 23 A globstring like 'myfile.*.png' 24 25 Example 26 ------- 27 28 >>> from dask.array.image import imread 29 >>> im = imread('2015-*-*.png') # doctest: +SKIP 30 >>> im.shape # doctest: +SKIP 31 (365, 1000, 1000, 3) 32 33 Returns 34 ------- 35 36 Dask array of all images stacked along the first dimension. All images 37 will be treated as individual chunks 38 """ 39 filenames = sorted(glob(filename)) 40 if not filenames: 41 raise ValueError("No files found under name %s" % filename) 42 43 name = 'imread-%s' % tokenize(filenames, map(os.path.getmtime, filenames)) 44 45 sample = sk_imread(filenames[0]) 46 47 dsk = dict(((name, i) + (0,) * len(sample.shape), 48 (add_leading_dimension, (sk_imread, filename))) 49 for i, filename in enumerate(filenames)) 50 51 chunks = ((1,) * len(filenames),) + tuple((d,) for d in sample.shape) 52 53 return Array(dsk, name, chunks, sample.dtype) 54 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dask/array/image.py b/dask/array/image.py --- a/dask/array/image.py +++ b/dask/array/image.py @@ -13,7 +13,7 @@ return x[None, ...] -def imread(filename): +def imread(filename, imread=None, preprocess=None): """ Read a stack of images into a dask array Parameters @@ -21,6 +21,13 @@ filename: string A globstring like 'myfile.*.png' + imread: function (optional) + Optionally provide custom imread function. + Function should expect a filename and produce a numpy array. + Defaults to ``skimage.io.imread``. + preprocess: function (optional) + Optionally provide custom function to preprocess the image. + Function should expect a numpy array for a single image. Example ------- @@ -36,17 +43,25 @@ Dask array of all images stacked along the first dimension. All images will be treated as individual chunks """ + imread = imread or sk_imread filenames = sorted(glob(filename)) if not filenames: raise ValueError("No files found under name %s" % filename) name = 'imread-%s' % tokenize(filenames, map(os.path.getmtime, filenames)) - sample = sk_imread(filenames[0]) - - dsk = dict(((name, i) + (0,) * len(sample.shape), - (add_leading_dimension, (sk_imread, filename))) - for i, filename in enumerate(filenames)) + sample = imread(filenames[0]) + if preprocess: + sample = preprocess(sample) + + keys = [(name, i) + (0,) * len(sample.shape) for i in range(len(filenames))] + if preprocess: + values = [(add_leading_dimension, (preprocess, (imread, filename))) + for filename in filenames] + else: + values = [(add_leading_dimension, (imread, filename)) + for filename in filenames] + dsk = dict(zip(keys, values)) chunks = ((1,) * len(filenames),) + tuple((d,) for d in sample.shape)
{"golden_diff": "diff --git a/dask/array/image.py b/dask/array/image.py\n--- a/dask/array/image.py\n+++ b/dask/array/image.py\n@@ -13,7 +13,7 @@\n return x[None, ...]\n \n \n-def imread(filename):\n+def imread(filename, imread=None, preprocess=None):\n \"\"\" Read a stack of images into a dask array\n \n Parameters\n@@ -21,6 +21,13 @@\n \n filename: string\n A globstring like 'myfile.*.png'\n+ imread: function (optional)\n+ Optionally provide custom imread function.\n+ Function should expect a filename and produce a numpy array.\n+ Defaults to ``skimage.io.imread``.\n+ preprocess: function (optional)\n+ Optionally provide custom function to preprocess the image.\n+ Function should expect a numpy array for a single image.\n \n Example\n -------\n@@ -36,17 +43,25 @@\n Dask array of all images stacked along the first dimension. All images\n will be treated as individual chunks\n \"\"\"\n+ imread = imread or sk_imread\n filenames = sorted(glob(filename))\n if not filenames:\n raise ValueError(\"No files found under name %s\" % filename)\n \n name = 'imread-%s' % tokenize(filenames, map(os.path.getmtime, filenames))\n \n- sample = sk_imread(filenames[0])\n-\n- dsk = dict(((name, i) + (0,) * len(sample.shape),\n- (add_leading_dimension, (sk_imread, filename)))\n- for i, filename in enumerate(filenames))\n+ sample = imread(filenames[0])\n+ if preprocess:\n+ sample = preprocess(sample)\n+\n+ keys = [(name, i) + (0,) * len(sample.shape) for i in range(len(filenames))]\n+ if preprocess:\n+ values = [(add_leading_dimension, (preprocess, (imread, filename)))\n+ for filename in filenames]\n+ else:\n+ values = [(add_leading_dimension, (imread, filename))\n+ for filename in filenames]\n+ dsk = dict(zip(keys, values))\n \n chunks = ((1,) * len(filenames),) + tuple((d,) for d in sample.shape)\n", "issue": "Additional improvement for da.image.imread\nI think following 2 improvements make the function better.\n#### 1. Allow custom `imread` function.\n\nSome prefer `opencv` which reads color in BGR order, otherwise `skimage` reads in RGB order. Adding `dialect` option (like `dialect='skimage'` or `dialect='cv'`) or accept different file read function may better.\n#### 2. Allow custom preprocessing function.\n\nIn image processing, input images may have different sizes. In these case, preprocessing is needed between image read and array creation. This preprocessing function must return the same size of array on user's responsibility.\n\n", "before_files": [{"content": "from glob import glob\nimport os\n\ntry:\n from skimage.io import imread as sk_imread\nexcept ImportError:\n pass\n\nfrom .core import Array\nfrom ..base import tokenize\n\ndef add_leading_dimension(x):\n return x[None, ...]\n\n\ndef imread(filename):\n \"\"\" Read a stack of images into a dask array\n\n Parameters\n ----------\n\n filename: string\n A globstring like 'myfile.*.png'\n\n Example\n -------\n\n >>> from dask.array.image import imread\n >>> im = imread('2015-*-*.png') # doctest: +SKIP\n >>> im.shape # doctest: +SKIP\n (365, 1000, 1000, 3)\n\n Returns\n -------\n\n Dask array of all images stacked along the first dimension. 
All images\n will be treated as individual chunks\n \"\"\"\n filenames = sorted(glob(filename))\n if not filenames:\n raise ValueError(\"No files found under name %s\" % filename)\n\n name = 'imread-%s' % tokenize(filenames, map(os.path.getmtime, filenames))\n\n sample = sk_imread(filenames[0])\n\n dsk = dict(((name, i) + (0,) * len(sample.shape),\n (add_leading_dimension, (sk_imread, filename)))\n for i, filename in enumerate(filenames))\n\n chunks = ((1,) * len(filenames),) + tuple((d,) for d in sample.shape)\n\n return Array(dsk, name, chunks, sample.dtype)\n", "path": "dask/array/image.py"}], "after_files": [{"content": "from glob import glob\nimport os\n\ntry:\n from skimage.io import imread as sk_imread\nexcept ImportError:\n pass\n\nfrom .core import Array\nfrom ..base import tokenize\n\ndef add_leading_dimension(x):\n return x[None, ...]\n\n\ndef imread(filename, imread=None, preprocess=None):\n \"\"\" Read a stack of images into a dask array\n\n Parameters\n ----------\n\n filename: string\n A globstring like 'myfile.*.png'\n imread: function (optional)\n Optionally provide custom imread function.\n Function should expect a filename and produce a numpy array.\n Defaults to ``skimage.io.imread``.\n preprocess: function (optional)\n Optionally provide custom function to preprocess the image.\n Function should expect a numpy array for a single image.\n\n Example\n -------\n\n >>> from dask.array.image import imread\n >>> im = imread('2015-*-*.png') # doctest: +SKIP\n >>> im.shape # doctest: +SKIP\n (365, 1000, 1000, 3)\n\n Returns\n -------\n\n Dask array of all images stacked along the first dimension. All images\n will be treated as individual chunks\n \"\"\"\n imread = imread or sk_imread\n filenames = sorted(glob(filename))\n if not filenames:\n raise ValueError(\"No files found under name %s\" % filename)\n\n name = 'imread-%s' % tokenize(filenames, map(os.path.getmtime, filenames))\n\n sample = imread(filenames[0])\n if preprocess:\n sample = preprocess(sample)\n\n keys = [(name, i) + (0,) * len(sample.shape) for i in range(len(filenames))]\n if preprocess:\n values = [(add_leading_dimension, (preprocess, (imread, filename)))\n for filename in filenames]\n else:\n values = [(add_leading_dimension, (imread, filename))\n for filename in filenames]\n dsk = dict(zip(keys, values))\n\n chunks = ((1,) * len(filenames),) + tuple((d,) for d in sample.shape)\n\n return Array(dsk, name, chunks, sample.dtype)\n", "path": "dask/array/image.py"}]}
gh_patches_debug_19722
rasdani/github-patches
git_diff
iterative__dvc-7283
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 'None' does not contain DVC directory ```console cd "$(mktemp -d)" dvc add foo # or any other command ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dvc/repo/__init__.py` Content: ``` 1 import logging 2 import os 3 from collections import defaultdict 4 from contextlib import contextmanager 5 from functools import wraps 6 from typing import TYPE_CHECKING, Callable, Optional, Set 7 8 from funcy import cached_property 9 10 from dvc.exceptions import FileMissingError 11 from dvc.exceptions import IsADirectoryError as DvcIsADirectoryError 12 from dvc.exceptions import NotDvcRepoError, OutputNotFoundError 13 from dvc.ignore import DvcIgnoreFilter 14 from dvc.utils import env2bool 15 from dvc.utils.fs import path_isin 16 17 if TYPE_CHECKING: 18 from dvc.fs.base import FileSystem 19 from dvc.objects.file import HashFile 20 from dvc.repo.scm_context import SCMContext 21 22 logger = logging.getLogger(__name__) 23 24 25 @contextmanager 26 def lock_repo(repo: "Repo"): 27 # pylint: disable=protected-access 28 depth = repo._lock_depth 29 repo._lock_depth += 1 30 31 try: 32 if depth > 0: 33 yield 34 else: 35 with repo.lock: 36 repo._reset() 37 yield 38 # Graph cache is no longer valid after we release the repo.lock 39 repo._reset() 40 finally: 41 repo._lock_depth = depth 42 43 44 def locked(f): 45 @wraps(f) 46 def wrapper(repo, *args, **kwargs): 47 with lock_repo(repo): 48 return f(repo, *args, **kwargs) 49 50 return wrapper 51 52 53 class Repo: 54 DVC_DIR = ".dvc" 55 56 from dvc.repo.add import add 57 from dvc.repo.checkout import checkout 58 from dvc.repo.commit import commit 59 from dvc.repo.destroy import destroy 60 from dvc.repo.diff import diff 61 from dvc.repo.fetch import fetch 62 from dvc.repo.freeze import freeze, unfreeze 63 from dvc.repo.gc import gc 64 from dvc.repo.get import get as _get 65 from dvc.repo.get_url import get_url as _get_url 66 from dvc.repo.imp import imp 67 from dvc.repo.imp_url import imp_url 68 from dvc.repo.install import install 69 from dvc.repo.ls import ls as _ls 70 from dvc.repo.move import move 71 from dvc.repo.pull import pull 72 from dvc.repo.push import push 73 from dvc.repo.remove import remove 74 from dvc.repo.reproduce import reproduce 75 from dvc.repo.run import run 76 from dvc.repo.status import status 77 from dvc.repo.update import update 78 79 ls = staticmethod(_ls) 80 get = staticmethod(_get) 81 get_url = staticmethod(_get_url) 82 83 def _get_repo_dirs( 84 self, 85 root_dir: str = None, 86 fs: "FileSystem" = None, 87 uninitialized: bool = False, 88 ): 89 from dvc.scm import SCM, Base, SCMError 90 from dvc.utils.fs import makedirs 91 92 dvc_dir = None 93 tmp_dir = None 94 try: 95 root_dir = self.find_root(root_dir, fs) 96 dvc_dir = os.path.join(root_dir, self.DVC_DIR) 97 tmp_dir = os.path.join(dvc_dir, "tmp") 98 makedirs(tmp_dir, exist_ok=True) 99 except NotDvcRepoError: 100 if not uninitialized: 101 raise 102 103 try: 104 scm = SCM(root_dir or os.curdir) 105 except SCMError: 106 scm = SCM(os.curdir, no_scm=True) 107 108 assert isinstance(scm, Base) 109 root_dir = scm.root_dir 110 111 return root_dir, dvc_dir, tmp_dir 112 113 def _get_database_dir(self, db_name): 114 # NOTE: by default, store SQLite-based remote indexes and state's 115 # `links` and `md5s` caches in the repository itself to avoid any 116 # possible state corruption in 'shared 
cache dir' scenario, but allow 117 # user to override this through config when, say, the repository is 118 # located on a mounted volume — see 119 # https://github.com/iterative/dvc/issues/4420 120 base_db_dir = self.config.get(db_name, {}).get("dir", None) 121 if not base_db_dir: 122 return self.tmp_dir 123 124 import hashlib 125 126 from dvc.utils.fs import makedirs 127 128 root_dir_hash = hashlib.sha224( 129 self.root_dir.encode("utf-8") 130 ).hexdigest() 131 132 db_dir = os.path.join( 133 base_db_dir, 134 self.DVC_DIR, 135 f"{os.path.basename(self.root_dir)}-{root_dir_hash[0:7]}", 136 ) 137 138 makedirs(db_dir, exist_ok=True) 139 return db_dir 140 141 def __init__( 142 self, 143 root_dir=None, 144 fs=None, 145 rev=None, 146 subrepos=False, 147 uninitialized=False, 148 config=None, 149 url=None, 150 repo_factory=None, 151 ): 152 from dvc.config import Config 153 from dvc.data.db import ODBManager 154 from dvc.data_cloud import DataCloud 155 from dvc.fs.git import GitFileSystem 156 from dvc.fs.local import localfs 157 from dvc.lock import LockNoop, make_lock 158 from dvc.repo.live import Live 159 from dvc.repo.metrics import Metrics 160 from dvc.repo.params import Params 161 from dvc.repo.plots import Plots 162 from dvc.repo.stage import StageLoad 163 from dvc.scm import SCM 164 from dvc.stage.cache import StageCache 165 from dvc.state import State, StateNoop 166 167 self.url = url 168 self._fs_conf = {"repo_factory": repo_factory} 169 self._fs = fs or localfs 170 self._scm = None 171 172 if rev and not fs: 173 self._scm = SCM(root_dir or os.curdir) 174 self._fs = GitFileSystem(scm=self._scm, rev=rev) 175 176 self.root_dir, self.dvc_dir, self.tmp_dir = self._get_repo_dirs( 177 root_dir=root_dir, fs=self.fs, uninitialized=uninitialized 178 ) 179 180 self.config = Config(self.dvc_dir, fs=self.fs, config=config) 181 self._uninitialized = uninitialized 182 183 # used by RepoFileSystem to determine if it should traverse subrepos 184 self.subrepos = subrepos 185 186 self.cloud = DataCloud(self) 187 self.stage = StageLoad(self) 188 189 if isinstance(self.fs, GitFileSystem) or not self.dvc_dir: 190 self.lock = LockNoop() 191 self.state = StateNoop() 192 self.odb = ODBManager(self) 193 else: 194 self.lock = make_lock( 195 os.path.join(self.tmp_dir, "lock"), 196 tmp_dir=self.tmp_dir, 197 hardlink_lock=self.config["core"].get("hardlink_lock", False), 198 friendly=True, 199 ) 200 201 state_db_dir = self._get_database_dir("state") 202 self.state = State(self.root_dir, state_db_dir, self.dvcignore) 203 self.odb = ODBManager(self) 204 205 self.stage_cache = StageCache(self) 206 207 self._ignore() 208 209 self.metrics = Metrics(self) 210 self.plots = Plots(self) 211 self.params = Params(self) 212 self.live = Live(self) 213 214 self.stage_collection_error_handler: Optional[ 215 Callable[[str, Exception], None] 216 ] = None 217 self._lock_depth = 0 218 219 def __str__(self): 220 return self.url or self.root_dir 221 222 @cached_property 223 def index(self): 224 from dvc.repo.index import Index 225 226 return Index(self) 227 228 @staticmethod 229 def open(url, *args, **kwargs): 230 if url is None: 231 url = os.getcwd() 232 233 if os.path.exists(url): 234 try: 235 return Repo(url, *args, **kwargs) 236 except NotDvcRepoError: 237 pass # fallthrough to external_repo 238 239 from dvc.external_repo import external_repo 240 241 return external_repo(url, *args, **kwargs) 242 243 @cached_property 244 def scm(self): 245 from dvc.scm import SCM, SCMError 246 247 if self._scm: 248 return self._scm 249 250 no_scm = 
self.config["core"].get("no_scm", False) 251 try: 252 return SCM(self.root_dir, no_scm=no_scm) 253 except SCMError: 254 if self._uninitialized: 255 # might not be a git/dvc repo at all 256 # used in `params/metrics/plots/live` targets 257 return SCM(self.root_dir, no_scm=True) 258 raise 259 260 @cached_property 261 def scm_context(self) -> "SCMContext": 262 from dvc.repo.scm_context import SCMContext 263 264 return SCMContext(self.scm, self.config) 265 266 @cached_property 267 def dvcignore(self) -> DvcIgnoreFilter: 268 269 return DvcIgnoreFilter(self.fs, self.root_dir) 270 271 def get_rev(self): 272 from dvc.fs.local import LocalFileSystem 273 274 assert self.scm 275 if isinstance(self.fs, LocalFileSystem): 276 from dvc.scm import map_scm_exception 277 278 with map_scm_exception(): 279 return self.scm.get_rev() 280 return self.fs.rev 281 282 @cached_property 283 def experiments(self): 284 from dvc.repo.experiments import Experiments 285 286 return Experiments(self) 287 288 @cached_property 289 def machine(self): 290 from dvc.machine import MachineManager 291 292 if self.tmp_dir and ( 293 self.config["feature"].get("machine", False) 294 or env2bool("DVC_TEST") 295 ): 296 return MachineManager(self) 297 return None 298 299 @property 300 def fs(self) -> "FileSystem": 301 return self._fs 302 303 @fs.setter 304 def fs(self, fs: "FileSystem"): 305 self._fs = fs 306 # Our graph cache is no longer valid, as it was based on the previous 307 # fs. 308 self._reset() 309 310 def __repr__(self): 311 return f"{self.__class__.__name__}: '{self.root_dir}'" 312 313 @classmethod 314 def find_root(cls, root=None, fs=None) -> str: 315 from dvc.fs.local import LocalFileSystem, localfs 316 317 root_dir = os.path.realpath(root or os.curdir) 318 _fs = fs 319 fs = fs or localfs 320 321 if not fs.isdir(root_dir): 322 raise NotDvcRepoError(f"directory '{root}' does not exist") 323 324 while True: 325 dvc_dir = fs.path.join(root_dir, cls.DVC_DIR) 326 if fs.isdir(dvc_dir): 327 return root_dir 328 if isinstance(fs, LocalFileSystem) and os.path.ismount(root_dir): 329 break 330 parent = fs.path.parent(root_dir) 331 if parent == root_dir: 332 break 333 root_dir = parent 334 335 if _fs: 336 msg = f"'{root}' does not contain DVC directory" 337 else: 338 msg = ( 339 "you are not inside of a DVC repository " 340 f"(checked up to mount point '{root_dir}')" 341 ) 342 raise NotDvcRepoError(msg) 343 344 @classmethod 345 def find_dvc_dir(cls, root=None): 346 root_dir = cls.find_root(root) 347 return os.path.join(root_dir, cls.DVC_DIR) 348 349 @staticmethod 350 def init(root_dir=os.curdir, no_scm=False, force=False, subdir=False): 351 from dvc.repo.init import init 352 353 return init( 354 root_dir=root_dir, no_scm=no_scm, force=force, subdir=subdir 355 ) 356 357 def unprotect(self, target): 358 return self.odb.local.unprotect(target) 359 360 def _ignore(self): 361 flist = [self.config.files["local"], self.tmp_dir] 362 363 if path_isin(self.odb.local.cache_dir, self.root_dir): 364 flist += [self.odb.local.cache_dir] 365 366 for file in flist: 367 self.scm_context.ignore(file) 368 369 def brancher(self, *args, **kwargs): 370 from dvc.repo.brancher import brancher 371 372 return brancher(self, *args, **kwargs) 373 374 def used_objs( 375 self, 376 targets=None, 377 all_branches=False, 378 with_deps=False, 379 all_tags=False, 380 all_commits=False, 381 all_experiments=False, 382 remote=None, 383 force=False, 384 jobs=None, 385 recursive=False, 386 used_run_cache=None, 387 revs=None, 388 ): 389 """Get the stages related to the given 
target and collect 390 the `info` of its outputs. 391 392 This is useful to know what files from the cache are _in use_ 393 (namely, a file described as an output on a stage). 394 395 The scope is, by default, the working directory, but you can use 396 `all_branches`/`all_tags`/`all_commits`/`all_experiments` to expand 397 the scope. 398 399 Returns: 400 A dict mapping (remote) ODB instances to sets of objects that 401 belong to each ODB. If the ODB instance is None, the objects 402 are naive and do not belong to a specific remote ODB. 403 """ 404 used = defaultdict(set) 405 406 def _add_suffix(objs: Set["HashFile"], suffix: str) -> None: 407 from itertools import chain 408 409 from dvc.data import iterobjs 410 411 for obj in chain.from_iterable(map(iterobjs, objs)): 412 if obj.name is not None: 413 obj.name += suffix 414 415 for branch in self.brancher( 416 revs=revs, 417 all_branches=all_branches, 418 all_tags=all_tags, 419 all_commits=all_commits, 420 all_experiments=all_experiments, 421 ): 422 for odb, objs in self.index.used_objs( 423 targets, 424 remote=remote, 425 force=force, 426 jobs=jobs, 427 recursive=recursive, 428 with_deps=with_deps, 429 ).items(): 430 if branch: 431 _add_suffix(objs, f" ({branch})") 432 used[odb].update(objs) 433 434 if used_run_cache: 435 for odb, objs in self.stage_cache.get_used_objs( 436 used_run_cache, remote=remote, force=force, jobs=jobs 437 ).items(): 438 used[odb].update(objs) 439 440 return used 441 442 @property 443 def stages(self): # obsolete, only for backward-compatibility 444 return self.index.stages 445 446 def find_outs_by_path(self, path, outs=None, recursive=False, strict=True): 447 # using `outs_graph` to ensure graph checks are run 448 outs = outs or self.index.outs_graph 449 450 abs_path = os.path.abspath(path) 451 fs_path = abs_path 452 453 def func(out): 454 def eq(one, two): 455 return one == two 456 457 match = eq if strict else out.fs.path.isin_or_eq 458 459 if out.scheme == "local" and match(fs_path, out.fs_path): 460 return True 461 462 if recursive and out.fs.path.isin(out.fs_path, fs_path): 463 return True 464 465 return False 466 467 matched = list(filter(func, outs)) 468 if not matched: 469 raise OutputNotFoundError(path, self) 470 471 return matched 472 473 def is_dvc_internal(self, path): 474 path_parts = os.path.normpath(path).split(os.path.sep) 475 return self.DVC_DIR in path_parts 476 477 @cached_property 478 def dvcfs(self): 479 from dvc.fs.dvc import DvcFileSystem 480 481 return DvcFileSystem(repo=self) 482 483 @cached_property 484 def repo_fs(self): 485 from dvc.fs.repo import RepoFileSystem 486 487 return RepoFileSystem(self, subrepos=self.subrepos, **self._fs_conf) 488 489 @cached_property 490 def index_db_dir(self): 491 return self._get_database_dir("index") 492 493 @contextmanager 494 def open_by_relpath(self, path, remote=None, mode="r", encoding=None): 495 """Opens a specified resource as a file descriptor""" 496 from dvc.fs.repo import RepoFileSystem 497 498 fs = RepoFileSystem(self, subrepos=True) 499 path = self.fs.path.join(self.root_dir, path) 500 try: 501 with fs.open( 502 path, mode=mode, encoding=encoding, remote=remote 503 ) as fobj: 504 yield fobj 505 except FileNotFoundError as exc: 506 raise FileMissingError(path) from exc 507 except IsADirectoryError as exc: 508 raise DvcIsADirectoryError(f"'{path}' is a directory") from exc 509 510 def close(self): 511 self.scm.close() 512 self.state.close() 513 514 def _reset(self): 515 self.state.close() 516 self.scm._reset() # pylint: disable=protected-access 
517 self.__dict__.pop("index", None) 518 self.__dict__.pop("dvcignore", None) 519 520 def __enter__(self): 521 return self 522 523 def __exit__(self, exc_type, exc_val, exc_tb): 524 self._reset() 525 self.scm.close() 526 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dvc/repo/__init__.py b/dvc/repo/__init__.py --- a/dvc/repo/__init__.py +++ b/dvc/repo/__init__.py @@ -314,8 +314,8 @@ def find_root(cls, root=None, fs=None) -> str: from dvc.fs.local import LocalFileSystem, localfs - root_dir = os.path.realpath(root or os.curdir) - _fs = fs + root = root or os.curdir + root_dir = os.path.realpath(root) fs = fs or localfs if not fs.isdir(root_dir): @@ -332,13 +332,11 @@ break root_dir = parent - if _fs: - msg = f"'{root}' does not contain DVC directory" - else: - msg = ( - "you are not inside of a DVC repository " - f"(checked up to mount point '{root_dir}')" - ) + msg = "you are not inside of a DVC repository" + + if isinstance(fs, LocalFileSystem): + msg = f"{msg} (checked up to mount point '{root_dir}')" + raise NotDvcRepoError(msg) @classmethod
{"golden_diff": "diff --git a/dvc/repo/__init__.py b/dvc/repo/__init__.py\n--- a/dvc/repo/__init__.py\n+++ b/dvc/repo/__init__.py\n@@ -314,8 +314,8 @@\n def find_root(cls, root=None, fs=None) -> str:\n from dvc.fs.local import LocalFileSystem, localfs\n \n- root_dir = os.path.realpath(root or os.curdir)\n- _fs = fs\n+ root = root or os.curdir\n+ root_dir = os.path.realpath(root)\n fs = fs or localfs\n \n if not fs.isdir(root_dir):\n@@ -332,13 +332,11 @@\n break\n root_dir = parent\n \n- if _fs:\n- msg = f\"'{root}' does not contain DVC directory\"\n- else:\n- msg = (\n- \"you are not inside of a DVC repository \"\n- f\"(checked up to mount point '{root_dir}')\"\n- )\n+ msg = \"you are not inside of a DVC repository\"\n+\n+ if isinstance(fs, LocalFileSystem):\n+ msg = f\"{msg} (checked up to mount point '{root_dir}')\"\n+\n raise NotDvcRepoError(msg)\n \n @classmethod\n", "issue": "'None' does not contain DVC directory\n```console\r\ncd \"$(mktemp -d)\"\r\ndvc add foo # or any other command\r\n```\r\n\n", "before_files": [{"content": "import logging\nimport os\nfrom collections import defaultdict\nfrom contextlib import contextmanager\nfrom functools import wraps\nfrom typing import TYPE_CHECKING, Callable, Optional, Set\n\nfrom funcy import cached_property\n\nfrom dvc.exceptions import FileMissingError\nfrom dvc.exceptions import IsADirectoryError as DvcIsADirectoryError\nfrom dvc.exceptions import NotDvcRepoError, OutputNotFoundError\nfrom dvc.ignore import DvcIgnoreFilter\nfrom dvc.utils import env2bool\nfrom dvc.utils.fs import path_isin\n\nif TYPE_CHECKING:\n from dvc.fs.base import FileSystem\n from dvc.objects.file import HashFile\n from dvc.repo.scm_context import SCMContext\n\nlogger = logging.getLogger(__name__)\n\n\n@contextmanager\ndef lock_repo(repo: \"Repo\"):\n # pylint: disable=protected-access\n depth = repo._lock_depth\n repo._lock_depth += 1\n\n try:\n if depth > 0:\n yield\n else:\n with repo.lock:\n repo._reset()\n yield\n # Graph cache is no longer valid after we release the repo.lock\n repo._reset()\n finally:\n repo._lock_depth = depth\n\n\ndef locked(f):\n @wraps(f)\n def wrapper(repo, *args, **kwargs):\n with lock_repo(repo):\n return f(repo, *args, **kwargs)\n\n return wrapper\n\n\nclass Repo:\n DVC_DIR = \".dvc\"\n\n from dvc.repo.add import add\n from dvc.repo.checkout import checkout\n from dvc.repo.commit import commit\n from dvc.repo.destroy import destroy\n from dvc.repo.diff import diff\n from dvc.repo.fetch import fetch\n from dvc.repo.freeze import freeze, unfreeze\n from dvc.repo.gc import gc\n from dvc.repo.get import get as _get\n from dvc.repo.get_url import get_url as _get_url\n from dvc.repo.imp import imp\n from dvc.repo.imp_url import imp_url\n from dvc.repo.install import install\n from dvc.repo.ls import ls as _ls\n from dvc.repo.move import move\n from dvc.repo.pull import pull\n from dvc.repo.push import push\n from dvc.repo.remove import remove\n from dvc.repo.reproduce import reproduce\n from dvc.repo.run import run\n from dvc.repo.status import status\n from dvc.repo.update import update\n\n ls = staticmethod(_ls)\n get = staticmethod(_get)\n get_url = staticmethod(_get_url)\n\n def _get_repo_dirs(\n self,\n root_dir: str = None,\n fs: \"FileSystem\" = None,\n uninitialized: bool = False,\n ):\n from dvc.scm import SCM, Base, SCMError\n from dvc.utils.fs import makedirs\n\n dvc_dir = None\n tmp_dir = None\n try:\n root_dir = self.find_root(root_dir, fs)\n dvc_dir = os.path.join(root_dir, self.DVC_DIR)\n tmp_dir = os.path.join(dvc_dir, 
\"tmp\")\n makedirs(tmp_dir, exist_ok=True)\n except NotDvcRepoError:\n if not uninitialized:\n raise\n\n try:\n scm = SCM(root_dir or os.curdir)\n except SCMError:\n scm = SCM(os.curdir, no_scm=True)\n\n assert isinstance(scm, Base)\n root_dir = scm.root_dir\n\n return root_dir, dvc_dir, tmp_dir\n\n def _get_database_dir(self, db_name):\n # NOTE: by default, store SQLite-based remote indexes and state's\n # `links` and `md5s` caches in the repository itself to avoid any\n # possible state corruption in 'shared cache dir' scenario, but allow\n # user to override this through config when, say, the repository is\n # located on a mounted volume \u2014 see\n # https://github.com/iterative/dvc/issues/4420\n base_db_dir = self.config.get(db_name, {}).get(\"dir\", None)\n if not base_db_dir:\n return self.tmp_dir\n\n import hashlib\n\n from dvc.utils.fs import makedirs\n\n root_dir_hash = hashlib.sha224(\n self.root_dir.encode(\"utf-8\")\n ).hexdigest()\n\n db_dir = os.path.join(\n base_db_dir,\n self.DVC_DIR,\n f\"{os.path.basename(self.root_dir)}-{root_dir_hash[0:7]}\",\n )\n\n makedirs(db_dir, exist_ok=True)\n return db_dir\n\n def __init__(\n self,\n root_dir=None,\n fs=None,\n rev=None,\n subrepos=False,\n uninitialized=False,\n config=None,\n url=None,\n repo_factory=None,\n ):\n from dvc.config import Config\n from dvc.data.db import ODBManager\n from dvc.data_cloud import DataCloud\n from dvc.fs.git import GitFileSystem\n from dvc.fs.local import localfs\n from dvc.lock import LockNoop, make_lock\n from dvc.repo.live import Live\n from dvc.repo.metrics import Metrics\n from dvc.repo.params import Params\n from dvc.repo.plots import Plots\n from dvc.repo.stage import StageLoad\n from dvc.scm import SCM\n from dvc.stage.cache import StageCache\n from dvc.state import State, StateNoop\n\n self.url = url\n self._fs_conf = {\"repo_factory\": repo_factory}\n self._fs = fs or localfs\n self._scm = None\n\n if rev and not fs:\n self._scm = SCM(root_dir or os.curdir)\n self._fs = GitFileSystem(scm=self._scm, rev=rev)\n\n self.root_dir, self.dvc_dir, self.tmp_dir = self._get_repo_dirs(\n root_dir=root_dir, fs=self.fs, uninitialized=uninitialized\n )\n\n self.config = Config(self.dvc_dir, fs=self.fs, config=config)\n self._uninitialized = uninitialized\n\n # used by RepoFileSystem to determine if it should traverse subrepos\n self.subrepos = subrepos\n\n self.cloud = DataCloud(self)\n self.stage = StageLoad(self)\n\n if isinstance(self.fs, GitFileSystem) or not self.dvc_dir:\n self.lock = LockNoop()\n self.state = StateNoop()\n self.odb = ODBManager(self)\n else:\n self.lock = make_lock(\n os.path.join(self.tmp_dir, \"lock\"),\n tmp_dir=self.tmp_dir,\n hardlink_lock=self.config[\"core\"].get(\"hardlink_lock\", False),\n friendly=True,\n )\n\n state_db_dir = self._get_database_dir(\"state\")\n self.state = State(self.root_dir, state_db_dir, self.dvcignore)\n self.odb = ODBManager(self)\n\n self.stage_cache = StageCache(self)\n\n self._ignore()\n\n self.metrics = Metrics(self)\n self.plots = Plots(self)\n self.params = Params(self)\n self.live = Live(self)\n\n self.stage_collection_error_handler: Optional[\n Callable[[str, Exception], None]\n ] = None\n self._lock_depth = 0\n\n def __str__(self):\n return self.url or self.root_dir\n\n @cached_property\n def index(self):\n from dvc.repo.index import Index\n\n return Index(self)\n\n @staticmethod\n def open(url, *args, **kwargs):\n if url is None:\n url = os.getcwd()\n\n if os.path.exists(url):\n try:\n return Repo(url, *args, **kwargs)\n except 
NotDvcRepoError:\n pass # fallthrough to external_repo\n\n from dvc.external_repo import external_repo\n\n return external_repo(url, *args, **kwargs)\n\n @cached_property\n def scm(self):\n from dvc.scm import SCM, SCMError\n\n if self._scm:\n return self._scm\n\n no_scm = self.config[\"core\"].get(\"no_scm\", False)\n try:\n return SCM(self.root_dir, no_scm=no_scm)\n except SCMError:\n if self._uninitialized:\n # might not be a git/dvc repo at all\n # used in `params/metrics/plots/live` targets\n return SCM(self.root_dir, no_scm=True)\n raise\n\n @cached_property\n def scm_context(self) -> \"SCMContext\":\n from dvc.repo.scm_context import SCMContext\n\n return SCMContext(self.scm, self.config)\n\n @cached_property\n def dvcignore(self) -> DvcIgnoreFilter:\n\n return DvcIgnoreFilter(self.fs, self.root_dir)\n\n def get_rev(self):\n from dvc.fs.local import LocalFileSystem\n\n assert self.scm\n if isinstance(self.fs, LocalFileSystem):\n from dvc.scm import map_scm_exception\n\n with map_scm_exception():\n return self.scm.get_rev()\n return self.fs.rev\n\n @cached_property\n def experiments(self):\n from dvc.repo.experiments import Experiments\n\n return Experiments(self)\n\n @cached_property\n def machine(self):\n from dvc.machine import MachineManager\n\n if self.tmp_dir and (\n self.config[\"feature\"].get(\"machine\", False)\n or env2bool(\"DVC_TEST\")\n ):\n return MachineManager(self)\n return None\n\n @property\n def fs(self) -> \"FileSystem\":\n return self._fs\n\n @fs.setter\n def fs(self, fs: \"FileSystem\"):\n self._fs = fs\n # Our graph cache is no longer valid, as it was based on the previous\n # fs.\n self._reset()\n\n def __repr__(self):\n return f\"{self.__class__.__name__}: '{self.root_dir}'\"\n\n @classmethod\n def find_root(cls, root=None, fs=None) -> str:\n from dvc.fs.local import LocalFileSystem, localfs\n\n root_dir = os.path.realpath(root or os.curdir)\n _fs = fs\n fs = fs or localfs\n\n if not fs.isdir(root_dir):\n raise NotDvcRepoError(f\"directory '{root}' does not exist\")\n\n while True:\n dvc_dir = fs.path.join(root_dir, cls.DVC_DIR)\n if fs.isdir(dvc_dir):\n return root_dir\n if isinstance(fs, LocalFileSystem) and os.path.ismount(root_dir):\n break\n parent = fs.path.parent(root_dir)\n if parent == root_dir:\n break\n root_dir = parent\n\n if _fs:\n msg = f\"'{root}' does not contain DVC directory\"\n else:\n msg = (\n \"you are not inside of a DVC repository \"\n f\"(checked up to mount point '{root_dir}')\"\n )\n raise NotDvcRepoError(msg)\n\n @classmethod\n def find_dvc_dir(cls, root=None):\n root_dir = cls.find_root(root)\n return os.path.join(root_dir, cls.DVC_DIR)\n\n @staticmethod\n def init(root_dir=os.curdir, no_scm=False, force=False, subdir=False):\n from dvc.repo.init import init\n\n return init(\n root_dir=root_dir, no_scm=no_scm, force=force, subdir=subdir\n )\n\n def unprotect(self, target):\n return self.odb.local.unprotect(target)\n\n def _ignore(self):\n flist = [self.config.files[\"local\"], self.tmp_dir]\n\n if path_isin(self.odb.local.cache_dir, self.root_dir):\n flist += [self.odb.local.cache_dir]\n\n for file in flist:\n self.scm_context.ignore(file)\n\n def brancher(self, *args, **kwargs):\n from dvc.repo.brancher import brancher\n\n return brancher(self, *args, **kwargs)\n\n def used_objs(\n self,\n targets=None,\n all_branches=False,\n with_deps=False,\n all_tags=False,\n all_commits=False,\n all_experiments=False,\n remote=None,\n force=False,\n jobs=None,\n recursive=False,\n used_run_cache=None,\n revs=None,\n ):\n \"\"\"Get the 
stages related to the given target and collect\n the `info` of its outputs.\n\n This is useful to know what files from the cache are _in use_\n (namely, a file described as an output on a stage).\n\n The scope is, by default, the working directory, but you can use\n `all_branches`/`all_tags`/`all_commits`/`all_experiments` to expand\n the scope.\n\n Returns:\n A dict mapping (remote) ODB instances to sets of objects that\n belong to each ODB. If the ODB instance is None, the objects\n are naive and do not belong to a specific remote ODB.\n \"\"\"\n used = defaultdict(set)\n\n def _add_suffix(objs: Set[\"HashFile\"], suffix: str) -> None:\n from itertools import chain\n\n from dvc.data import iterobjs\n\n for obj in chain.from_iterable(map(iterobjs, objs)):\n if obj.name is not None:\n obj.name += suffix\n\n for branch in self.brancher(\n revs=revs,\n all_branches=all_branches,\n all_tags=all_tags,\n all_commits=all_commits,\n all_experiments=all_experiments,\n ):\n for odb, objs in self.index.used_objs(\n targets,\n remote=remote,\n force=force,\n jobs=jobs,\n recursive=recursive,\n with_deps=with_deps,\n ).items():\n if branch:\n _add_suffix(objs, f\" ({branch})\")\n used[odb].update(objs)\n\n if used_run_cache:\n for odb, objs in self.stage_cache.get_used_objs(\n used_run_cache, remote=remote, force=force, jobs=jobs\n ).items():\n used[odb].update(objs)\n\n return used\n\n @property\n def stages(self): # obsolete, only for backward-compatibility\n return self.index.stages\n\n def find_outs_by_path(self, path, outs=None, recursive=False, strict=True):\n # using `outs_graph` to ensure graph checks are run\n outs = outs or self.index.outs_graph\n\n abs_path = os.path.abspath(path)\n fs_path = abs_path\n\n def func(out):\n def eq(one, two):\n return one == two\n\n match = eq if strict else out.fs.path.isin_or_eq\n\n if out.scheme == \"local\" and match(fs_path, out.fs_path):\n return True\n\n if recursive and out.fs.path.isin(out.fs_path, fs_path):\n return True\n\n return False\n\n matched = list(filter(func, outs))\n if not matched:\n raise OutputNotFoundError(path, self)\n\n return matched\n\n def is_dvc_internal(self, path):\n path_parts = os.path.normpath(path).split(os.path.sep)\n return self.DVC_DIR in path_parts\n\n @cached_property\n def dvcfs(self):\n from dvc.fs.dvc import DvcFileSystem\n\n return DvcFileSystem(repo=self)\n\n @cached_property\n def repo_fs(self):\n from dvc.fs.repo import RepoFileSystem\n\n return RepoFileSystem(self, subrepos=self.subrepos, **self._fs_conf)\n\n @cached_property\n def index_db_dir(self):\n return self._get_database_dir(\"index\")\n\n @contextmanager\n def open_by_relpath(self, path, remote=None, mode=\"r\", encoding=None):\n \"\"\"Opens a specified resource as a file descriptor\"\"\"\n from dvc.fs.repo import RepoFileSystem\n\n fs = RepoFileSystem(self, subrepos=True)\n path = self.fs.path.join(self.root_dir, path)\n try:\n with fs.open(\n path, mode=mode, encoding=encoding, remote=remote\n ) as fobj:\n yield fobj\n except FileNotFoundError as exc:\n raise FileMissingError(path) from exc\n except IsADirectoryError as exc:\n raise DvcIsADirectoryError(f\"'{path}' is a directory\") from exc\n\n def close(self):\n self.scm.close()\n self.state.close()\n\n def _reset(self):\n self.state.close()\n self.scm._reset() # pylint: disable=protected-access\n self.__dict__.pop(\"index\", None)\n self.__dict__.pop(\"dvcignore\", None)\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self._reset()\n self.scm.close()\n", 
"path": "dvc/repo/__init__.py"}], "after_files": [{"content": "import logging\nimport os\nfrom collections import defaultdict\nfrom contextlib import contextmanager\nfrom functools import wraps\nfrom typing import TYPE_CHECKING, Callable, Optional, Set\n\nfrom funcy import cached_property\n\nfrom dvc.exceptions import FileMissingError\nfrom dvc.exceptions import IsADirectoryError as DvcIsADirectoryError\nfrom dvc.exceptions import NotDvcRepoError, OutputNotFoundError\nfrom dvc.ignore import DvcIgnoreFilter\nfrom dvc.utils import env2bool\nfrom dvc.utils.fs import path_isin\n\nif TYPE_CHECKING:\n from dvc.fs.base import FileSystem\n from dvc.objects.file import HashFile\n from dvc.repo.scm_context import SCMContext\n\nlogger = logging.getLogger(__name__)\n\n\n@contextmanager\ndef lock_repo(repo: \"Repo\"):\n # pylint: disable=protected-access\n depth = repo._lock_depth\n repo._lock_depth += 1\n\n try:\n if depth > 0:\n yield\n else:\n with repo.lock:\n repo._reset()\n yield\n # Graph cache is no longer valid after we release the repo.lock\n repo._reset()\n finally:\n repo._lock_depth = depth\n\n\ndef locked(f):\n @wraps(f)\n def wrapper(repo, *args, **kwargs):\n with lock_repo(repo):\n return f(repo, *args, **kwargs)\n\n return wrapper\n\n\nclass Repo:\n DVC_DIR = \".dvc\"\n\n from dvc.repo.add import add\n from dvc.repo.checkout import checkout\n from dvc.repo.commit import commit\n from dvc.repo.destroy import destroy\n from dvc.repo.diff import diff\n from dvc.repo.fetch import fetch\n from dvc.repo.freeze import freeze, unfreeze\n from dvc.repo.gc import gc\n from dvc.repo.get import get as _get\n from dvc.repo.get_url import get_url as _get_url\n from dvc.repo.imp import imp\n from dvc.repo.imp_url import imp_url\n from dvc.repo.install import install\n from dvc.repo.ls import ls as _ls\n from dvc.repo.move import move\n from dvc.repo.pull import pull\n from dvc.repo.push import push\n from dvc.repo.remove import remove\n from dvc.repo.reproduce import reproduce\n from dvc.repo.run import run\n from dvc.repo.status import status\n from dvc.repo.update import update\n\n ls = staticmethod(_ls)\n get = staticmethod(_get)\n get_url = staticmethod(_get_url)\n\n def _get_repo_dirs(\n self,\n root_dir: str = None,\n fs: \"FileSystem\" = None,\n uninitialized: bool = False,\n ):\n from dvc.scm import SCM, Base, SCMError\n from dvc.utils.fs import makedirs\n\n dvc_dir = None\n tmp_dir = None\n try:\n root_dir = self.find_root(root_dir, fs)\n dvc_dir = os.path.join(root_dir, self.DVC_DIR)\n tmp_dir = os.path.join(dvc_dir, \"tmp\")\n makedirs(tmp_dir, exist_ok=True)\n except NotDvcRepoError:\n if not uninitialized:\n raise\n\n try:\n scm = SCM(root_dir or os.curdir)\n except SCMError:\n scm = SCM(os.curdir, no_scm=True)\n\n assert isinstance(scm, Base)\n root_dir = scm.root_dir\n\n return root_dir, dvc_dir, tmp_dir\n\n def _get_database_dir(self, db_name):\n # NOTE: by default, store SQLite-based remote indexes and state's\n # `links` and `md5s` caches in the repository itself to avoid any\n # possible state corruption in 'shared cache dir' scenario, but allow\n # user to override this through config when, say, the repository is\n # located on a mounted volume \u2014 see\n # https://github.com/iterative/dvc/issues/4420\n base_db_dir = self.config.get(db_name, {}).get(\"dir\", None)\n if not base_db_dir:\n return self.tmp_dir\n\n import hashlib\n\n from dvc.utils.fs import makedirs\n\n root_dir_hash = hashlib.sha224(\n self.root_dir.encode(\"utf-8\")\n ).hexdigest()\n\n db_dir = os.path.join(\n 
base_db_dir,\n self.DVC_DIR,\n f\"{os.path.basename(self.root_dir)}-{root_dir_hash[0:7]}\",\n )\n\n makedirs(db_dir, exist_ok=True)\n return db_dir\n\n def __init__(\n self,\n root_dir=None,\n fs=None,\n rev=None,\n subrepos=False,\n uninitialized=False,\n config=None,\n url=None,\n repo_factory=None,\n ):\n from dvc.config import Config\n from dvc.data.db import ODBManager\n from dvc.data_cloud import DataCloud\n from dvc.fs.git import GitFileSystem\n from dvc.fs.local import localfs\n from dvc.lock import LockNoop, make_lock\n from dvc.repo.live import Live\n from dvc.repo.metrics import Metrics\n from dvc.repo.params import Params\n from dvc.repo.plots import Plots\n from dvc.repo.stage import StageLoad\n from dvc.scm import SCM\n from dvc.stage.cache import StageCache\n from dvc.state import State, StateNoop\n\n self.url = url\n self._fs_conf = {\"repo_factory\": repo_factory}\n self._fs = fs or localfs\n self._scm = None\n\n if rev and not fs:\n self._scm = SCM(root_dir or os.curdir)\n self._fs = GitFileSystem(scm=self._scm, rev=rev)\n\n self.root_dir, self.dvc_dir, self.tmp_dir = self._get_repo_dirs(\n root_dir=root_dir, fs=self.fs, uninitialized=uninitialized\n )\n\n self.config = Config(self.dvc_dir, fs=self.fs, config=config)\n self._uninitialized = uninitialized\n\n # used by RepoFileSystem to determine if it should traverse subrepos\n self.subrepos = subrepos\n\n self.cloud = DataCloud(self)\n self.stage = StageLoad(self)\n\n if isinstance(self.fs, GitFileSystem) or not self.dvc_dir:\n self.lock = LockNoop()\n self.state = StateNoop()\n self.odb = ODBManager(self)\n else:\n self.lock = make_lock(\n os.path.join(self.tmp_dir, \"lock\"),\n tmp_dir=self.tmp_dir,\n hardlink_lock=self.config[\"core\"].get(\"hardlink_lock\", False),\n friendly=True,\n )\n\n state_db_dir = self._get_database_dir(\"state\")\n self.state = State(self.root_dir, state_db_dir, self.dvcignore)\n self.odb = ODBManager(self)\n\n self.stage_cache = StageCache(self)\n\n self._ignore()\n\n self.metrics = Metrics(self)\n self.plots = Plots(self)\n self.params = Params(self)\n self.live = Live(self)\n\n self.stage_collection_error_handler: Optional[\n Callable[[str, Exception], None]\n ] = None\n self._lock_depth = 0\n\n def __str__(self):\n return self.url or self.root_dir\n\n @cached_property\n def index(self):\n from dvc.repo.index import Index\n\n return Index(self)\n\n @staticmethod\n def open(url, *args, **kwargs):\n if url is None:\n url = os.getcwd()\n\n if os.path.exists(url):\n try:\n return Repo(url, *args, **kwargs)\n except NotDvcRepoError:\n pass # fallthrough to external_repo\n\n from dvc.external_repo import external_repo\n\n return external_repo(url, *args, **kwargs)\n\n @cached_property\n def scm(self):\n from dvc.scm import SCM, SCMError\n\n if self._scm:\n return self._scm\n\n no_scm = self.config[\"core\"].get(\"no_scm\", False)\n try:\n return SCM(self.root_dir, no_scm=no_scm)\n except SCMError:\n if self._uninitialized:\n # might not be a git/dvc repo at all\n # used in `params/metrics/plots/live` targets\n return SCM(self.root_dir, no_scm=True)\n raise\n\n @cached_property\n def scm_context(self) -> \"SCMContext\":\n from dvc.repo.scm_context import SCMContext\n\n return SCMContext(self.scm, self.config)\n\n @cached_property\n def dvcignore(self) -> DvcIgnoreFilter:\n\n return DvcIgnoreFilter(self.fs, self.root_dir)\n\n def get_rev(self):\n from dvc.fs.local import LocalFileSystem\n\n assert self.scm\n if isinstance(self.fs, LocalFileSystem):\n from dvc.scm import map_scm_exception\n\n with 
import re
import json
from datasets import load_dataset

PROMPT_TEMPLATE = """\
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
{issue}
--- END ISSUE ---

Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
{file_context}
--- END FILES ---

Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.

Here is an example:

\```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
 
 
 if __name__ == "__main__":
-    asyncio.run(run_async_server("."), debug=True)
+    asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
 
 
 if __name__ == "__main__":
-    server = run_sync_server(".")
+    server = run_sync_server()
     server.shutdown()

\```
"""

# ds = load_dataset("rasdani/github-patches-10k-sample-sorted", split="train")
ds = load_dataset("rasdani/github-patches-decontaminated", split="train")


def prepend_line_numbers(file_content: str) -> str:
    """Prefix each line of ``file_content`` with its 1-based line number."""
    if not file_content:
        return ""
    lines = file_content.split('\n')
    lines = [f"{i+1} {line}" for i, line in enumerate(lines)]
    ret = '\n'.join(lines)
    ret = ret.strip() + "\n"
    return ret
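
# Illustrative example (assumed input, not part of the original pipeline):
# prepend_line_numbers("def foo():\n    return 1")
# -> "1 def foo():\n2     return 1\n"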

def normalize_diff(diff_text: str) -> str:
    """Strip ``index`` lines and hunk-header context so diffs can be compared verbatim."""
    diff_text = re.sub(r'(?m)^index [^\n]*\n', '', diff_text)
    diff_text = re.sub(r'(?m)^(@@[^@]*@@).*', r'\1', diff_text)
    diff_text = diff_text.strip() + "\n"
    return diff_text
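
# Illustrative example (assumed input): normalize_diff drops `index ...` lines and
# strips trailing context after hunk headers, e.g.
# "diff --git a/f.py b/f.py\nindex 1234567..89abcde 100644\n@@ -1,4 +1,4 @@ def foo():\n-    return 1\n+    return 2\n"
# -> "diff --git a/f.py b/f.py\n@@ -1,4 +1,4 @@\n-    return 1\n+    return 2\n"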

def filter_diff_by_files(diff: str, touched_files: set) -> str:
    """Filter a git diff to only include changes for specific files."""
    if not touched_files:
        return diff
    
    lines = diff.split('\n')
    filtered_lines = []
    include_section = False
    
    for line in lines:
        if line.startswith('diff --git'):
            # Check if this file should be included
            # Extract the file path from "diff --git a/path b/path"
            match = re.match(r'diff --git a/(.*?) b/', line)
            if match:
                file_path = match.group(1)
                include_section = file_path in touched_files
            else:
                include_section = False
        
        if include_section:
            filtered_lines.append(line)
    
    return '\n'.join(filtered_lines)
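
# Illustrative example (assumed input, not from the original pipeline): given a
# two-file diff and touched_files={"keep.py"}, only the lines belonging to the
# "diff --git a/keep.py b/keep.py" section survive; the other file's section is dropped.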

def create_golden_diff(example):
    """Limit the PR diff to the touched files, normalize it, and build ``verification_info``."""
    before_paths = [b["path"] for b in example["before_files"]]
    after_paths = [a["path"] for a in example["after_files"]]
    touched_files = set(before_paths) | set(after_paths)
    filtered_diff = filter_diff_by_files(example["pr_diff"], touched_files)
    golden_diff = normalize_diff(filtered_diff)
    for path in touched_files:
        assert path in golden_diff, f"Path {path} not found in golden diff {golden_diff}"
    verification_info_dict = {
        "golden_diff": golden_diff,
        "issue": example["issue"],
        "before_files": example["before_files"],
        "after_files": example["after_files"],
    }
    verification_info = json.dumps(verification_info_dict)
    return {"golden_diff": golden_diff, "verification_info": verification_info}

def create_prompt(example):
    """Render the prompt from the issue text and the numbered before-file contents."""
    issue = example["issue"]
    before_files = example["before_files"]
    file_context = [f"Path: `{x['path']}`\nContent:\n```\n{prepend_line_numbers(x['content'])}```" for x in before_files]
    file_context = "\n\n".join(file_context)
    # PROMPT_TEMPLATE only defines {issue} and {file_context} placeholders; the
    # golden diff is intentionally kept out of the prompt.
    prompt = PROMPT_TEMPLATE.format(issue=issue, file_context=file_context)
    # print(prompt)
    # print("="*100)
    return {"prompt": prompt}

    

ds_up = ds.map(lambda x, idx: {"problem_id": f"gh_patches_debug_{idx}"}, with_indices=True)
ds_up = ds_up.map(lambda x: {"source": "rasdani/github-patches", "task_type": "git_diff"})

ds_up = ds_up.map(create_golden_diff, num_proc=10)
# example = ds_up[0]
# create_prompt(example)
ds_up = ds_up.map(create_prompt, num_proc=10)

ds_up = ds_up.select_columns(["problem_id", "source", "task_type", "in_source_id", "prompt", "golden_diff", "verification_info"])

# ds_up.push_to_hub("rasdani/github-patches-debug")
# ds_up.push_to_hub("rasdani/github-patches-debug-genesys")
ds_up = ds_up.shuffle(seed=42)
ds_up.push_to_hub("rasdani/github-patches-genesys")