monsoon-nlp committed
Commit 0c7def8 · verified · Parent: 9e2fc96

Upload 3 files

Files changed (3):
  1. README.md +3 -2
  2. plant_genome_file_names.txt +2 -1
  3. wheat-bees.py +6 -3
README.md CHANGED
@@ -3,5 +3,6 @@ tags:
 - DNA
 ---
 
-Single-genome version of [InstaDeepAI/plant-multi-species-genomes](https://huggingface.co/datasets/InstaDeepAI/plant-multi-species-genomes) for a demo / debugging
-training run of [monsoon-nlp/dna-blockdiff](https://huggingface.co/monsoon-nlp/dna-blockdiff)
+Mini mini version of [InstaDeepAI/plant-multi-species-genomes](https://huggingface.co/datasets/InstaDeepAI/plant-multi-species-genomes) with wheat as training set and papaya as validation set.
+
+One model [monsoon-nlp/dna-blockdiff-papaya](https://huggingface.co/monsoon-nlp/dna-blockdiff-papaya) was trained only on the papaya genome as a proof of concept.
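For quick verification, a minimal sketch of loading the updated dataset with 🤗 Datasets. The repo id `monsoon-nlp/wheat-bees` is an assumption inferred from the script name, and script-backed datasets need `trust_remote_code=True` on a `datasets` version that still supports loading scripts.

```python
from datasets import load_dataset

# Hypothetical repo id, inferred from the wheat-bees.py script name;
# substitute this repo's actual id. Loading scripts require
# trust_remote_code=True (and a `datasets` release that still runs them).
ds = load_dataset("monsoon-nlp/wheat-bees", trust_remote_code=True)

print(ds)  # expected: "train" (wheat) and "validation" (papaya) splits
```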
plant_genome_file_names.txt CHANGED
@@ -1 +1,2 @@
-GCF_000150535.2_Papaya1.0_genomic.fna.gz
+GCF_018294505.1_IWGSC_CS_RefSeq_v2.1_genomic.fna.gz
+GCF_000150535.2_Papaya1.0_genomic.fna.gz
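The order of this file now matters: as the wheat-bees.py diff below shows, the script takes the first line for the train split and the second for validation. A sketch of that consumption, mirroring the script's own lines:

```python
import os

# Mirrors wheat-bees.py: one NCBI accession per line, order-sensitive.
with open("plant_genome_file_names.txt") as f:
    filepaths = [os.path.join("plant_genomes", filepath.rstrip()) for filepath in f]

train_paths = [filepaths[0]]       # wheat: GCF_018294505.1_IWGSC_CS_RefSeq_v2.1
validation_paths = [filepaths[1]]  # papaya: GCF_000150535.2_Papaya1.0
```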
wheat-bees.py CHANGED
@@ -25,7 +25,7 @@ _CITATION = """\
 
 # You can copy an official description
 _DESCRIPTION = """\
-Dataset made of diverse genomes available on NCBI and coming from 48 different species.
+Dataset made of diverse genomes available on NCBI and coming from 48 different species.
 Test and validation are made of 2 species each. The rest of the genomes are used for training.
 Default configuration "6kbp" yields chunks of 6.2kbp (100bp overlap on each side). The chunks of DNA are cleaned and processed so that
 they can only contain the letters A, T, C, G and N.
@@ -118,12 +118,15 @@ class PlantMultiSpeciesGenomes(datasets.GeneratorBasedBuilder):
         with open(filepaths_txt) as f:
             filepaths = [os.path.join("plant_genomes",filepath.rstrip()) for filepath in f]
 
-        train_paths = filepaths
-
+        train_paths = [filepaths[0]]
         train_downloaded_files = dl_manager.download_and_extract(train_paths)
 
+        validation_paths = [filepaths[1]]
+        validation_downloaded_files = dl_manager.download_and_extract(validation_paths)
+
         return [
             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": train_downloaded_files, "chunk_length": self.config.chunk_length}),
+            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"files": validation_downloaded_files, "chunk_length": self.config.chunk_length}),
         ]
 
     # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
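A sketch of sanity-checking the new validation split against the guarantees stated in `_DESCRIPTION` (chunks of ~6.2kbp restricted to A, T, C, G, N). The repo id and the `sequence` field name are assumptions carried over from the upstream InstaDeepAI script, not confirmed by this commit.

```python
from datasets import load_dataset

# Same hypothetical repo id as above; "sequence" is the field name assumed
# from the upstream InstaDeepAI/plant-multi-species-genomes script.
ds = load_dataset("monsoon-nlp/wheat-bees", trust_remote_code=True)

for split in ("train", "validation"):
    seq = ds[split][0]["sequence"]
    assert set(seq) <= set("ATCGN")  # _DESCRIPTION: only A, T, C, G, N
    print(split, len(seq))           # ~6200 for full-length chunks
```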