prompt: large_string, lengths 72 to 9.34k
completion: large_string, lengths 0 to 7.61k
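The two columns above describe a plain prompt/completion table. As a minimal sketch (not part of the source), rows like the ones that follow could be read with the Hugging Face datasets library; the path fim_rows.jsonl is a hypothetical local export of this table, and the expected column names are taken from the schema above.

from datasets import load_dataset

# Hypothetical JSON Lines export of the table below; adjust to the real location.
rows = load_dataset("json", data_files="fim_rows.jsonl", split="train")
print(rows.column_names)        # expected: ['prompt', 'completion']
print(len(rows[0]["prompt"]))   # prompt lengths range from 72 up to roughly 9.34k characters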
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded <|fim_middle|> elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found 
+= 1 assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
return val
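Each row pairs a prompt, in which one span of ContentDbDict.py has been replaced by the <|fim_middle|> sentinel and the whole file is wrapped in <|file_name|>...<|end_file_name|> and <|fim▁begin|>...<|fim▁end|> markers, with a completion holding the missing span (here: return val). A minimal sketch of splicing one row back into plain source text, assuming the sentinel strings exactly as they appear in these rows:

# Sentinels copied from the rows in this dump; treat them as assumptions if the
# underlying dataset uses different markers.
FILE_OPEN, FILE_CLOSE = "<|file_name|>", "<|end_file_name|>"
FIM_BEGIN, FIM_HOLE, FIM_END = "<|fim▁begin|>", "<|fim_middle|>", "<|fim▁end|>"

def reassemble(prompt, completion):
    # Recover the file name, then splice the completion into the masked hole.
    file_name = prompt.split(FILE_OPEN, 1)[1].split(FILE_CLOSE, 1)[0]
    body = prompt.split(FIM_BEGIN, 1)[1].rsplit(FIM_END, 1)[0]
    prefix, suffix = body.split(FIM_HOLE, 1)
    return file_name, prefix + completion + suffix

Applied to the first row, this yields ContentDbDict.py with return val restored after the "Already loaded" branch.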
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key <|fim_middle|> elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 1 
assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
raise KeyError(key)
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache <|fim_middle|> def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 1 assert key 
assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
return self.loadItem(key)
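The first three completions (return val, raise KeyError(key), return self.loadItem(key)) are exactly the three branches of __getitem__: a truthy stored value means the JSON is already loaded, None (key absent) means an unknown key, and False marks a key that was loaded before but purged from the small in-memory cache. A stripped-down toy of that three-state pattern, with a hypothetical load_from_disk callback standing in for site.storage.loadJson:

class LazyDict(dict):
    # Values: parsed object = loaded, False = evicted placeholder, missing key = unknown.
    def __init__(self, known_keys, load_from_disk):
        super(LazyDict, self).__init__((key, False) for key in known_keys)
        self.load_from_disk = load_from_disk  # hypothetical loader callback

    def __getitem__(self, key):
        val = dict.get(self, key)
        if val:                # already loaded
            return val
        elif val is None:      # unknown key
            raise KeyError(key)
        else:                  # known but purged from cache: reload on demand
            val = self.load_from_disk(key)
            dict.__setitem__(self, key, val)
            return val

As in the original, a falsy loaded value would simply be reloaded on the next access.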
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: <|fim_middle|> back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 1 assert key assert val print "Found:", 
found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
try: val = self.loadItem(key) except Exception: continue
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": <|fim_middle|> <|fim▁end|>
import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 1 assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def <|fim_middle|>(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found 
+= 1 assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
__init__
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def <|fim_middle|>(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found 
+= 1 assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
loadItem
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def <|fim_middle|>(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 
1 assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
getItemSize
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def <|fim_middle|>(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found 
+= 1 assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
checkLimit
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def <|fim_middle|>(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 
1 assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
addCachedKey
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def <|fim_middle|>(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 
1 assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
__getitem__
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def <|fim_middle|>(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 
1 assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
__setitem__
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def <|fim_middle|>(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found += 
1 assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
__delitem__
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def <|fim_middle|>(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): found 
+= 1 assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
iteritems
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def <|fim_middle|>(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): 
found += 1 assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
items
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def <|fim_middle|>(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): 
found += 1 assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
values
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def <|fim_middle|>(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def execute(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): 
found += 1 assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
get
<|file_name|>ContentDbDict.py<|end_file_name|><|fim▁begin|>import time import os import ContentDb from Debug import Debug from Config import config class ContentDbDict(dict): def __init__(self, site, *args, **kwargs): s = time.time() self.site = site self.cached_keys = [] self.log = self.site.log self.db = ContentDb.getContentDb() self.db_id = self.db.needSite(site) self.num_loaded = 0 super(ContentDbDict, self).__init__(self.db.loadDbDict(site)) # Load keys from database self.log.debug("ContentDb init: %.3fs, found files: %s, sites: %s" % (time.time() - s, len(self), len(self.db.site_ids))) def loadItem(self, key): try: self.num_loaded += 1 if self.num_loaded % 100 == 0: if config.verbose: self.log.debug("Loaded json: %s (latest: %s) called by: %s" % (self.num_loaded, key, Debug.formatStack())) else: self.log.debug("Loaded json: %s (latest: %s)" % (self.num_loaded, key)) content = self.site.storage.loadJson(key) dict.__setitem__(self, key, content) except IOError: if dict.get(self, key): self.__delitem__(key) # File not exists anymore raise KeyError(key) self.addCachedKey(key) self.checkLimit() return content def getItemSize(self, key): return self.site.storage.getSize(key) # Only keep last 10 accessed json in memory def checkLimit(self): if len(self.cached_keys) > 10: key_deleted = self.cached_keys.pop(0) dict.__setitem__(self, key_deleted, False) def addCachedKey(self, key): if key not in self.cached_keys and key != "content.json" and len(key) > 40: # Always keep keys smaller than 40 char self.cached_keys.append(key) def __getitem__(self, key): val = dict.get(self, key) if val: # Already loaded return val elif val is None: # Unknown key raise KeyError(key) elif val is False: # Loaded before, but purged from cache return self.loadItem(key) def __setitem__(self, key, val): self.addCachedKey(key) self.checkLimit() size = self.getItemSize(key) self.db.setContent(self.site, key, val, size) dict.__setitem__(self, key, val) def __delitem__(self, key): self.db.deleteContent(self.site, key) dict.__delitem__(self, key) try: self.cached_keys.remove(key) except ValueError: pass def iteritems(self): for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue yield key, val def items(self): back = [] for key in dict.keys(self): try: val = self[key] except Exception as err: self.log.warning("Error loading %s: %s" % (key, err)) continue back.append((key, val)) return back def values(self): back = [] for key, val in dict.iteritems(self): if not val: try: val = self.loadItem(key) except Exception: continue back.append(val) return back def get(self, key, default=None): try: return self.__getitem__(key) except KeyError: return default except Exception as err: self.site.bad_files[key] = self.site.bad_files.get(key, 1) dict.__delitem__(self, key) self.log.warning("Error loading %s: %s" % (key, err)) return default def <|fim_middle|>(self, query, params={}): params["site_id"] = self.db_id return self.db.execute(query, params) if __name__ == "__main__": import psutil process = psutil.Process(os.getpid()) s_mem = process.memory_info()[0] / float(2 ** 20) root = "data-live/1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27" contents = ContentDbDict("1MaiL5gfBM1cyb4a8e3iiL8L5gXmoAJu27", root) print "Init len", len(contents) s = time.time() for dir_name in os.listdir(root + "/data/users/")[0:8000]: contents["data/users/%s/content.json" % dir_name] print "Load: %.3fs" % (time.time() - s) s = time.time() found = 0 for key, val in contents.iteritems(): 
found += 1 assert key assert val print "Found:", found print "Iteritem: %.3fs" % (time.time() - s) s = time.time() found = 0 for key in contents.keys(): found += 1 assert key in contents print "In: %.3fs" % (time.time() - s) print "Len:", len(contents.values()), len(contents.keys()) print "Mem: +", process.memory_info()[0] / float(2 ** 20) - s_mem <|fim▁end|>
execute
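The ContentDbDict samples above all come from one file whose central idea is a three-state cache: a dict value is either a loaded content.json payload, False for a key whose payload was purged (only the last few stay in memory), or absent for an unknown key. A minimal, self-contained sketch of that idea follows; the ThreeStateCache name, the loader callback, and register() are illustrative stand-ins, not anything in the original module.

class ThreeStateCache(dict):
    """Toy version of the ContentDbDict idea: a value is either a loaded
    payload, False (known key whose payload was evicted), or absent."""

    def __init__(self, loader, limit=10):
        super().__init__()
        self.loader = loader      # callable: key -> payload (stand-in for storage.loadJson)
        self.cached_keys = []     # keys whose payloads are currently in memory
        self.limit = limit

    def _check_limit(self):
        # Evict the oldest payload but keep the key around as a False marker
        while len(self.cached_keys) > self.limit:
            oldest = self.cached_keys.pop(0)
            dict.__setitem__(self, oldest, False)

    def register(self, key):
        # Record that a key exists without loading its payload yet
        dict.setdefault(self, key, False)

    def __getitem__(self, key):
        val = dict.get(self, key)
        if val:                   # payload still in memory
            return val
        if val is None:           # unknown key
            raise KeyError(key)
        val = self.loader(key)    # known key, payload purged: reload it
        dict.__setitem__(self, key, val)
        self.cached_keys.append(key)
        self._check_limit()
        return val


cache = ThreeStateCache(loader=lambda key: {"loaded": key}, limit=2)
for name in ("a/content.json", "b/content.json", "c/content.json"):
    cache.register(name)
print([cache[name] for name in ("a/content.json", "b/content.json", "c/content.json")])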
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>from django.conf.urls import patterns, include, url urlpatterns = patterns('', url(r'^$', 'webinterface.view.dashboard.main'), url(r'^dashboard/$', 'webinterface.view.dashboard.main'),<|fim▁hole|> url(r'^settings/$', 'webinterface.view.settings.main'), url(r'^settings/ajax/$', 'webinterface.view.settings.ajax'), url(r'^orders/$', 'webinterface.view.orders.main'), url(r'^orders/ajax/$', 'webinterface.view.orders.ajax'), )<|fim▁end|>
url(r'^login/$', 'webinterface.view.login.main'), url(r'^login/ajax/$', 'webinterface.view.login.ajax'),
<|file_name|>MulticlassContingencyTable.py<|end_file_name|><|fim▁begin|>import numpy as np __author__ = 'David John Gagne <[email protected]>' def main(): # Contingency Table from Wilks (2011) Table 8.3 table = np.array([[50, 91, 71], [47, 2364, 170], [54, 205, 3288]]) mct = MulticlassContingencyTable(table, n_classes=table.shape[0], class_names=np.arange(table.shape[0]).astype(str)) print(mct.peirce_skill_score()) print(mct.gerrity_score()) class MulticlassContingencyTable(object): """ This class is a container for a contingency table containing more than 2 classes. The contingency table is stored in table as a numpy array with the rows corresponding to forecast categories, and the columns corresponding to observation categories. """ def __init__(self, table=None, n_classes=2, class_names=("1", "0")): self.table = table self.n_classes = n_classes self.class_names = class_names if table is None: self.table = np.zeros((self.n_classes, self.n_classes), dtype=int) def __add__(self, other): assert self.n_classes == other.n_classes, "Number of classes does not match" return MulticlassContingencyTable(self.table + other.table, n_classes=self.n_classes, class_names=self.class_names) def peirce_skill_score(self): """ Multiclass Peirce Skill Score (also Hanssen and Kuipers score, True Skill Score) """ n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (no * no).sum() / n ** 2) def gerrity_score(self): """ Gerrity Score, which weights each cell in the contingency table by its observed relative frequency. :return: """ k = self.table.shape[0] n = float(self.table.sum()) p_o = self.table.sum(axis=0) / n p_sum = np.cumsum(p_o)[:-1]<|fim▁hole|> a = (1.0 - p_sum) / p_sum s = np.zeros(self.table.shape, dtype=float) for (i, j) in np.ndindex(*s.shape): if i == j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:j]) + np.sum(a[j:k - 1])) elif i < j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:i]) - (j - i) + np.sum(a[j:k - 1])) else: s[i, j] = s[j, i] return np.sum(self.table / float(self.table.sum()) * s) def heidke_skill_score(self): n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (nf * no).sum() / n ** 2) if __name__ == "__main__": main()<|fim▁end|>
<|file_name|>MulticlassContingencyTable.py<|end_file_name|><|fim▁begin|>import numpy as np __author__ = 'David John Gagne <[email protected]>' def main(): # Contingency Table from Wilks (2011) Table 8.3 <|fim_middle|> class MulticlassContingencyTable(object): """ This class is a container for a contingency table containing more than 2 classes. The contingency table is stored in table as a numpy array with the rows corresponding to forecast categories, and the columns corresponding to observation categories. """ def __init__(self, table=None, n_classes=2, class_names=("1", "0")): self.table = table self.n_classes = n_classes self.class_names = class_names if table is None: self.table = np.zeros((self.n_classes, self.n_classes), dtype=int) def __add__(self, other): assert self.n_classes == other.n_classes, "Number of classes does not match" return MulticlassContingencyTable(self.table + other.table, n_classes=self.n_classes, class_names=self.class_names) def peirce_skill_score(self): """ Multiclass Peirce Skill Score (also Hanssen and Kuipers score, True Skill Score) """ n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (no * no).sum() / n ** 2) def gerrity_score(self): """ Gerrity Score, which weights each cell in the contingency table by its observed relative frequency. :return: """ k = self.table.shape[0] n = float(self.table.sum()) p_o = self.table.sum(axis=0) / n p_sum = np.cumsum(p_o)[:-1] a = (1.0 - p_sum) / p_sum s = np.zeros(self.table.shape, dtype=float) for (i, j) in np.ndindex(*s.shape): if i == j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:j]) + np.sum(a[j:k - 1])) elif i < j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:i]) - (j - i) + np.sum(a[j:k - 1])) else: s[i, j] = s[j, i] return np.sum(self.table / float(self.table.sum()) * s) def heidke_skill_score(self): n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (nf * no).sum() / n ** 2) if __name__ == "__main__": main() <|fim▁end|>
table = np.array([[50, 91, 71], [47, 2364, 170], [54, 205, 3288]]) mct = MulticlassContingencyTable(table, n_classes=table.shape[0], class_names=np.arange(table.shape[0]).astype(str)) print(mct.peirce_skill_score()) print(mct.gerrity_score())
<|file_name|>MulticlassContingencyTable.py<|end_file_name|><|fim▁begin|>import numpy as np __author__ = 'David John Gagne <[email protected]>' def main(): # Contingency Table from Wilks (2011) Table 8.3 table = np.array([[50, 91, 71], [47, 2364, 170], [54, 205, 3288]]) mct = MulticlassContingencyTable(table, n_classes=table.shape[0], class_names=np.arange(table.shape[0]).astype(str)) print(mct.peirce_skill_score()) print(mct.gerrity_score()) class MulticlassContingencyTable(object): <|fim_middle|> if __name__ == "__main__": main() <|fim▁end|>
""" This class is a container for a contingency table containing more than 2 classes. The contingency table is stored in table as a numpy array with the rows corresponding to forecast categories, and the columns corresponding to observation categories. """ def __init__(self, table=None, n_classes=2, class_names=("1", "0")): self.table = table self.n_classes = n_classes self.class_names = class_names if table is None: self.table = np.zeros((self.n_classes, self.n_classes), dtype=int) def __add__(self, other): assert self.n_classes == other.n_classes, "Number of classes does not match" return MulticlassContingencyTable(self.table + other.table, n_classes=self.n_classes, class_names=self.class_names) def peirce_skill_score(self): """ Multiclass Peirce Skill Score (also Hanssen and Kuipers score, True Skill Score) """ n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (no * no).sum() / n ** 2) def gerrity_score(self): """ Gerrity Score, which weights each cell in the contingency table by its observed relative frequency. :return: """ k = self.table.shape[0] n = float(self.table.sum()) p_o = self.table.sum(axis=0) / n p_sum = np.cumsum(p_o)[:-1] a = (1.0 - p_sum) / p_sum s = np.zeros(self.table.shape, dtype=float) for (i, j) in np.ndindex(*s.shape): if i == j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:j]) + np.sum(a[j:k - 1])) elif i < j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:i]) - (j - i) + np.sum(a[j:k - 1])) else: s[i, j] = s[j, i] return np.sum(self.table / float(self.table.sum()) * s) def heidke_skill_score(self): n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (nf * no).sum() / n ** 2)
<|file_name|>MulticlassContingencyTable.py<|end_file_name|><|fim▁begin|>import numpy as np __author__ = 'David John Gagne <[email protected]>' def main(): # Contingency Table from Wilks (2011) Table 8.3 table = np.array([[50, 91, 71], [47, 2364, 170], [54, 205, 3288]]) mct = MulticlassContingencyTable(table, n_classes=table.shape[0], class_names=np.arange(table.shape[0]).astype(str)) print(mct.peirce_skill_score()) print(mct.gerrity_score()) class MulticlassContingencyTable(object): """ This class is a container for a contingency table containing more than 2 classes. The contingency table is stored in table as a numpy array with the rows corresponding to forecast categories, and the columns corresponding to observation categories. """ def __init__(self, table=None, n_classes=2, class_names=("1", "0")): <|fim_middle|> def __add__(self, other): assert self.n_classes == other.n_classes, "Number of classes does not match" return MulticlassContingencyTable(self.table + other.table, n_classes=self.n_classes, class_names=self.class_names) def peirce_skill_score(self): """ Multiclass Peirce Skill Score (also Hanssen and Kuipers score, True Skill Score) """ n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (no * no).sum() / n ** 2) def gerrity_score(self): """ Gerrity Score, which weights each cell in the contingency table by its observed relative frequency. :return: """ k = self.table.shape[0] n = float(self.table.sum()) p_o = self.table.sum(axis=0) / n p_sum = np.cumsum(p_o)[:-1] a = (1.0 - p_sum) / p_sum s = np.zeros(self.table.shape, dtype=float) for (i, j) in np.ndindex(*s.shape): if i == j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:j]) + np.sum(a[j:k - 1])) elif i < j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:i]) - (j - i) + np.sum(a[j:k - 1])) else: s[i, j] = s[j, i] return np.sum(self.table / float(self.table.sum()) * s) def heidke_skill_score(self): n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (nf * no).sum() / n ** 2) if __name__ == "__main__": main() <|fim▁end|>
self.table = table self.n_classes = n_classes self.class_names = class_names if table is None: self.table = np.zeros((self.n_classes, self.n_classes), dtype=int)
<|file_name|>MulticlassContingencyTable.py<|end_file_name|><|fim▁begin|>import numpy as np __author__ = 'David John Gagne <[email protected]>' def main(): # Contingency Table from Wilks (2011) Table 8.3 table = np.array([[50, 91, 71], [47, 2364, 170], [54, 205, 3288]]) mct = MulticlassContingencyTable(table, n_classes=table.shape[0], class_names=np.arange(table.shape[0]).astype(str)) print(mct.peirce_skill_score()) print(mct.gerrity_score()) class MulticlassContingencyTable(object): """ This class is a container for a contingency table containing more than 2 classes. The contingency table is stored in table as a numpy array with the rows corresponding to forecast categories, and the columns corresponding to observation categories. """ def __init__(self, table=None, n_classes=2, class_names=("1", "0")): self.table = table self.n_classes = n_classes self.class_names = class_names if table is None: self.table = np.zeros((self.n_classes, self.n_classes), dtype=int) def __add__(self, other): <|fim_middle|> def peirce_skill_score(self): """ Multiclass Peirce Skill Score (also Hanssen and Kuipers score, True Skill Score) """ n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (no * no).sum() / n ** 2) def gerrity_score(self): """ Gerrity Score, which weights each cell in the contingency table by its observed relative frequency. :return: """ k = self.table.shape[0] n = float(self.table.sum()) p_o = self.table.sum(axis=0) / n p_sum = np.cumsum(p_o)[:-1] a = (1.0 - p_sum) / p_sum s = np.zeros(self.table.shape, dtype=float) for (i, j) in np.ndindex(*s.shape): if i == j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:j]) + np.sum(a[j:k - 1])) elif i < j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:i]) - (j - i) + np.sum(a[j:k - 1])) else: s[i, j] = s[j, i] return np.sum(self.table / float(self.table.sum()) * s) def heidke_skill_score(self): n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (nf * no).sum() / n ** 2) if __name__ == "__main__": main() <|fim▁end|>
assert self.n_classes == other.n_classes, "Number of classes does not match" return MulticlassContingencyTable(self.table + other.table, n_classes=self.n_classes, class_names=self.class_names)
<|file_name|>MulticlassContingencyTable.py<|end_file_name|><|fim▁begin|>import numpy as np __author__ = 'David John Gagne <[email protected]>' def main(): # Contingency Table from Wilks (2011) Table 8.3 table = np.array([[50, 91, 71], [47, 2364, 170], [54, 205, 3288]]) mct = MulticlassContingencyTable(table, n_classes=table.shape[0], class_names=np.arange(table.shape[0]).astype(str)) print(mct.peirce_skill_score()) print(mct.gerrity_score()) class MulticlassContingencyTable(object): """ This class is a container for a contingency table containing more than 2 classes. The contingency table is stored in table as a numpy array with the rows corresponding to forecast categories, and the columns corresponding to observation categories. """ def __init__(self, table=None, n_classes=2, class_names=("1", "0")): self.table = table self.n_classes = n_classes self.class_names = class_names if table is None: self.table = np.zeros((self.n_classes, self.n_classes), dtype=int) def __add__(self, other): assert self.n_classes == other.n_classes, "Number of classes does not match" return MulticlassContingencyTable(self.table + other.table, n_classes=self.n_classes, class_names=self.class_names) def peirce_skill_score(self): <|fim_middle|> def gerrity_score(self): """ Gerrity Score, which weights each cell in the contingency table by its observed relative frequency. :return: """ k = self.table.shape[0] n = float(self.table.sum()) p_o = self.table.sum(axis=0) / n p_sum = np.cumsum(p_o)[:-1] a = (1.0 - p_sum) / p_sum s = np.zeros(self.table.shape, dtype=float) for (i, j) in np.ndindex(*s.shape): if i == j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:j]) + np.sum(a[j:k - 1])) elif i < j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:i]) - (j - i) + np.sum(a[j:k - 1])) else: s[i, j] = s[j, i] return np.sum(self.table / float(self.table.sum()) * s) def heidke_skill_score(self): n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (nf * no).sum() / n ** 2) if __name__ == "__main__": main() <|fim▁end|>
""" Multiclass Peirce Skill Score (also Hanssen and Kuipers score, True Skill Score) """ n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (no * no).sum() / n ** 2)
<|file_name|>MulticlassContingencyTable.py<|end_file_name|><|fim▁begin|>import numpy as np __author__ = 'David John Gagne <[email protected]>' def main(): # Contingency Table from Wilks (2011) Table 8.3 table = np.array([[50, 91, 71], [47, 2364, 170], [54, 205, 3288]]) mct = MulticlassContingencyTable(table, n_classes=table.shape[0], class_names=np.arange(table.shape[0]).astype(str)) print(mct.peirce_skill_score()) print(mct.gerrity_score()) class MulticlassContingencyTable(object): """ This class is a container for a contingency table containing more than 2 classes. The contingency table is stored in table as a numpy array with the rows corresponding to forecast categories, and the columns corresponding to observation categories. """ def __init__(self, table=None, n_classes=2, class_names=("1", "0")): self.table = table self.n_classes = n_classes self.class_names = class_names if table is None: self.table = np.zeros((self.n_classes, self.n_classes), dtype=int) def __add__(self, other): assert self.n_classes == other.n_classes, "Number of classes does not match" return MulticlassContingencyTable(self.table + other.table, n_classes=self.n_classes, class_names=self.class_names) def peirce_skill_score(self): """ Multiclass Peirce Skill Score (also Hanssen and Kuipers score, True Skill Score) """ n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (no * no).sum() / n ** 2) def gerrity_score(self): <|fim_middle|> def heidke_skill_score(self): n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (nf * no).sum() / n ** 2) if __name__ == "__main__": main() <|fim▁end|>
""" Gerrity Score, which weights each cell in the contingency table by its observed relative frequency. :return: """ k = self.table.shape[0] n = float(self.table.sum()) p_o = self.table.sum(axis=0) / n p_sum = np.cumsum(p_o)[:-1] a = (1.0 - p_sum) / p_sum s = np.zeros(self.table.shape, dtype=float) for (i, j) in np.ndindex(*s.shape): if i == j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:j]) + np.sum(a[j:k - 1])) elif i < j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:i]) - (j - i) + np.sum(a[j:k - 1])) else: s[i, j] = s[j, i] return np.sum(self.table / float(self.table.sum()) * s)
<|file_name|>MulticlassContingencyTable.py<|end_file_name|><|fim▁begin|>import numpy as np __author__ = 'David John Gagne <[email protected]>' def main(): # Contingency Table from Wilks (2011) Table 8.3 table = np.array([[50, 91, 71], [47, 2364, 170], [54, 205, 3288]]) mct = MulticlassContingencyTable(table, n_classes=table.shape[0], class_names=np.arange(table.shape[0]).astype(str)) print(mct.peirce_skill_score()) print(mct.gerrity_score()) class MulticlassContingencyTable(object): """ This class is a container for a contingency table containing more than 2 classes. The contingency table is stored in table as a numpy array with the rows corresponding to forecast categories, and the columns corresponding to observation categories. """ def __init__(self, table=None, n_classes=2, class_names=("1", "0")): self.table = table self.n_classes = n_classes self.class_names = class_names if table is None: self.table = np.zeros((self.n_classes, self.n_classes), dtype=int) def __add__(self, other): assert self.n_classes == other.n_classes, "Number of classes does not match" return MulticlassContingencyTable(self.table + other.table, n_classes=self.n_classes, class_names=self.class_names) def peirce_skill_score(self): """ Multiclass Peirce Skill Score (also Hanssen and Kuipers score, True Skill Score) """ n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (no * no).sum() / n ** 2) def gerrity_score(self): """ Gerrity Score, which weights each cell in the contingency table by its observed relative frequency. :return: """ k = self.table.shape[0] n = float(self.table.sum()) p_o = self.table.sum(axis=0) / n p_sum = np.cumsum(p_o)[:-1] a = (1.0 - p_sum) / p_sum s = np.zeros(self.table.shape, dtype=float) for (i, j) in np.ndindex(*s.shape): if i == j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:j]) + np.sum(a[j:k - 1])) elif i < j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:i]) - (j - i) + np.sum(a[j:k - 1])) else: s[i, j] = s[j, i] return np.sum(self.table / float(self.table.sum()) * s) def heidke_skill_score(self): <|fim_middle|> if __name__ == "__main__": main() <|fim▁end|>
n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (nf * no).sum() / n ** 2)
<|file_name|>MulticlassContingencyTable.py<|end_file_name|><|fim▁begin|>import numpy as np __author__ = 'David John Gagne <[email protected]>' def main(): # Contingency Table from Wilks (2011) Table 8.3 table = np.array([[50, 91, 71], [47, 2364, 170], [54, 205, 3288]]) mct = MulticlassContingencyTable(table, n_classes=table.shape[0], class_names=np.arange(table.shape[0]).astype(str)) print(mct.peirce_skill_score()) print(mct.gerrity_score()) class MulticlassContingencyTable(object): """ This class is a container for a contingency table containing more than 2 classes. The contingency table is stored in table as a numpy array with the rows corresponding to forecast categories, and the columns corresponding to observation categories. """ def __init__(self, table=None, n_classes=2, class_names=("1", "0")): self.table = table self.n_classes = n_classes self.class_names = class_names if table is None: <|fim_middle|> def __add__(self, other): assert self.n_classes == other.n_classes, "Number of classes does not match" return MulticlassContingencyTable(self.table + other.table, n_classes=self.n_classes, class_names=self.class_names) def peirce_skill_score(self): """ Multiclass Peirce Skill Score (also Hanssen and Kuipers score, True Skill Score) """ n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (no * no).sum() / n ** 2) def gerrity_score(self): """ Gerrity Score, which weights each cell in the contingency table by its observed relative frequency. :return: """ k = self.table.shape[0] n = float(self.table.sum()) p_o = self.table.sum(axis=0) / n p_sum = np.cumsum(p_o)[:-1] a = (1.0 - p_sum) / p_sum s = np.zeros(self.table.shape, dtype=float) for (i, j) in np.ndindex(*s.shape): if i == j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:j]) + np.sum(a[j:k - 1])) elif i < j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:i]) - (j - i) + np.sum(a[j:k - 1])) else: s[i, j] = s[j, i] return np.sum(self.table / float(self.table.sum()) * s) def heidke_skill_score(self): n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (nf * no).sum() / n ** 2) if __name__ == "__main__": main() <|fim▁end|>
self.table = np.zeros((self.n_classes, self.n_classes), dtype=int)
<|file_name|>MulticlassContingencyTable.py<|end_file_name|><|fim▁begin|>import numpy as np __author__ = 'David John Gagne <[email protected]>' def main(): # Contingency Table from Wilks (2011) Table 8.3 table = np.array([[50, 91, 71], [47, 2364, 170], [54, 205, 3288]]) mct = MulticlassContingencyTable(table, n_classes=table.shape[0], class_names=np.arange(table.shape[0]).astype(str)) print(mct.peirce_skill_score()) print(mct.gerrity_score()) class MulticlassContingencyTable(object): """ This class is a container for a contingency table containing more than 2 classes. The contingency table is stored in table as a numpy array with the rows corresponding to forecast categories, and the columns corresponding to observation categories. """ def __init__(self, table=None, n_classes=2, class_names=("1", "0")): self.table = table self.n_classes = n_classes self.class_names = class_names if table is None: self.table = np.zeros((self.n_classes, self.n_classes), dtype=int) def __add__(self, other): assert self.n_classes == other.n_classes, "Number of classes does not match" return MulticlassContingencyTable(self.table + other.table, n_classes=self.n_classes, class_names=self.class_names) def peirce_skill_score(self): """ Multiclass Peirce Skill Score (also Hanssen and Kuipers score, True Skill Score) """ n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (no * no).sum() / n ** 2) def gerrity_score(self): """ Gerrity Score, which weights each cell in the contingency table by its observed relative frequency. :return: """ k = self.table.shape[0] n = float(self.table.sum()) p_o = self.table.sum(axis=0) / n p_sum = np.cumsum(p_o)[:-1] a = (1.0 - p_sum) / p_sum s = np.zeros(self.table.shape, dtype=float) for (i, j) in np.ndindex(*s.shape): if i == j: <|fim_middle|> elif i < j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:i]) - (j - i) + np.sum(a[j:k - 1])) else: s[i, j] = s[j, i] return np.sum(self.table / float(self.table.sum()) * s) def heidke_skill_score(self): n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (nf * no).sum() / n ** 2) if __name__ == "__main__": main() <|fim▁end|>
s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:j]) + np.sum(a[j:k - 1]))
<|file_name|>MulticlassContingencyTable.py<|end_file_name|><|fim▁begin|>import numpy as np __author__ = 'David John Gagne <[email protected]>' def main(): # Contingency Table from Wilks (2011) Table 8.3 table = np.array([[50, 91, 71], [47, 2364, 170], [54, 205, 3288]]) mct = MulticlassContingencyTable(table, n_classes=table.shape[0], class_names=np.arange(table.shape[0]).astype(str)) print(mct.peirce_skill_score()) print(mct.gerrity_score()) class MulticlassContingencyTable(object): """ This class is a container for a contingency table containing more than 2 classes. The contingency table is stored in table as a numpy array with the rows corresponding to forecast categories, and the columns corresponding to observation categories. """ def __init__(self, table=None, n_classes=2, class_names=("1", "0")): self.table = table self.n_classes = n_classes self.class_names = class_names if table is None: self.table = np.zeros((self.n_classes, self.n_classes), dtype=int) def __add__(self, other): assert self.n_classes == other.n_classes, "Number of classes does not match" return MulticlassContingencyTable(self.table + other.table, n_classes=self.n_classes, class_names=self.class_names) def peirce_skill_score(self): """ Multiclass Peirce Skill Score (also Hanssen and Kuipers score, True Skill Score) """ n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (no * no).sum() / n ** 2) def gerrity_score(self): """ Gerrity Score, which weights each cell in the contingency table by its observed relative frequency. :return: """ k = self.table.shape[0] n = float(self.table.sum()) p_o = self.table.sum(axis=0) / n p_sum = np.cumsum(p_o)[:-1] a = (1.0 - p_sum) / p_sum s = np.zeros(self.table.shape, dtype=float) for (i, j) in np.ndindex(*s.shape): if i == j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:j]) + np.sum(a[j:k - 1])) elif i < j: <|fim_middle|> else: s[i, j] = s[j, i] return np.sum(self.table / float(self.table.sum()) * s) def heidke_skill_score(self): n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (nf * no).sum() / n ** 2) if __name__ == "__main__": main() <|fim▁end|>
s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:i]) - (j - i) + np.sum(a[j:k - 1]))
<|file_name|>MulticlassContingencyTable.py<|end_file_name|><|fim▁begin|>import numpy as np __author__ = 'David John Gagne <[email protected]>' def main(): # Contingency Table from Wilks (2011) Table 8.3 table = np.array([[50, 91, 71], [47, 2364, 170], [54, 205, 3288]]) mct = MulticlassContingencyTable(table, n_classes=table.shape[0], class_names=np.arange(table.shape[0]).astype(str)) print(mct.peirce_skill_score()) print(mct.gerrity_score()) class MulticlassContingencyTable(object): """ This class is a container for a contingency table containing more than 2 classes. The contingency table is stored in table as a numpy array with the rows corresponding to forecast categories, and the columns corresponding to observation categories. """ def __init__(self, table=None, n_classes=2, class_names=("1", "0")): self.table = table self.n_classes = n_classes self.class_names = class_names if table is None: self.table = np.zeros((self.n_classes, self.n_classes), dtype=int) def __add__(self, other): assert self.n_classes == other.n_classes, "Number of classes does not match" return MulticlassContingencyTable(self.table + other.table, n_classes=self.n_classes, class_names=self.class_names) def peirce_skill_score(self): """ Multiclass Peirce Skill Score (also Hanssen and Kuipers score, True Skill Score) """ n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (no * no).sum() / n ** 2) def gerrity_score(self): """ Gerrity Score, which weights each cell in the contingency table by its observed relative frequency. :return: """ k = self.table.shape[0] n = float(self.table.sum()) p_o = self.table.sum(axis=0) / n p_sum = np.cumsum(p_o)[:-1] a = (1.0 - p_sum) / p_sum s = np.zeros(self.table.shape, dtype=float) for (i, j) in np.ndindex(*s.shape): if i == j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:j]) + np.sum(a[j:k - 1])) elif i < j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:i]) - (j - i) + np.sum(a[j:k - 1])) else: <|fim_middle|> return np.sum(self.table / float(self.table.sum()) * s) def heidke_skill_score(self): n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (nf * no).sum() / n ** 2) if __name__ == "__main__": main() <|fim▁end|>
s[i, j] = s[j, i]
<|file_name|>MulticlassContingencyTable.py<|end_file_name|><|fim▁begin|>import numpy as np __author__ = 'David John Gagne <[email protected]>' def main(): # Contingency Table from Wilks (2011) Table 8.3 table = np.array([[50, 91, 71], [47, 2364, 170], [54, 205, 3288]]) mct = MulticlassContingencyTable(table, n_classes=table.shape[0], class_names=np.arange(table.shape[0]).astype(str)) print(mct.peirce_skill_score()) print(mct.gerrity_score()) class MulticlassContingencyTable(object): """ This class is a container for a contingency table containing more than 2 classes. The contingency table is stored in table as a numpy array with the rows corresponding to forecast categories, and the columns corresponding to observation categories. """ def __init__(self, table=None, n_classes=2, class_names=("1", "0")): self.table = table self.n_classes = n_classes self.class_names = class_names if table is None: self.table = np.zeros((self.n_classes, self.n_classes), dtype=int) def __add__(self, other): assert self.n_classes == other.n_classes, "Number of classes does not match" return MulticlassContingencyTable(self.table + other.table, n_classes=self.n_classes, class_names=self.class_names) def peirce_skill_score(self): """ Multiclass Peirce Skill Score (also Hanssen and Kuipers score, True Skill Score) """ n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (no * no).sum() / n ** 2) def gerrity_score(self): """ Gerrity Score, which weights each cell in the contingency table by its observed relative frequency. :return: """ k = self.table.shape[0] n = float(self.table.sum()) p_o = self.table.sum(axis=0) / n p_sum = np.cumsum(p_o)[:-1] a = (1.0 - p_sum) / p_sum s = np.zeros(self.table.shape, dtype=float) for (i, j) in np.ndindex(*s.shape): if i == j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:j]) + np.sum(a[j:k - 1])) elif i < j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:i]) - (j - i) + np.sum(a[j:k - 1])) else: s[i, j] = s[j, i] return np.sum(self.table / float(self.table.sum()) * s) def heidke_skill_score(self): n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (nf * no).sum() / n ** 2) if __name__ == "__main__": <|fim_middle|> <|fim▁end|>
main()
<|file_name|>MulticlassContingencyTable.py<|end_file_name|><|fim▁begin|>import numpy as np __author__ = 'David John Gagne <[email protected]>' def <|fim_middle|>(): # Contingency Table from Wilks (2011) Table 8.3 table = np.array([[50, 91, 71], [47, 2364, 170], [54, 205, 3288]]) mct = MulticlassContingencyTable(table, n_classes=table.shape[0], class_names=np.arange(table.shape[0]).astype(str)) print(mct.peirce_skill_score()) print(mct.gerrity_score()) class MulticlassContingencyTable(object): """ This class is a container for a contingency table containing more than 2 classes. The contingency table is stored in table as a numpy array with the rows corresponding to forecast categories, and the columns corresponding to observation categories. """ def __init__(self, table=None, n_classes=2, class_names=("1", "0")): self.table = table self.n_classes = n_classes self.class_names = class_names if table is None: self.table = np.zeros((self.n_classes, self.n_classes), dtype=int) def __add__(self, other): assert self.n_classes == other.n_classes, "Number of classes does not match" return MulticlassContingencyTable(self.table + other.table, n_classes=self.n_classes, class_names=self.class_names) def peirce_skill_score(self): """ Multiclass Peirce Skill Score (also Hanssen and Kuipers score, True Skill Score) """ n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (no * no).sum() / n ** 2) def gerrity_score(self): """ Gerrity Score, which weights each cell in the contingency table by its observed relative frequency. :return: """ k = self.table.shape[0] n = float(self.table.sum()) p_o = self.table.sum(axis=0) / n p_sum = np.cumsum(p_o)[:-1] a = (1.0 - p_sum) / p_sum s = np.zeros(self.table.shape, dtype=float) for (i, j) in np.ndindex(*s.shape): if i == j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:j]) + np.sum(a[j:k - 1])) elif i < j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:i]) - (j - i) + np.sum(a[j:k - 1])) else: s[i, j] = s[j, i] return np.sum(self.table / float(self.table.sum()) * s) def heidke_skill_score(self): n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (nf * no).sum() / n ** 2) if __name__ == "__main__": main() <|fim▁end|>
main
<|file_name|>MulticlassContingencyTable.py<|end_file_name|><|fim▁begin|>import numpy as np __author__ = 'David John Gagne <[email protected]>' def main(): # Contingency Table from Wilks (2011) Table 8.3 table = np.array([[50, 91, 71], [47, 2364, 170], [54, 205, 3288]]) mct = MulticlassContingencyTable(table, n_classes=table.shape[0], class_names=np.arange(table.shape[0]).astype(str)) print(mct.peirce_skill_score()) print(mct.gerrity_score()) class MulticlassContingencyTable(object): """ This class is a container for a contingency table containing more than 2 classes. The contingency table is stored in table as a numpy array with the rows corresponding to forecast categories, and the columns corresponding to observation categories. """ def <|fim_middle|>(self, table=None, n_classes=2, class_names=("1", "0")): self.table = table self.n_classes = n_classes self.class_names = class_names if table is None: self.table = np.zeros((self.n_classes, self.n_classes), dtype=int) def __add__(self, other): assert self.n_classes == other.n_classes, "Number of classes does not match" return MulticlassContingencyTable(self.table + other.table, n_classes=self.n_classes, class_names=self.class_names) def peirce_skill_score(self): """ Multiclass Peirce Skill Score (also Hanssen and Kuipers score, True Skill Score) """ n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (no * no).sum() / n ** 2) def gerrity_score(self): """ Gerrity Score, which weights each cell in the contingency table by its observed relative frequency. :return: """ k = self.table.shape[0] n = float(self.table.sum()) p_o = self.table.sum(axis=0) / n p_sum = np.cumsum(p_o)[:-1] a = (1.0 - p_sum) / p_sum s = np.zeros(self.table.shape, dtype=float) for (i, j) in np.ndindex(*s.shape): if i == j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:j]) + np.sum(a[j:k - 1])) elif i < j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:i]) - (j - i) + np.sum(a[j:k - 1])) else: s[i, j] = s[j, i] return np.sum(self.table / float(self.table.sum()) * s) def heidke_skill_score(self): n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (nf * no).sum() / n ** 2) if __name__ == "__main__": main() <|fim▁end|>
__init__
<|file_name|>MulticlassContingencyTable.py<|end_file_name|><|fim▁begin|>import numpy as np __author__ = 'David John Gagne <[email protected]>' def main(): # Contingency Table from Wilks (2011) Table 8.3 table = np.array([[50, 91, 71], [47, 2364, 170], [54, 205, 3288]]) mct = MulticlassContingencyTable(table, n_classes=table.shape[0], class_names=np.arange(table.shape[0]).astype(str)) print(mct.peirce_skill_score()) print(mct.gerrity_score()) class MulticlassContingencyTable(object): """ This class is a container for a contingency table containing more than 2 classes. The contingency table is stored in table as a numpy array with the rows corresponding to forecast categories, and the columns corresponding to observation categories. """ def __init__(self, table=None, n_classes=2, class_names=("1", "0")): self.table = table self.n_classes = n_classes self.class_names = class_names if table is None: self.table = np.zeros((self.n_classes, self.n_classes), dtype=int) def <|fim_middle|>(self, other): assert self.n_classes == other.n_classes, "Number of classes does not match" return MulticlassContingencyTable(self.table + other.table, n_classes=self.n_classes, class_names=self.class_names) def peirce_skill_score(self): """ Multiclass Peirce Skill Score (also Hanssen and Kuipers score, True Skill Score) """ n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (no * no).sum() / n ** 2) def gerrity_score(self): """ Gerrity Score, which weights each cell in the contingency table by its observed relative frequency. :return: """ k = self.table.shape[0] n = float(self.table.sum()) p_o = self.table.sum(axis=0) / n p_sum = np.cumsum(p_o)[:-1] a = (1.0 - p_sum) / p_sum s = np.zeros(self.table.shape, dtype=float) for (i, j) in np.ndindex(*s.shape): if i == j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:j]) + np.sum(a[j:k - 1])) elif i < j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:i]) - (j - i) + np.sum(a[j:k - 1])) else: s[i, j] = s[j, i] return np.sum(self.table / float(self.table.sum()) * s) def heidke_skill_score(self): n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (nf * no).sum() / n ** 2) if __name__ == "__main__": main() <|fim▁end|>
__add__
<|file_name|>MulticlassContingencyTable.py<|end_file_name|><|fim▁begin|>import numpy as np __author__ = 'David John Gagne <[email protected]>' def main(): # Contingency Table from Wilks (2011) Table 8.3 table = np.array([[50, 91, 71], [47, 2364, 170], [54, 205, 3288]]) mct = MulticlassContingencyTable(table, n_classes=table.shape[0], class_names=np.arange(table.shape[0]).astype(str)) print(mct.peirce_skill_score()) print(mct.gerrity_score()) class MulticlassContingencyTable(object): """ This class is a container for a contingency table containing more than 2 classes. The contingency table is stored in table as a numpy array with the rows corresponding to forecast categories, and the columns corresponding to observation categories. """ def __init__(self, table=None, n_classes=2, class_names=("1", "0")): self.table = table self.n_classes = n_classes self.class_names = class_names if table is None: self.table = np.zeros((self.n_classes, self.n_classes), dtype=int) def __add__(self, other): assert self.n_classes == other.n_classes, "Number of classes does not match" return MulticlassContingencyTable(self.table + other.table, n_classes=self.n_classes, class_names=self.class_names) def <|fim_middle|>(self): """ Multiclass Peirce Skill Score (also Hanssen and Kuipers score, True Skill Score) """ n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (no * no).sum() / n ** 2) def gerrity_score(self): """ Gerrity Score, which weights each cell in the contingency table by its observed relative frequency. :return: """ k = self.table.shape[0] n = float(self.table.sum()) p_o = self.table.sum(axis=0) / n p_sum = np.cumsum(p_o)[:-1] a = (1.0 - p_sum) / p_sum s = np.zeros(self.table.shape, dtype=float) for (i, j) in np.ndindex(*s.shape): if i == j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:j]) + np.sum(a[j:k - 1])) elif i < j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:i]) - (j - i) + np.sum(a[j:k - 1])) else: s[i, j] = s[j, i] return np.sum(self.table / float(self.table.sum()) * s) def heidke_skill_score(self): n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (nf * no).sum() / n ** 2) if __name__ == "__main__": main() <|fim▁end|>
peirce_skill_score
<|file_name|>MulticlassContingencyTable.py<|end_file_name|><|fim▁begin|>import numpy as np __author__ = 'David John Gagne <[email protected]>' def main(): # Contingency Table from Wilks (2011) Table 8.3 table = np.array([[50, 91, 71], [47, 2364, 170], [54, 205, 3288]]) mct = MulticlassContingencyTable(table, n_classes=table.shape[0], class_names=np.arange(table.shape[0]).astype(str)) print(mct.peirce_skill_score()) print(mct.gerrity_score()) class MulticlassContingencyTable(object): """ This class is a container for a contingency table containing more than 2 classes. The contingency table is stored in table as a numpy array with the rows corresponding to forecast categories, and the columns corresponding to observation categories. """ def __init__(self, table=None, n_classes=2, class_names=("1", "0")): self.table = table self.n_classes = n_classes self.class_names = class_names if table is None: self.table = np.zeros((self.n_classes, self.n_classes), dtype=int) def __add__(self, other): assert self.n_classes == other.n_classes, "Number of classes does not match" return MulticlassContingencyTable(self.table + other.table, n_classes=self.n_classes, class_names=self.class_names) def peirce_skill_score(self): """ Multiclass Peirce Skill Score (also Hanssen and Kuipers score, True Skill Score) """ n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (no * no).sum() / n ** 2) def <|fim_middle|>(self): """ Gerrity Score, which weights each cell in the contingency table by its observed relative frequency. :return: """ k = self.table.shape[0] n = float(self.table.sum()) p_o = self.table.sum(axis=0) / n p_sum = np.cumsum(p_o)[:-1] a = (1.0 - p_sum) / p_sum s = np.zeros(self.table.shape, dtype=float) for (i, j) in np.ndindex(*s.shape): if i == j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:j]) + np.sum(a[j:k - 1])) elif i < j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:i]) - (j - i) + np.sum(a[j:k - 1])) else: s[i, j] = s[j, i] return np.sum(self.table / float(self.table.sum()) * s) def heidke_skill_score(self): n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (nf * no).sum() / n ** 2) if __name__ == "__main__": main() <|fim▁end|>
gerrity_score
<|file_name|>MulticlassContingencyTable.py<|end_file_name|><|fim▁begin|>import numpy as np __author__ = 'David John Gagne <[email protected]>' def main(): # Contingency Table from Wilks (2011) Table 8.3 table = np.array([[50, 91, 71], [47, 2364, 170], [54, 205, 3288]]) mct = MulticlassContingencyTable(table, n_classes=table.shape[0], class_names=np.arange(table.shape[0]).astype(str)) print(mct.peirce_skill_score()) print(mct.gerrity_score()) class MulticlassContingencyTable(object): """ This class is a container for a contingency table containing more than 2 classes. The contingency table is stored in table as a numpy array with the rows corresponding to forecast categories, and the columns corresponding to observation categories. """ def __init__(self, table=None, n_classes=2, class_names=("1", "0")): self.table = table self.n_classes = n_classes self.class_names = class_names if table is None: self.table = np.zeros((self.n_classes, self.n_classes), dtype=int) def __add__(self, other): assert self.n_classes == other.n_classes, "Number of classes does not match" return MulticlassContingencyTable(self.table + other.table, n_classes=self.n_classes, class_names=self.class_names) def peirce_skill_score(self): """ Multiclass Peirce Skill Score (also Hanssen and Kuipers score, True Skill Score) """ n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (no * no).sum() / n ** 2) def gerrity_score(self): """ Gerrity Score, which weights each cell in the contingency table by its observed relative frequency. :return: """ k = self.table.shape[0] n = float(self.table.sum()) p_o = self.table.sum(axis=0) / n p_sum = np.cumsum(p_o)[:-1] a = (1.0 - p_sum) / p_sum s = np.zeros(self.table.shape, dtype=float) for (i, j) in np.ndindex(*s.shape): if i == j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:j]) + np.sum(a[j:k - 1])) elif i < j: s[i, j] = 1.0 / (k - 1.0) * (np.sum(1.0 / a[0:i]) - (j - i) + np.sum(a[j:k - 1])) else: s[i, j] = s[j, i] return np.sum(self.table / float(self.table.sum()) * s) def <|fim_middle|>(self): n = float(self.table.sum()) nf = self.table.sum(axis=1) no = self.table.sum(axis=0) correct = float(self.table.trace()) return (correct / n - (nf * no).sum() / n ** 2) / (1 - (nf * no).sum() / n ** 2) if __name__ == "__main__": main() <|fim▁end|>
heidke_skill_score
<|file_name|>dummyPlatform.py<|end_file_name|><|fim▁begin|>from hwt.synthesizer.rtlLevel.extract_part_drivers import extract_part_drivers from hwt.synthesizer.rtlLevel.remove_unconnected_signals import removeUnconnectedSignals from hwt.synthesizer.rtlLevel.mark_visibility_of_signals_and_check_drivers import markVisibilityOfSignalsAndCheckDrivers class DummyPlatform(): """ :note: all processors has to be callable with only one parameter which is actual Unit/RtlNetlist instance """ def __init__(self): self.beforeToRtl = [] self.beforeToRtlImpl = [] self.afterToRtlImpl = []<|fim▁hole|> extract_part_drivers, removeUnconnectedSignals, markVisibilityOfSignalsAndCheckDrivers, ] self.afterToRtl = []<|fim▁end|>
self.beforeHdlArchGeneration = [
<|file_name|>dummyPlatform.py<|end_file_name|><|fim▁begin|>from hwt.synthesizer.rtlLevel.extract_part_drivers import extract_part_drivers from hwt.synthesizer.rtlLevel.remove_unconnected_signals import removeUnconnectedSignals from hwt.synthesizer.rtlLevel.mark_visibility_of_signals_and_check_drivers import markVisibilityOfSignalsAndCheckDrivers class DummyPlatform(): <|fim_middle|> <|fim▁end|>
""" :note: all processors has to be callable with only one parameter which is actual Unit/RtlNetlist instance """ def __init__(self): self.beforeToRtl = [] self.beforeToRtlImpl = [] self.afterToRtlImpl = [] self.beforeHdlArchGeneration = [ extract_part_drivers, removeUnconnectedSignals, markVisibilityOfSignalsAndCheckDrivers, ] self.afterToRtl = []
<|file_name|>dummyPlatform.py<|end_file_name|><|fim▁begin|>from hwt.synthesizer.rtlLevel.extract_part_drivers import extract_part_drivers from hwt.synthesizer.rtlLevel.remove_unconnected_signals import removeUnconnectedSignals from hwt.synthesizer.rtlLevel.mark_visibility_of_signals_and_check_drivers import markVisibilityOfSignalsAndCheckDrivers class DummyPlatform(): """ :note: all processors has to be callable with only one parameter which is actual Unit/RtlNetlist instance """ def __init__(self): <|fim_middle|> <|fim▁end|>
self.beforeToRtl = [] self.beforeToRtlImpl = [] self.afterToRtlImpl = [] self.beforeHdlArchGeneration = [ extract_part_drivers, removeUnconnectedSignals, markVisibilityOfSignalsAndCheckDrivers, ] self.afterToRtl = []
<|file_name|>dummyPlatform.py<|end_file_name|><|fim▁begin|>from hwt.synthesizer.rtlLevel.extract_part_drivers import extract_part_drivers from hwt.synthesizer.rtlLevel.remove_unconnected_signals import removeUnconnectedSignals from hwt.synthesizer.rtlLevel.mark_visibility_of_signals_and_check_drivers import markVisibilityOfSignalsAndCheckDrivers class DummyPlatform(): """ :note: all processors has to be callable with only one parameter which is actual Unit/RtlNetlist instance """ def <|fim_middle|>(self): self.beforeToRtl = [] self.beforeToRtlImpl = [] self.afterToRtlImpl = [] self.beforeHdlArchGeneration = [ extract_part_drivers, removeUnconnectedSignals, markVisibilityOfSignalsAndCheckDrivers, ] self.afterToRtl = [] <|fim▁end|>
__init__
<|file_name|>interface.py<|end_file_name|><|fim▁begin|>from tkinter import * import tkinter import HoursParser class UserInterface(tkinter.Frame): def __init__(self, master): self.master = master self.events_list = [] # Set window size master.minsize(width=800, height=600) master.maxsize(width=800, height=600) # File Parser self.parser = HoursParser.FileParser() # Filename Label self.file_select_text = tkinter.StringVar() self.file_select_text.set(" ") # Initialize Widgets super().__init__(master) self.pack() # Label for Application self.title_label = LabelFrame(master, text="Technical Services - Scheduler (Alpha)") self.title_label.pack(fill="both", expand="yes") self.inner_label = Label(self.title_label, text="Choose Hours File") self.inner_label.pack() # Button for File Selection self.file_select_button = Button(self.title_label, text="Select File", command=lambda: self.file_button_press()) self.file_select_button.pack() # Label for File Selection Button self.file_select_label = Label(self.title_label, textvariable=self.file_select_text) self.file_select_label.pack() # Button for Parsing File self.file_parse_button = Button(self.title_label, state=DISABLED, text="Read File", command=lambda: self.parse_button_pressed()) self.file_parse_button.pack() # List of Events self.events_list_box = Listbox(self.title_label) self.events_list_box.pack() <|fim▁hole|> self.show_info_button = Button(self.title_label, state="disabled", command=lambda: self.show_event_info()) self.show_info_button.pack() # Shows information about event self.text_area = Text(self.title_label) self.text_area.pack() # Called when Select File button is pressed. def file_button_press(self): self.parser.choose_file() self.file_select_text.set(self.parser.file_name) if self.parser.file_name is not None: self.file_parse_button['state'] = 'normal' def parse_button_pressed(self): self.events_list = self.parser.parse_file() self.populate_list_box(self.events_list_box) # Puts names of events in a list from parsed file. def populate_list_box(self, list_box): i = 0 for event in self.events_list: list_box.insert(i, event.get_event_name()) i += 1 self.show_info_button['state'] = 'normal' def show_event_info(self): # Store Active Time Index event_list_index = int(self.events_list_box.index(ACTIVE)) # Clear text box from previous event details # if self.text_area.get(END) is not "\n": # self.text_area.delete(0, 'end') # Display Formatted Information about Event. self.text_area.insert(END, "Event: " + self.events_list[event_list_index].get_event_name()) self.text_area.insert(END, '\n') self.text_area.insert(END, "Location: " + self.events_list[event_list_index].get_event_location()) self.text_area.insert(END, '\n') self.text_area.insert(END, "Start Time: " + self.events_list[event_list_index].get_event_start_time()) self.text_area.insert(END, '\n') self.text_area.insert(END, "End Time: " + self.events_list[event_list_index].get_event_end_time()) self.text_area.insert(END, '\n') self.text_area.insert(END, "# of Staff: " + self.events_list[event_list_index].get_event_number_employees()) self.text_area.insert(END, '\n') root = Tk() root.wm_title("Scheduler (Alpha)") main_app = UserInterface(master=root) main_app.mainloop()<|fim▁end|>
# Show Info Button
<|file_name|>interface.py<|end_file_name|><|fim▁begin|>from tkinter import * import tkinter import HoursParser class UserInterface(tkinter.Frame): <|fim_middle|> root = Tk() root.wm_title("Scheduler (Alpha)") main_app = UserInterface(master=root) main_app.mainloop() <|fim▁end|>
def __init__(self, master): self.master = master self.events_list = [] # Set window size master.minsize(width=800, height=600) master.maxsize(width=800, height=600) # File Parser self.parser = HoursParser.FileParser() # Filename Label self.file_select_text = tkinter.StringVar() self.file_select_text.set(" ") # Initialize Widgets super().__init__(master) self.pack() # Label for Application self.title_label = LabelFrame(master, text="Technical Services - Scheduler (Alpha)") self.title_label.pack(fill="both", expand="yes") self.inner_label = Label(self.title_label, text="Choose Hours File") self.inner_label.pack() # Button for File Selection self.file_select_button = Button(self.title_label, text="Select File", command=lambda: self.file_button_press()) self.file_select_button.pack() # Label for File Selection Button self.file_select_label = Label(self.title_label, textvariable=self.file_select_text) self.file_select_label.pack() # Button for Parsing File self.file_parse_button = Button(self.title_label, state=DISABLED, text="Read File", command=lambda: self.parse_button_pressed()) self.file_parse_button.pack() # List of Events self.events_list_box = Listbox(self.title_label) self.events_list_box.pack() # Show Info Button self.show_info_button = Button(self.title_label, state="disabled", command=lambda: self.show_event_info()) self.show_info_button.pack() # Shows information about event self.text_area = Text(self.title_label) self.text_area.pack() # Called when Select File button is pressed. def file_button_press(self): self.parser.choose_file() self.file_select_text.set(self.parser.file_name) if self.parser.file_name is not None: self.file_parse_button['state'] = 'normal' def parse_button_pressed(self): self.events_list = self.parser.parse_file() self.populate_list_box(self.events_list_box) # Puts names of events in a list from parsed file. def populate_list_box(self, list_box): i = 0 for event in self.events_list: list_box.insert(i, event.get_event_name()) i += 1 self.show_info_button['state'] = 'normal' def show_event_info(self): # Store Active Time Index event_list_index = int(self.events_list_box.index(ACTIVE)) # Clear text box from previous event details # if self.text_area.get(END) is not "\n": # self.text_area.delete(0, 'end') # Display Formatted Information about Event. self.text_area.insert(END, "Event: " + self.events_list[event_list_index].get_event_name()) self.text_area.insert(END, '\n') self.text_area.insert(END, "Location: " + self.events_list[event_list_index].get_event_location()) self.text_area.insert(END, '\n') self.text_area.insert(END, "Start Time: " + self.events_list[event_list_index].get_event_start_time()) self.text_area.insert(END, '\n') self.text_area.insert(END, "End Time: " + self.events_list[event_list_index].get_event_end_time()) self.text_area.insert(END, '\n') self.text_area.insert(END, "# of Staff: " + self.events_list[event_list_index].get_event_number_employees()) self.text_area.insert(END, '\n')
<|file_name|>interface.py<|end_file_name|><|fim▁begin|>from tkinter import * import tkinter import HoursParser class UserInterface(tkinter.Frame): def __init__(self, master): <|fim_middle|> # Called when Select File button is pressed. def file_button_press(self): self.parser.choose_file() self.file_select_text.set(self.parser.file_name) if self.parser.file_name is not None: self.file_parse_button['state'] = 'normal' def parse_button_pressed(self): self.events_list = self.parser.parse_file() self.populate_list_box(self.events_list_box) # Puts names of events in a list from parsed file. def populate_list_box(self, list_box): i = 0 for event in self.events_list: list_box.insert(i, event.get_event_name()) i += 1 self.show_info_button['state'] = 'normal' def show_event_info(self): # Store Active Time Index event_list_index = int(self.events_list_box.index(ACTIVE)) # Clear text box from previous event details # if self.text_area.get(END) is not "\n": # self.text_area.delete(0, 'end') # Display Formatted Information about Event. self.text_area.insert(END, "Event: " + self.events_list[event_list_index].get_event_name()) self.text_area.insert(END, '\n') self.text_area.insert(END, "Location: " + self.events_list[event_list_index].get_event_location()) self.text_area.insert(END, '\n') self.text_area.insert(END, "Start Time: " + self.events_list[event_list_index].get_event_start_time()) self.text_area.insert(END, '\n') self.text_area.insert(END, "End Time: " + self.events_list[event_list_index].get_event_end_time()) self.text_area.insert(END, '\n') self.text_area.insert(END, "# of Staff: " + self.events_list[event_list_index].get_event_number_employees()) self.text_area.insert(END, '\n') root = Tk() root.wm_title("Scheduler (Alpha)") main_app = UserInterface(master=root) main_app.mainloop() <|fim▁end|>
self.master = master self.events_list = [] # Set window size master.minsize(width=800, height=600) master.maxsize(width=800, height=600) # File Parser self.parser = HoursParser.FileParser() # Filename Label self.file_select_text = tkinter.StringVar() self.file_select_text.set(" ") # Initialize Widgets super().__init__(master) self.pack() # Label for Application self.title_label = LabelFrame(master, text="Technical Services - Scheduler (Alpha)") self.title_label.pack(fill="both", expand="yes") self.inner_label = Label(self.title_label, text="Choose Hours File") self.inner_label.pack() # Button for File Selection self.file_select_button = Button(self.title_label, text="Select File", command=lambda: self.file_button_press()) self.file_select_button.pack() # Label for File Selection Button self.file_select_label = Label(self.title_label, textvariable=self.file_select_text) self.file_select_label.pack() # Button for Parsing File self.file_parse_button = Button(self.title_label, state=DISABLED, text="Read File", command=lambda: self.parse_button_pressed()) self.file_parse_button.pack() # List of Events self.events_list_box = Listbox(self.title_label) self.events_list_box.pack() # Show Info Button self.show_info_button = Button(self.title_label, state="disabled", command=lambda: self.show_event_info()) self.show_info_button.pack() # Shows information about event self.text_area = Text(self.title_label) self.text_area.pack()
<|file_name|>interface.py<|end_file_name|><|fim▁begin|>from tkinter import * import tkinter import HoursParser class UserInterface(tkinter.Frame): def __init__(self, master): self.master = master self.events_list = [] # Set window size master.minsize(width=800, height=600) master.maxsize(width=800, height=600) # File Parser self.parser = HoursParser.FileParser() # Filename Label self.file_select_text = tkinter.StringVar() self.file_select_text.set(" ") # Initialize Widgets super().__init__(master) self.pack() # Label for Application self.title_label = LabelFrame(master, text="Technical Services - Scheduler (Alpha)") self.title_label.pack(fill="both", expand="yes") self.inner_label = Label(self.title_label, text="Choose Hours File") self.inner_label.pack() # Button for File Selection self.file_select_button = Button(self.title_label, text="Select File", command=lambda: self.file_button_press()) self.file_select_button.pack() # Label for File Selection Button self.file_select_label = Label(self.title_label, textvariable=self.file_select_text) self.file_select_label.pack() # Button for Parsing File self.file_parse_button = Button(self.title_label, state=DISABLED, text="Read File", command=lambda: self.parse_button_pressed()) self.file_parse_button.pack() # List of Events self.events_list_box = Listbox(self.title_label) self.events_list_box.pack() # Show Info Button self.show_info_button = Button(self.title_label, state="disabled", command=lambda: self.show_event_info()) self.show_info_button.pack() # Shows information about event self.text_area = Text(self.title_label) self.text_area.pack() # Called when Select File button is pressed. def file_button_press(self): <|fim_middle|> def parse_button_pressed(self): self.events_list = self.parser.parse_file() self.populate_list_box(self.events_list_box) # Puts names of events in a list from parsed file. def populate_list_box(self, list_box): i = 0 for event in self.events_list: list_box.insert(i, event.get_event_name()) i += 1 self.show_info_button['state'] = 'normal' def show_event_info(self): # Store Active Time Index event_list_index = int(self.events_list_box.index(ACTIVE)) # Clear text box from previous event details # if self.text_area.get(END) is not "\n": # self.text_area.delete(0, 'end') # Display Formatted Information about Event. self.text_area.insert(END, "Event: " + self.events_list[event_list_index].get_event_name()) self.text_area.insert(END, '\n') self.text_area.insert(END, "Location: " + self.events_list[event_list_index].get_event_location()) self.text_area.insert(END, '\n') self.text_area.insert(END, "Start Time: " + self.events_list[event_list_index].get_event_start_time()) self.text_area.insert(END, '\n') self.text_area.insert(END, "End Time: " + self.events_list[event_list_index].get_event_end_time()) self.text_area.insert(END, '\n') self.text_area.insert(END, "# of Staff: " + self.events_list[event_list_index].get_event_number_employees()) self.text_area.insert(END, '\n') root = Tk() root.wm_title("Scheduler (Alpha)") main_app = UserInterface(master=root) main_app.mainloop() <|fim▁end|>
self.parser.choose_file() self.file_select_text.set(self.parser.file_name) if self.parser.file_name is not None: self.file_parse_button['state'] = 'normal'
<|file_name|>interface.py<|end_file_name|><|fim▁begin|>from tkinter import * import tkinter import HoursParser class UserInterface(tkinter.Frame): def __init__(self, master): self.master = master self.events_list = [] # Set window size master.minsize(width=800, height=600) master.maxsize(width=800, height=600) # File Parser self.parser = HoursParser.FileParser() # Filename Label self.file_select_text = tkinter.StringVar() self.file_select_text.set(" ") # Initialize Widgets super().__init__(master) self.pack() # Label for Application self.title_label = LabelFrame(master, text="Technical Services - Scheduler (Alpha)") self.title_label.pack(fill="both", expand="yes") self.inner_label = Label(self.title_label, text="Choose Hours File") self.inner_label.pack() # Button for File Selection self.file_select_button = Button(self.title_label, text="Select File", command=lambda: self.file_button_press()) self.file_select_button.pack() # Label for File Selection Button self.file_select_label = Label(self.title_label, textvariable=self.file_select_text) self.file_select_label.pack() # Button for Parsing File self.file_parse_button = Button(self.title_label, state=DISABLED, text="Read File", command=lambda: self.parse_button_pressed()) self.file_parse_button.pack() # List of Events self.events_list_box = Listbox(self.title_label) self.events_list_box.pack() # Show Info Button self.show_info_button = Button(self.title_label, state="disabled", command=lambda: self.show_event_info()) self.show_info_button.pack() # Shows information about event self.text_area = Text(self.title_label) self.text_area.pack() # Called when Select File button is pressed. def file_button_press(self): self.parser.choose_file() self.file_select_text.set(self.parser.file_name) if self.parser.file_name is not None: self.file_parse_button['state'] = 'normal' def parse_button_pressed(self): <|fim_middle|> # Puts names of events in a list from parsed file. def populate_list_box(self, list_box): i = 0 for event in self.events_list: list_box.insert(i, event.get_event_name()) i += 1 self.show_info_button['state'] = 'normal' def show_event_info(self): # Store Active Time Index event_list_index = int(self.events_list_box.index(ACTIVE)) # Clear text box from previous event details # if self.text_area.get(END) is not "\n": # self.text_area.delete(0, 'end') # Display Formatted Information about Event. self.text_area.insert(END, "Event: " + self.events_list[event_list_index].get_event_name()) self.text_area.insert(END, '\n') self.text_area.insert(END, "Location: " + self.events_list[event_list_index].get_event_location()) self.text_area.insert(END, '\n') self.text_area.insert(END, "Start Time: " + self.events_list[event_list_index].get_event_start_time()) self.text_area.insert(END, '\n') self.text_area.insert(END, "End Time: " + self.events_list[event_list_index].get_event_end_time()) self.text_area.insert(END, '\n') self.text_area.insert(END, "# of Staff: " + self.events_list[event_list_index].get_event_number_employees()) self.text_area.insert(END, '\n') root = Tk() root.wm_title("Scheduler (Alpha)") main_app = UserInterface(master=root) main_app.mainloop() <|fim▁end|>
self.events_list = self.parser.parse_file() self.populate_list_box(self.events_list_box)
<|file_name|>interface.py<|end_file_name|><|fim▁begin|>from tkinter import * import tkinter import HoursParser class UserInterface(tkinter.Frame): def __init__(self, master): self.master = master self.events_list = [] # Set window size master.minsize(width=800, height=600) master.maxsize(width=800, height=600) # File Parser self.parser = HoursParser.FileParser() # Filename Label self.file_select_text = tkinter.StringVar() self.file_select_text.set(" ") # Initialize Widgets super().__init__(master) self.pack() # Label for Application self.title_label = LabelFrame(master, text="Technical Services - Scheduler (Alpha)") self.title_label.pack(fill="both", expand="yes") self.inner_label = Label(self.title_label, text="Choose Hours File") self.inner_label.pack() # Button for File Selection self.file_select_button = Button(self.title_label, text="Select File", command=lambda: self.file_button_press()) self.file_select_button.pack() # Label for File Selection Button self.file_select_label = Label(self.title_label, textvariable=self.file_select_text) self.file_select_label.pack() # Button for Parsing File self.file_parse_button = Button(self.title_label, state=DISABLED, text="Read File", command=lambda: self.parse_button_pressed()) self.file_parse_button.pack() # List of Events self.events_list_box = Listbox(self.title_label) self.events_list_box.pack() # Show Info Button self.show_info_button = Button(self.title_label, state="disabled", command=lambda: self.show_event_info()) self.show_info_button.pack() # Shows information about event self.text_area = Text(self.title_label) self.text_area.pack() # Called when Select File button is pressed. def file_button_press(self): self.parser.choose_file() self.file_select_text.set(self.parser.file_name) if self.parser.file_name is not None: self.file_parse_button['state'] = 'normal' def parse_button_pressed(self): self.events_list = self.parser.parse_file() self.populate_list_box(self.events_list_box) # Puts names of events in a list from parsed file. def populate_list_box(self, list_box): <|fim_middle|> def show_event_info(self): # Store Active Time Index event_list_index = int(self.events_list_box.index(ACTIVE)) # Clear text box from previous event details # if self.text_area.get(END) is not "\n": # self.text_area.delete(0, 'end') # Display Formatted Information about Event. self.text_area.insert(END, "Event: " + self.events_list[event_list_index].get_event_name()) self.text_area.insert(END, '\n') self.text_area.insert(END, "Location: " + self.events_list[event_list_index].get_event_location()) self.text_area.insert(END, '\n') self.text_area.insert(END, "Start Time: " + self.events_list[event_list_index].get_event_start_time()) self.text_area.insert(END, '\n') self.text_area.insert(END, "End Time: " + self.events_list[event_list_index].get_event_end_time()) self.text_area.insert(END, '\n') self.text_area.insert(END, "# of Staff: " + self.events_list[event_list_index].get_event_number_employees()) self.text_area.insert(END, '\n') root = Tk() root.wm_title("Scheduler (Alpha)") main_app = UserInterface(master=root) main_app.mainloop() <|fim▁end|>
i = 0 for event in self.events_list: list_box.insert(i, event.get_event_name()) i += 1 self.show_info_button['state'] = 'normal'
<|file_name|>interface.py<|end_file_name|><|fim▁begin|>from tkinter import * import tkinter import HoursParser class UserInterface(tkinter.Frame): def __init__(self, master): self.master = master self.events_list = [] # Set window size master.minsize(width=800, height=600) master.maxsize(width=800, height=600) # File Parser self.parser = HoursParser.FileParser() # Filename Label self.file_select_text = tkinter.StringVar() self.file_select_text.set(" ") # Initialize Widgets super().__init__(master) self.pack() # Label for Application self.title_label = LabelFrame(master, text="Technical Services - Scheduler (Alpha)") self.title_label.pack(fill="both", expand="yes") self.inner_label = Label(self.title_label, text="Choose Hours File") self.inner_label.pack() # Button for File Selection self.file_select_button = Button(self.title_label, text="Select File", command=lambda: self.file_button_press()) self.file_select_button.pack() # Label for File Selection Button self.file_select_label = Label(self.title_label, textvariable=self.file_select_text) self.file_select_label.pack() # Button for Parsing File self.file_parse_button = Button(self.title_label, state=DISABLED, text="Read File", command=lambda: self.parse_button_pressed()) self.file_parse_button.pack() # List of Events self.events_list_box = Listbox(self.title_label) self.events_list_box.pack() # Show Info Button self.show_info_button = Button(self.title_label, state="disabled", command=lambda: self.show_event_info()) self.show_info_button.pack() # Shows information about event self.text_area = Text(self.title_label) self.text_area.pack() # Called when Select File button is pressed. def file_button_press(self): self.parser.choose_file() self.file_select_text.set(self.parser.file_name) if self.parser.file_name is not None: self.file_parse_button['state'] = 'normal' def parse_button_pressed(self): self.events_list = self.parser.parse_file() self.populate_list_box(self.events_list_box) # Puts names of events in a list from parsed file. def populate_list_box(self, list_box): i = 0 for event in self.events_list: list_box.insert(i, event.get_event_name()) i += 1 self.show_info_button['state'] = 'normal' def show_event_info(self): # Store Active Time Index <|fim_middle|> root = Tk() root.wm_title("Scheduler (Alpha)") main_app = UserInterface(master=root) main_app.mainloop() <|fim▁end|>
event_list_index = int(self.events_list_box.index(ACTIVE)) # Clear text box from previous event details # if self.text_area.get(END) is not "\n": # self.text_area.delete(0, 'end') # Display Formatted Information about Event. self.text_area.insert(END, "Event: " + self.events_list[event_list_index].get_event_name()) self.text_area.insert(END, '\n') self.text_area.insert(END, "Location: " + self.events_list[event_list_index].get_event_location()) self.text_area.insert(END, '\n') self.text_area.insert(END, "Start Time: " + self.events_list[event_list_index].get_event_start_time()) self.text_area.insert(END, '\n') self.text_area.insert(END, "End Time: " + self.events_list[event_list_index].get_event_end_time()) self.text_area.insert(END, '\n') self.text_area.insert(END, "# of Staff: " + self.events_list[event_list_index].get_event_number_employees()) self.text_area.insert(END, '\n')
<|file_name|>interface.py<|end_file_name|><|fim▁begin|>from tkinter import * import tkinter import HoursParser class UserInterface(tkinter.Frame): def __init__(self, master): self.master = master self.events_list = [] # Set window size master.minsize(width=800, height=600) master.maxsize(width=800, height=600) # File Parser self.parser = HoursParser.FileParser() # Filename Label self.file_select_text = tkinter.StringVar() self.file_select_text.set(" ") # Initialize Widgets super().__init__(master) self.pack() # Label for Application self.title_label = LabelFrame(master, text="Technical Services - Scheduler (Alpha)") self.title_label.pack(fill="both", expand="yes") self.inner_label = Label(self.title_label, text="Choose Hours File") self.inner_label.pack() # Button for File Selection self.file_select_button = Button(self.title_label, text="Select File", command=lambda: self.file_button_press()) self.file_select_button.pack() # Label for File Selection Button self.file_select_label = Label(self.title_label, textvariable=self.file_select_text) self.file_select_label.pack() # Button for Parsing File self.file_parse_button = Button(self.title_label, state=DISABLED, text="Read File", command=lambda: self.parse_button_pressed()) self.file_parse_button.pack() # List of Events self.events_list_box = Listbox(self.title_label) self.events_list_box.pack() # Show Info Button self.show_info_button = Button(self.title_label, state="disabled", command=lambda: self.show_event_info()) self.show_info_button.pack() # Shows information about event self.text_area = Text(self.title_label) self.text_area.pack() # Called when Select File button is pressed. def file_button_press(self): self.parser.choose_file() self.file_select_text.set(self.parser.file_name) if self.parser.file_name is not None: <|fim_middle|> def parse_button_pressed(self): self.events_list = self.parser.parse_file() self.populate_list_box(self.events_list_box) # Puts names of events in a list from parsed file. def populate_list_box(self, list_box): i = 0 for event in self.events_list: list_box.insert(i, event.get_event_name()) i += 1 self.show_info_button['state'] = 'normal' def show_event_info(self): # Store Active Time Index event_list_index = int(self.events_list_box.index(ACTIVE)) # Clear text box from previous event details # if self.text_area.get(END) is not "\n": # self.text_area.delete(0, 'end') # Display Formatted Information about Event. self.text_area.insert(END, "Event: " + self.events_list[event_list_index].get_event_name()) self.text_area.insert(END, '\n') self.text_area.insert(END, "Location: " + self.events_list[event_list_index].get_event_location()) self.text_area.insert(END, '\n') self.text_area.insert(END, "Start Time: " + self.events_list[event_list_index].get_event_start_time()) self.text_area.insert(END, '\n') self.text_area.insert(END, "End Time: " + self.events_list[event_list_index].get_event_end_time()) self.text_area.insert(END, '\n') self.text_area.insert(END, "# of Staff: " + self.events_list[event_list_index].get_event_number_employees()) self.text_area.insert(END, '\n') root = Tk() root.wm_title("Scheduler (Alpha)") main_app = UserInterface(master=root) main_app.mainloop() <|fim▁end|>
self.file_parse_button['state'] = 'normal'
<|file_name|>interface.py<|end_file_name|><|fim▁begin|>from tkinter import * import tkinter import HoursParser class UserInterface(tkinter.Frame): def <|fim_middle|>(self, master): self.master = master self.events_list = [] # Set window size master.minsize(width=800, height=600) master.maxsize(width=800, height=600) # File Parser self.parser = HoursParser.FileParser() # Filename Label self.file_select_text = tkinter.StringVar() self.file_select_text.set(" ") # Initialize Widgets super().__init__(master) self.pack() # Label for Application self.title_label = LabelFrame(master, text="Technical Services - Scheduler (Alpha)") self.title_label.pack(fill="both", expand="yes") self.inner_label = Label(self.title_label, text="Choose Hours File") self.inner_label.pack() # Button for File Selection self.file_select_button = Button(self.title_label, text="Select File", command=lambda: self.file_button_press()) self.file_select_button.pack() # Label for File Selection Button self.file_select_label = Label(self.title_label, textvariable=self.file_select_text) self.file_select_label.pack() # Button for Parsing File self.file_parse_button = Button(self.title_label, state=DISABLED, text="Read File", command=lambda: self.parse_button_pressed()) self.file_parse_button.pack() # List of Events self.events_list_box = Listbox(self.title_label) self.events_list_box.pack() # Show Info Button self.show_info_button = Button(self.title_label, state="disabled", command=lambda: self.show_event_info()) self.show_info_button.pack() # Shows information about event self.text_area = Text(self.title_label) self.text_area.pack() # Called when Select File button is pressed. def file_button_press(self): self.parser.choose_file() self.file_select_text.set(self.parser.file_name) if self.parser.file_name is not None: self.file_parse_button['state'] = 'normal' def parse_button_pressed(self): self.events_list = self.parser.parse_file() self.populate_list_box(self.events_list_box) # Puts names of events in a list from parsed file. def populate_list_box(self, list_box): i = 0 for event in self.events_list: list_box.insert(i, event.get_event_name()) i += 1 self.show_info_button['state'] = 'normal' def show_event_info(self): # Store Active Time Index event_list_index = int(self.events_list_box.index(ACTIVE)) # Clear text box from previous event details # if self.text_area.get(END) is not "\n": # self.text_area.delete(0, 'end') # Display Formatted Information about Event. self.text_area.insert(END, "Event: " + self.events_list[event_list_index].get_event_name()) self.text_area.insert(END, '\n') self.text_area.insert(END, "Location: " + self.events_list[event_list_index].get_event_location()) self.text_area.insert(END, '\n') self.text_area.insert(END, "Start Time: " + self.events_list[event_list_index].get_event_start_time()) self.text_area.insert(END, '\n') self.text_area.insert(END, "End Time: " + self.events_list[event_list_index].get_event_end_time()) self.text_area.insert(END, '\n') self.text_area.insert(END, "# of Staff: " + self.events_list[event_list_index].get_event_number_employees()) self.text_area.insert(END, '\n') root = Tk() root.wm_title("Scheduler (Alpha)") main_app = UserInterface(master=root) main_app.mainloop() <|fim▁end|>
__init__
<|file_name|>interface.py<|end_file_name|><|fim▁begin|>from tkinter import * import tkinter import HoursParser class UserInterface(tkinter.Frame): def __init__(self, master): self.master = master self.events_list = [] # Set window size master.minsize(width=800, height=600) master.maxsize(width=800, height=600) # File Parser self.parser = HoursParser.FileParser() # Filename Label self.file_select_text = tkinter.StringVar() self.file_select_text.set(" ") # Initialize Widgets super().__init__(master) self.pack() # Label for Application self.title_label = LabelFrame(master, text="Technical Services - Scheduler (Alpha)") self.title_label.pack(fill="both", expand="yes") self.inner_label = Label(self.title_label, text="Choose Hours File") self.inner_label.pack() # Button for File Selection self.file_select_button = Button(self.title_label, text="Select File", command=lambda: self.file_button_press()) self.file_select_button.pack() # Label for File Selection Button self.file_select_label = Label(self.title_label, textvariable=self.file_select_text) self.file_select_label.pack() # Button for Parsing File self.file_parse_button = Button(self.title_label, state=DISABLED, text="Read File", command=lambda: self.parse_button_pressed()) self.file_parse_button.pack() # List of Events self.events_list_box = Listbox(self.title_label) self.events_list_box.pack() # Show Info Button self.show_info_button = Button(self.title_label, state="disabled", command=lambda: self.show_event_info()) self.show_info_button.pack() # Shows information about event self.text_area = Text(self.title_label) self.text_area.pack() # Called when Select File button is pressed. def <|fim_middle|>(self): self.parser.choose_file() self.file_select_text.set(self.parser.file_name) if self.parser.file_name is not None: self.file_parse_button['state'] = 'normal' def parse_button_pressed(self): self.events_list = self.parser.parse_file() self.populate_list_box(self.events_list_box) # Puts names of events in a list from parsed file. def populate_list_box(self, list_box): i = 0 for event in self.events_list: list_box.insert(i, event.get_event_name()) i += 1 self.show_info_button['state'] = 'normal' def show_event_info(self): # Store Active Time Index event_list_index = int(self.events_list_box.index(ACTIVE)) # Clear text box from previous event details # if self.text_area.get(END) is not "\n": # self.text_area.delete(0, 'end') # Display Formatted Information about Event. self.text_area.insert(END, "Event: " + self.events_list[event_list_index].get_event_name()) self.text_area.insert(END, '\n') self.text_area.insert(END, "Location: " + self.events_list[event_list_index].get_event_location()) self.text_area.insert(END, '\n') self.text_area.insert(END, "Start Time: " + self.events_list[event_list_index].get_event_start_time()) self.text_area.insert(END, '\n') self.text_area.insert(END, "End Time: " + self.events_list[event_list_index].get_event_end_time()) self.text_area.insert(END, '\n') self.text_area.insert(END, "# of Staff: " + self.events_list[event_list_index].get_event_number_employees()) self.text_area.insert(END, '\n') root = Tk() root.wm_title("Scheduler (Alpha)") main_app = UserInterface(master=root) main_app.mainloop() <|fim▁end|>
file_button_press
<|file_name|>interface.py<|end_file_name|><|fim▁begin|>from tkinter import * import tkinter import HoursParser class UserInterface(tkinter.Frame): def __init__(self, master): self.master = master self.events_list = [] # Set window size master.minsize(width=800, height=600) master.maxsize(width=800, height=600) # File Parser self.parser = HoursParser.FileParser() # Filename Label self.file_select_text = tkinter.StringVar() self.file_select_text.set(" ") # Initialize Widgets super().__init__(master) self.pack() # Label for Application self.title_label = LabelFrame(master, text="Technical Services - Scheduler (Alpha)") self.title_label.pack(fill="both", expand="yes") self.inner_label = Label(self.title_label, text="Choose Hours File") self.inner_label.pack() # Button for File Selection self.file_select_button = Button(self.title_label, text="Select File", command=lambda: self.file_button_press()) self.file_select_button.pack() # Label for File Selection Button self.file_select_label = Label(self.title_label, textvariable=self.file_select_text) self.file_select_label.pack() # Button for Parsing File self.file_parse_button = Button(self.title_label, state=DISABLED, text="Read File", command=lambda: self.parse_button_pressed()) self.file_parse_button.pack() # List of Events self.events_list_box = Listbox(self.title_label) self.events_list_box.pack() # Show Info Button self.show_info_button = Button(self.title_label, state="disabled", command=lambda: self.show_event_info()) self.show_info_button.pack() # Shows information about event self.text_area = Text(self.title_label) self.text_area.pack() # Called when Select File button is pressed. def file_button_press(self): self.parser.choose_file() self.file_select_text.set(self.parser.file_name) if self.parser.file_name is not None: self.file_parse_button['state'] = 'normal' def <|fim_middle|>(self): self.events_list = self.parser.parse_file() self.populate_list_box(self.events_list_box) # Puts names of events in a list from parsed file. def populate_list_box(self, list_box): i = 0 for event in self.events_list: list_box.insert(i, event.get_event_name()) i += 1 self.show_info_button['state'] = 'normal' def show_event_info(self): # Store Active Time Index event_list_index = int(self.events_list_box.index(ACTIVE)) # Clear text box from previous event details # if self.text_area.get(END) is not "\n": # self.text_area.delete(0, 'end') # Display Formatted Information about Event. self.text_area.insert(END, "Event: " + self.events_list[event_list_index].get_event_name()) self.text_area.insert(END, '\n') self.text_area.insert(END, "Location: " + self.events_list[event_list_index].get_event_location()) self.text_area.insert(END, '\n') self.text_area.insert(END, "Start Time: " + self.events_list[event_list_index].get_event_start_time()) self.text_area.insert(END, '\n') self.text_area.insert(END, "End Time: " + self.events_list[event_list_index].get_event_end_time()) self.text_area.insert(END, '\n') self.text_area.insert(END, "# of Staff: " + self.events_list[event_list_index].get_event_number_employees()) self.text_area.insert(END, '\n') root = Tk() root.wm_title("Scheduler (Alpha)") main_app = UserInterface(master=root) main_app.mainloop() <|fim▁end|>
parse_button_pressed
<|file_name|>interface.py<|end_file_name|><|fim▁begin|>from tkinter import * import tkinter import HoursParser class UserInterface(tkinter.Frame): def __init__(self, master): self.master = master self.events_list = [] # Set window size master.minsize(width=800, height=600) master.maxsize(width=800, height=600) # File Parser self.parser = HoursParser.FileParser() # Filename Label self.file_select_text = tkinter.StringVar() self.file_select_text.set(" ") # Initialize Widgets super().__init__(master) self.pack() # Label for Application self.title_label = LabelFrame(master, text="Technical Services - Scheduler (Alpha)") self.title_label.pack(fill="both", expand="yes") self.inner_label = Label(self.title_label, text="Choose Hours File") self.inner_label.pack() # Button for File Selection self.file_select_button = Button(self.title_label, text="Select File", command=lambda: self.file_button_press()) self.file_select_button.pack() # Label for File Selection Button self.file_select_label = Label(self.title_label, textvariable=self.file_select_text) self.file_select_label.pack() # Button for Parsing File self.file_parse_button = Button(self.title_label, state=DISABLED, text="Read File", command=lambda: self.parse_button_pressed()) self.file_parse_button.pack() # List of Events self.events_list_box = Listbox(self.title_label) self.events_list_box.pack() # Show Info Button self.show_info_button = Button(self.title_label, state="disabled", command=lambda: self.show_event_info()) self.show_info_button.pack() # Shows information about event self.text_area = Text(self.title_label) self.text_area.pack() # Called when Select File button is pressed. def file_button_press(self): self.parser.choose_file() self.file_select_text.set(self.parser.file_name) if self.parser.file_name is not None: self.file_parse_button['state'] = 'normal' def parse_button_pressed(self): self.events_list = self.parser.parse_file() self.populate_list_box(self.events_list_box) # Puts names of events in a list from parsed file. def <|fim_middle|>(self, list_box): i = 0 for event in self.events_list: list_box.insert(i, event.get_event_name()) i += 1 self.show_info_button['state'] = 'normal' def show_event_info(self): # Store Active Time Index event_list_index = int(self.events_list_box.index(ACTIVE)) # Clear text box from previous event details # if self.text_area.get(END) is not "\n": # self.text_area.delete(0, 'end') # Display Formatted Information about Event. self.text_area.insert(END, "Event: " + self.events_list[event_list_index].get_event_name()) self.text_area.insert(END, '\n') self.text_area.insert(END, "Location: " + self.events_list[event_list_index].get_event_location()) self.text_area.insert(END, '\n') self.text_area.insert(END, "Start Time: " + self.events_list[event_list_index].get_event_start_time()) self.text_area.insert(END, '\n') self.text_area.insert(END, "End Time: " + self.events_list[event_list_index].get_event_end_time()) self.text_area.insert(END, '\n') self.text_area.insert(END, "# of Staff: " + self.events_list[event_list_index].get_event_number_employees()) self.text_area.insert(END, '\n') root = Tk() root.wm_title("Scheduler (Alpha)") main_app = UserInterface(master=root) main_app.mainloop() <|fim▁end|>
populate_list_box
<|file_name|>interface.py<|end_file_name|><|fim▁begin|>from tkinter import * import tkinter import HoursParser class UserInterface(tkinter.Frame): def __init__(self, master): self.master = master self.events_list = [] # Set window size master.minsize(width=800, height=600) master.maxsize(width=800, height=600) # File Parser self.parser = HoursParser.FileParser() # Filename Label self.file_select_text = tkinter.StringVar() self.file_select_text.set(" ") # Initialize Widgets super().__init__(master) self.pack() # Label for Application self.title_label = LabelFrame(master, text="Technical Services - Scheduler (Alpha)") self.title_label.pack(fill="both", expand="yes") self.inner_label = Label(self.title_label, text="Choose Hours File") self.inner_label.pack() # Button for File Selection self.file_select_button = Button(self.title_label, text="Select File", command=lambda: self.file_button_press()) self.file_select_button.pack() # Label for File Selection Button self.file_select_label = Label(self.title_label, textvariable=self.file_select_text) self.file_select_label.pack() # Button for Parsing File self.file_parse_button = Button(self.title_label, state=DISABLED, text="Read File", command=lambda: self.parse_button_pressed()) self.file_parse_button.pack() # List of Events self.events_list_box = Listbox(self.title_label) self.events_list_box.pack() # Show Info Button self.show_info_button = Button(self.title_label, state="disabled", command=lambda: self.show_event_info()) self.show_info_button.pack() # Shows information about event self.text_area = Text(self.title_label) self.text_area.pack() # Called when Select File button is pressed. def file_button_press(self): self.parser.choose_file() self.file_select_text.set(self.parser.file_name) if self.parser.file_name is not None: self.file_parse_button['state'] = 'normal' def parse_button_pressed(self): self.events_list = self.parser.parse_file() self.populate_list_box(self.events_list_box) # Puts names of events in a list from parsed file. def populate_list_box(self, list_box): i = 0 for event in self.events_list: list_box.insert(i, event.get_event_name()) i += 1 self.show_info_button['state'] = 'normal' def <|fim_middle|>(self): # Store Active Time Index event_list_index = int(self.events_list_box.index(ACTIVE)) # Clear text box from previous event details # if self.text_area.get(END) is not "\n": # self.text_area.delete(0, 'end') # Display Formatted Information about Event. self.text_area.insert(END, "Event: " + self.events_list[event_list_index].get_event_name()) self.text_area.insert(END, '\n') self.text_area.insert(END, "Location: " + self.events_list[event_list_index].get_event_location()) self.text_area.insert(END, '\n') self.text_area.insert(END, "Start Time: " + self.events_list[event_list_index].get_event_start_time()) self.text_area.insert(END, '\n') self.text_area.insert(END, "End Time: " + self.events_list[event_list_index].get_event_end_time()) self.text_area.insert(END, '\n') self.text_area.insert(END, "# of Staff: " + self.events_list[event_list_index].get_event_number_employees()) self.text_area.insert(END, '\n') root = Tk() root.wm_title("Scheduler (Alpha)") main_app = UserInterface(master=root) main_app.mainloop() <|fim▁end|>
show_event_info
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import json
import time
import urllib

from tempest.common import rest_client
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging

<|fim▁hole|>

LOG = logging.getLogger(__name__)


class SnapshotsClientJSON(rest_client.RestClient):
    """Client class to send CRUD Volume API requests."""

    def __init__(self, auth_provider):
        super(SnapshotsClientJSON, self).__init__(auth_provider)

        self.service = CONF.volume.catalog_type
        self.build_interval = CONF.volume.build_interval
        self.build_timeout = CONF.volume.build_timeout

    def list_snapshots(self, params=None):
        """List all the snapshot."""
        url = 'snapshots'
        if params:
            url += '?%s' % urllib.urlencode(params)

        resp, body = self.get(url)
        body = json.loads(body)
        return resp, body['snapshots']

    def list_snapshots_with_detail(self, params=None):
        """List the details of all snapshots."""
        url = 'snapshots/detail'
        if params:
            url += '?%s' % urllib.urlencode(params)

        resp, body = self.get(url)
        body = json.loads(body)
        return resp, body['snapshots']

    def get_snapshot(self, snapshot_id):
        """Returns the details of a single snapshot."""
        url = "snapshots/%s" % str(snapshot_id)
        resp, body = self.get(url)
        body = json.loads(body)
        return resp, body['snapshot']

    def create_snapshot(self, volume_id, **kwargs):
        """
        Creates a new snapshot.
        volume_id(Required): id of the volume.
        force: Create a snapshot even if the volume attached (Default=False)
        display_name: Optional snapshot Name.
        display_description: User friendly snapshot description.
        """
        post_body = {'volume_id': volume_id}
        post_body.update(kwargs)
        post_body = json.dumps({'snapshot': post_body})
        resp, body = self.post('snapshots', post_body)
        body = json.loads(body)
        return resp, body['snapshot']

    def update_snapshot(self, snapshot_id, **kwargs):
        """Updates a snapshot."""
        put_body = json.dumps({'snapshot': kwargs})
        resp, body = self.put('snapshots/%s' % snapshot_id, put_body)
        body = json.loads(body)
        return resp, body['snapshot']

    # NOTE(afazekas): just for the wait function
    def _get_snapshot_status(self, snapshot_id):
        resp, body = self.get_snapshot(snapshot_id)
        status = body['status']
        # NOTE(afazekas): snapshot can reach an "error"
        # state in a "normal" lifecycle
        if (status == 'error'):
            raise exceptions.SnapshotBuildErrorException(
                snapshot_id=snapshot_id)
        return status

    # NOTE(afazkas): Wait reinvented again. It is not in the correct layer
    def wait_for_snapshot_status(self, snapshot_id, status):
        """Waits for a Snapshot to reach a given status."""
        start_time = time.time()
        old_value = value = self._get_snapshot_status(snapshot_id)
        while True:
            dtime = time.time() - start_time
            time.sleep(self.build_interval)
            if value != old_value:
                LOG.info('Value transition from "%s" to "%s"'
                         'in %d second(s).', old_value, value, dtime)
            if (value == status):
                return value
            if dtime > self.build_timeout:
                message = ('Time Limit Exceeded! (%ds)'
                           'while waiting for %s, '
                           'but we got %s.'
                           % (self.build_timeout, status, value))
                raise exceptions.TimeoutException(message)
            time.sleep(self.build_interval)
            old_value = value
            value = self._get_snapshot_status(snapshot_id)

    def delete_snapshot(self, snapshot_id):
        """Delete Snapshot."""
        return self.delete("snapshots/%s" % str(snapshot_id))

    def is_resource_deleted(self, id):
        try:
            self.get_snapshot(id)
        except exceptions.NotFound:
            return True
        return False

    def reset_snapshot_status(self, snapshot_id, status):
        """Reset the specified snapshot's status."""
        post_body = json.dumps({'os-reset_status': {"status": status}})
        resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body)
        return resp, body

    def update_snapshot_status(self, snapshot_id, status, progress):
        """Update the specified snapshot's status."""
        post_body = {
            'status': status,
            'progress': progress
        }
        post_body = json.dumps({'os-update_snapshot_status': post_body})
        url = 'snapshots/%s/action' % str(snapshot_id)
        resp, body = self.post(url, post_body)
        return resp, body

    def create_snapshot_metadata(self, snapshot_id, metadata):
        """Create metadata for the snapshot."""
        put_body = json.dumps({'metadata': metadata})
        url = "snapshots/%s/metadata" % str(snapshot_id)
        resp, body = self.post(url, put_body)
        body = json.loads(body)
        return resp, body['metadata']

    def get_snapshot_metadata(self, snapshot_id):
        """Get metadata of the snapshot."""
        url = "snapshots/%s/metadata" % str(snapshot_id)
        resp, body = self.get(url)
        body = json.loads(body)
        return resp, body['metadata']

    def update_snapshot_metadata(self, snapshot_id, metadata):
        """Update metadata for the snapshot."""
        put_body = json.dumps({'metadata': metadata})
        url = "snapshots/%s/metadata" % str(snapshot_id)
        resp, body = self.put(url, put_body)
        body = json.loads(body)
        return resp, body['metadata']

    def update_snapshot_metadata_item(self, snapshot_id, id, meta_item):
        """Update metadata item for the snapshot."""
        put_body = json.dumps({'meta': meta_item})
        url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id))
        resp, body = self.put(url, put_body)
        body = json.loads(body)
        return resp, body['meta']

    def delete_snapshot_metadata_item(self, snapshot_id, id):
        """Delete metadata item for the snapshot."""
        url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id))
        resp, body = self.delete(url)
        return resp, body

    def force_delete_snapshot(self, snapshot_id):
        """Force Delete Snapshot."""
        post_body = json.dumps({'os-force_delete': {}})
        resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body)
        return resp, body<|fim▁end|>
CONF = config.CONF
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import json
import time
import urllib

from tempest.common import rest_client
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging

CONF = config.CONF

LOG = logging.getLogger(__name__)


class SnapshotsClientJSON(rest_client.RestClient):
<|fim_middle|>
<|fim▁end|>
    """Client class to send CRUD Volume API requests."""

    def __init__(self, auth_provider):
        super(SnapshotsClientJSON, self).__init__(auth_provider)

        self.service = CONF.volume.catalog_type
        self.build_interval = CONF.volume.build_interval
        self.build_timeout = CONF.volume.build_timeout

    def list_snapshots(self, params=None):
        """List all the snapshot."""
        url = 'snapshots'
        if params:
            url += '?%s' % urllib.urlencode(params)

        resp, body = self.get(url)
        body = json.loads(body)
        return resp, body['snapshots']

    def list_snapshots_with_detail(self, params=None):
        """List the details of all snapshots."""
        url = 'snapshots/detail'
        if params:
            url += '?%s' % urllib.urlencode(params)

        resp, body = self.get(url)
        body = json.loads(body)
        return resp, body['snapshots']

    def get_snapshot(self, snapshot_id):
        """Returns the details of a single snapshot."""
        url = "snapshots/%s" % str(snapshot_id)
        resp, body = self.get(url)
        body = json.loads(body)
        return resp, body['snapshot']

    def create_snapshot(self, volume_id, **kwargs):
        """
        Creates a new snapshot.
        volume_id(Required): id of the volume.
        force: Create a snapshot even if the volume attached (Default=False)
        display_name: Optional snapshot Name.
        display_description: User friendly snapshot description.
        """
        post_body = {'volume_id': volume_id}
        post_body.update(kwargs)
        post_body = json.dumps({'snapshot': post_body})
        resp, body = self.post('snapshots', post_body)
        body = json.loads(body)
        return resp, body['snapshot']

    def update_snapshot(self, snapshot_id, **kwargs):
        """Updates a snapshot."""
        put_body = json.dumps({'snapshot': kwargs})
        resp, body = self.put('snapshots/%s' % snapshot_id, put_body)
        body = json.loads(body)
        return resp, body['snapshot']

    # NOTE(afazekas): just for the wait function
    def _get_snapshot_status(self, snapshot_id):
        resp, body = self.get_snapshot(snapshot_id)
        status = body['status']
        # NOTE(afazekas): snapshot can reach an "error"
        # state in a "normal" lifecycle
        if (status == 'error'):
            raise exceptions.SnapshotBuildErrorException(
                snapshot_id=snapshot_id)
        return status

    # NOTE(afazkas): Wait reinvented again. It is not in the correct layer
    def wait_for_snapshot_status(self, snapshot_id, status):
        """Waits for a Snapshot to reach a given status."""
        start_time = time.time()
        old_value = value = self._get_snapshot_status(snapshot_id)
        while True:
            dtime = time.time() - start_time
            time.sleep(self.build_interval)
            if value != old_value:
                LOG.info('Value transition from "%s" to "%s"'
                         'in %d second(s).', old_value, value, dtime)
            if (value == status):
                return value
            if dtime > self.build_timeout:
                message = ('Time Limit Exceeded! (%ds)'
                           'while waiting for %s, '
                           'but we got %s.'
                           % (self.build_timeout, status, value))
                raise exceptions.TimeoutException(message)
            time.sleep(self.build_interval)
            old_value = value
            value = self._get_snapshot_status(snapshot_id)

    def delete_snapshot(self, snapshot_id):
        """Delete Snapshot."""
        return self.delete("snapshots/%s" % str(snapshot_id))

    def is_resource_deleted(self, id):
        try:
            self.get_snapshot(id)
        except exceptions.NotFound:
            return True
        return False

    def reset_snapshot_status(self, snapshot_id, status):
        """Reset the specified snapshot's status."""
        post_body = json.dumps({'os-reset_status': {"status": status}})
        resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body)
        return resp, body

    def update_snapshot_status(self, snapshot_id, status, progress):
        """Update the specified snapshot's status."""
        post_body = {
            'status': status,
            'progress': progress
        }
        post_body = json.dumps({'os-update_snapshot_status': post_body})
        url = 'snapshots/%s/action' % str(snapshot_id)
        resp, body = self.post(url, post_body)
        return resp, body

    def create_snapshot_metadata(self, snapshot_id, metadata):
        """Create metadata for the snapshot."""
        put_body = json.dumps({'metadata': metadata})
        url = "snapshots/%s/metadata" % str(snapshot_id)
        resp, body = self.post(url, put_body)
        body = json.loads(body)
        return resp, body['metadata']

    def get_snapshot_metadata(self, snapshot_id):
        """Get metadata of the snapshot."""
        url = "snapshots/%s/metadata" % str(snapshot_id)
        resp, body = self.get(url)
        body = json.loads(body)
        return resp, body['metadata']

    def update_snapshot_metadata(self, snapshot_id, metadata):
        """Update metadata for the snapshot."""
        put_body = json.dumps({'metadata': metadata})
        url = "snapshots/%s/metadata" % str(snapshot_id)
        resp, body = self.put(url, put_body)
        body = json.loads(body)
        return resp, body['metadata']

    def update_snapshot_metadata_item(self, snapshot_id, id, meta_item):
        """Update metadata item for the snapshot."""
        put_body = json.dumps({'meta': meta_item})
        url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id))
        resp, body = self.put(url, put_body)
        body = json.loads(body)
        return resp, body['meta']

    def delete_snapshot_metadata_item(self, snapshot_id, id):
        """Delete metadata item for the snapshot."""
        url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id))
        resp, body = self.delete(url)
        return resp, body

    def force_delete_snapshot(self, snapshot_id):
        """Force Delete Snapshot."""
        post_body = json.dumps({'os-force_delete': {}})
        resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body)
        return resp, body
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): <|fim_middle|> def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
        super(SnapshotsClientJSON, self).__init__(auth_provider)
        self.service = CONF.volume.catalog_type
        self.build_interval = CONF.volume.build_interval
        self.build_timeout = CONF.volume.build_timeout
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): <|fim_middle|> def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
"""List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots']
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): <|fim_middle|> def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
"""List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots']
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): <|fim_middle|> def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
"""Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot']
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): <|fim_middle|> def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
""" Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot']
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): <|fim_middle|> # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
"""Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot']
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): <|fim_middle|> # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
        resp, body = self.get_snapshot(snapshot_id)
        status = body['status']
        # NOTE(afazekas): snapshot can reach an "error"
        # state in a "normal" lifecycle
        if (status == 'error'):
            raise exceptions.SnapshotBuildErrorException(
                snapshot_id=snapshot_id)
        return status
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. 
It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): <|fim_middle|> def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
"""Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' % (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id)
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): <|fim_middle|> def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
"""Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id))
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): <|fim_middle|> def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
try:
    self.get_snapshot(id)
except exceptions.NotFound:
    return True
return False
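For reference, the sample above masks is_resource_deleted, which pairs with delete_snapshot and the client's polling settings. A minimal usage sketch, assuming an already-built auth_provider and a snapshot_id placeholder (neither value comes from the dataset row; time is the module-level import in the prompt):

# Hypothetical caller; auth_provider and snapshot_id are placeholders.
client = SnapshotsClientJSON(auth_provider)
client.delete_snapshot(snapshot_id)
# get_snapshot() raising NotFound is what makes is_resource_deleted() return True.
while not client.is_resource_deleted(snapshot_id):
    time.sleep(client.build_interval)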
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): <|fim_middle|> def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
"""Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): <|fim_middle|> def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
"""Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): <|fim_middle|> def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
"""Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata']
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): <|fim_middle|> def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
"""Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata']
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): <|fim_middle|> def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
"""Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata']
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): <|fim_middle|> def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
"""Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta']
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): <|fim_middle|> def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
"""Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): <|fim_middle|> <|fim▁end|>
"""Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: <|fim_middle|> resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
url += '?%s' % urllib.urlencode(params)
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: <|fim_middle|> resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
url += '?%s' % urllib.urlencode(params)
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): <|fim_middle|> return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id)
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: <|fim_middle|> if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime)
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): <|fim_middle|> if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
return value
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. 
It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: <|fim_middle|> time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' % (self.build_timeout, status, value)) raise exceptions.TimeoutException(message)
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def <|fim_middle|>(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
__init__
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def <|fim_middle|>(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
list_snapshots
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def <|fim_middle|>(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
list_snapshots_with_detail
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def <|fim_middle|>(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
get_snapshot
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def <|fim_middle|>(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
create_snapshot
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def <|fim_middle|>(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
update_snapshot
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def <|fim_middle|>(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
_get_snapshot_status
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def <|fim_middle|>(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
wait_for_snapshot_status
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def <|fim_middle|>(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
delete_snapshot
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def <|fim_middle|>(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
is_resource_deleted
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def <|fim_middle|>(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
reset_snapshot_status
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def <|fim_middle|>(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
update_snapshot_status
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def <|fim_middle|>(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
create_snapshot_metadata
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def <|fim_middle|>(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
get_snapshot_metadata
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def <|fim_middle|>(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
update_snapshot_metadata
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def <|fim_middle|>(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
update_snapshot_metadata_item
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def <|fim_middle|>(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def force_delete_snapshot(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
delete_snapshot_metadata_item
<|file_name|>snapshots_client.py<|end_file_name|><|fim▁begin|># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import time import urllib from tempest.common import rest_client from tempest import config from tempest import exceptions from tempest.openstack.common import log as logging CONF = config.CONF LOG = logging.getLogger(__name__) class SnapshotsClientJSON(rest_client.RestClient): """Client class to send CRUD Volume API requests.""" def __init__(self, auth_provider): super(SnapshotsClientJSON, self).__init__(auth_provider) self.service = CONF.volume.catalog_type self.build_interval = CONF.volume.build_interval self.build_timeout = CONF.volume.build_timeout def list_snapshots(self, params=None): """List all the snapshot.""" url = 'snapshots' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def list_snapshots_with_detail(self, params=None): """List the details of all snapshots.""" url = 'snapshots/detail' if params: url += '?%s' % urllib.urlencode(params) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshots'] def get_snapshot(self, snapshot_id): """Returns the details of a single snapshot.""" url = "snapshots/%s" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['snapshot'] def create_snapshot(self, volume_id, **kwargs): """ Creates a new snapshot. volume_id(Required): id of the volume. force: Create a snapshot even if the volume attached (Default=False) display_name: Optional snapshot Name. display_description: User friendly snapshot description. """ post_body = {'volume_id': volume_id} post_body.update(kwargs) post_body = json.dumps({'snapshot': post_body}) resp, body = self.post('snapshots', post_body) body = json.loads(body) return resp, body['snapshot'] def update_snapshot(self, snapshot_id, **kwargs): """Updates a snapshot.""" put_body = json.dumps({'snapshot': kwargs}) resp, body = self.put('snapshots/%s' % snapshot_id, put_body) body = json.loads(body) return resp, body['snapshot'] # NOTE(afazekas): just for the wait function def _get_snapshot_status(self, snapshot_id): resp, body = self.get_snapshot(snapshot_id) status = body['status'] # NOTE(afazekas): snapshot can reach an "error" # state in a "normal" lifecycle if (status == 'error'): raise exceptions.SnapshotBuildErrorException( snapshot_id=snapshot_id) return status # NOTE(afazkas): Wait reinvented again. It is not in the correct layer def wait_for_snapshot_status(self, snapshot_id, status): """Waits for a Snapshot to reach a given status.""" start_time = time.time() old_value = value = self._get_snapshot_status(snapshot_id) while True: dtime = time.time() - start_time time.sleep(self.build_interval) if value != old_value: LOG.info('Value transition from "%s" to "%s"' 'in %d second(s).', old_value, value, dtime) if (value == status): return value if dtime > self.build_timeout: message = ('Time Limit Exceeded! (%ds)' 'while waiting for %s, ' 'but we got %s.' 
% (self.build_timeout, status, value)) raise exceptions.TimeoutException(message) time.sleep(self.build_interval) old_value = value value = self._get_snapshot_status(snapshot_id) def delete_snapshot(self, snapshot_id): """Delete Snapshot.""" return self.delete("snapshots/%s" % str(snapshot_id)) def is_resource_deleted(self, id): try: self.get_snapshot(id) except exceptions.NotFound: return True return False def reset_snapshot_status(self, snapshot_id, status): """Reset the specified snapshot's status.""" post_body = json.dumps({'os-reset_status': {"status": status}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body def update_snapshot_status(self, snapshot_id, status, progress): """Update the specified snapshot's status.""" post_body = { 'status': status, 'progress': progress } post_body = json.dumps({'os-update_snapshot_status': post_body}) url = 'snapshots/%s/action' % str(snapshot_id) resp, body = self.post(url, post_body) return resp, body def create_snapshot_metadata(self, snapshot_id, metadata): """Create metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.post(url, put_body) body = json.loads(body) return resp, body['metadata'] def get_snapshot_metadata(self, snapshot_id): """Get metadata of the snapshot.""" url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.get(url) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata(self, snapshot_id, metadata): """Update metadata for the snapshot.""" put_body = json.dumps({'metadata': metadata}) url = "snapshots/%s/metadata" % str(snapshot_id) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['metadata'] def update_snapshot_metadata_item(self, snapshot_id, id, meta_item): """Update metadata item for the snapshot.""" put_body = json.dumps({'meta': meta_item}) url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.put(url, put_body) body = json.loads(body) return resp, body['meta'] def delete_snapshot_metadata_item(self, snapshot_id, id): """Delete metadata item for the snapshot.""" url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id)) resp, body = self.delete(url) return resp, body def <|fim_middle|>(self, snapshot_id): """Force Delete Snapshot.""" post_body = json.dumps({'os-force_delete': {}}) resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body) return resp, body <|fim▁end|>
force_delete_snapshot
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>from .. import Provider as CompanyProvider class Provider(CompanyProvider): formats = ( "{{last_name}} {{company_suffix}}", "{{last_name}} {{last_name}} {{company_suffix}}", "{{large_company}}", ) large_companies = ( "AZAL", "Azergold", "SOCAR", "Socar Polymer", "Global Export Fruits", "Baku Steel Company", "Azersun", "Sun Food", "Azərbaycan Şəkər İstehsalat Birliyi", "Azərsu", "Xəzər Dəniz Gəmiçiliyi", "Azərenerji", "Bakıelektrikşəbəkə", "Azəralüminium", "Bravo", "Azərpambıq Aqrar Sənaye Kompleksi", "CTS-Agro", "Azərtütün Aqrar Sənaye Kompleksi", "Azəripək", "Azfruittrade", "AF Holding", "Azinko Holding", "Gilan Holding", "Azpetrol", "Azərtexnolayn", "Bakı Gəmiqayırma Zavodu",<|fim▁hole|> ) company_suffixes = ( "ASC", "QSC", "MMC", ) def large_company(self): """ :example: 'SOCAR' """ return self.random_element(self.large_companies)<|fim▁end|>
"Gəncə Tekstil Fabriki", "Mətanət A", "İrşad Electronics",