python_code
stringlengths 0
992k
| repo_name
stringlengths 8
46
| file_path
stringlengths 5
162
|
---|---|---|
# Copyright 2008 Nanorex, Inc. See LICENSE file for details.
"""
SimulatorParameters.py
Read the sim-params.txt file, and extract information needed on the
NE1 side. This currently consists of parameters controlling the
Yukawa non-bonded potential function for PAM3 and PAM5 DNA models.
NE1 generates the tables which define a user-defined potential
function for GROMACS.
@author: Eric M
@version: $Id$
@copyright: 2008 Nanorex, Inc. See LICENSE file for details.
"""
import os
import foundation.env as env
from platform_dependent.PlatformDependent import find_plugin_dir
from utilities.debug import print_compact_traceback
class SimulatorParameters(object):
    """
    Read the NanoDynamics-1 sim-params.txt file and expose the settings
    needed on the NE1 side. Currently these are the parameters controlling
    the Yukawa non-bonded potential function for the PAM3 and PAM5 DNA
    models (NE1 generates the GROMACS user-defined potential tables).

    Only lines of the form "ne1 <key> [value words...]" are read; a key
    with no value words is stored as the boolean True (a bare flag).
    """
    def __init__(self):
        ok, nd1_plugin_path = find_plugin_dir("NanoDynamics-1")
        if not ok:
            # on failure, find_plugin_dir returns an error message in place
            # of the path
            env.history.redmsg("Error: can't find " + nd1_plugin_path)
            nd1_plugin_path = "."
        fileName = os.path.join(nd1_plugin_path, "sim-params.txt")
        self._parameterValues = {}
        try:
            print("sim parameters used by NE1 read from: [%s]" % fileName)
            parametersFile = open(fileName)
            try:
                for line in parametersFile:
                    s = line.split()
                    # only "ne1 <key> [value...]" lines are for the NE1 side
                    if len(s) > 1 and s[0] == "ne1":
                        key = s[1]
                        if len(s) > 2:
                            value = " ".join(s[2:])
                        else:
                            value = True # bare key acts as a boolean flag
                        self._parameterValues[key] = value
            finally:
                # fix: the file handle was never closed before
                parametersFile.close()
        except IOError:
            msg = "Error reading [%s]" % fileName
            print_compact_traceback(msg + ": ")
            env.history.redmsg(msg)
            self._parameterValues = {}

    def _getFloatParameter(self, parameterName, defaultValue = 0.0):
        """
        Return the named parameter converted to float, or defaultValue if
        it is missing or malformed (a malformed value is also reported).
        """
        if parameterName in self._parameterValues:
            # note: 'in' replaces dict.has_key, which no longer exists in
            # modern Python
            try:
                return float(self._parameterValues[parameterName])
            except (TypeError, ValueError):
                # narrowed from a bare except: only conversion errors are
                # expected here
                print_compact_traceback()
                env.history.redmsg("malformed float parameter %s in sim-params.txt" % parameterName)
        return defaultValue

    def _getBooleanParameter(self, parameterName, defaultValue = False):
        """
        Return the truth value of the named parameter, or defaultValue if
        it is absent. Note that any non-empty stored string (even "False")
        is truthy -- unchanged legacy behavior.
        """
        if parameterName in self._parameterValues:
            return bool(self._parameterValues[parameterName])
        return defaultValue

    def getYukawaRSwitch(self):
        return self._getFloatParameter("YukawaRSwitch", 2.0)

    def getYukawaRCutoff(self):
        return self._getFloatParameter("YukawaRCutoff", 3.0)

    def getYukawaShift(self):
        return self._getBooleanParameter("YukawaShift", True)

    def getYukawaCounterionCharge(self):
        return self._getFloatParameter("YukawaCounterionCharge", 2.0)

    def getYukawaCounterionMolarity(self):
        return self._getFloatParameter("YukawaCounterionMolarity", 0.02)

    def getYukawaTemperatureKelvin(self):
        return self._getFloatParameter("YukawaTemperatureKelvin", 298.0)

    def getYukawaDielectric(self):
        return self._getFloatParameter("YukawaDielectric", 78.5)

    def getYukawaConstantMultiple(self):
        return self._getFloatParameter("YukawaConstantMultiple", 1.0)
| NanoCAD-master | cad/src/simulation/SimulatorParameters.py |
# Copyright 2004-2008 Nanorex, Inc. See LICENSE file for details.
"""
SimSetup.py
Dialog for setting up to run the simulator.
@author: Mark
@version: $Id$
@copyright: 2004-2008 Nanorex, Inc. See LICENSE file for details.
Created by Mark, under the name runSim.py.
Bruce 050324 changed some comments and did some code cleanup
(and also moved a lot of existing code for actually "running the simulator"
into runSim.py, so that file still exists, but has all different code
than before).
Bruce 050325 renamed file and class to SimSetup, to fit naming
convention for other Dialog subclasses.
"""
import os
from PyQt4.Qt import QDialog
from PyQt4.Qt import QButtonGroup
from PyQt4.Qt import QAbstractButton
from PyQt4.Qt import SIGNAL
from PyQt4.Qt import QSize, QWhatsThis
import foundation.env as env
from simulation.SimSetupDialog import Ui_SimSetupDialog
from simulation.movie import Movie
from utilities.debug import print_compact_traceback
from widgets.prefs_widgets import connect_checkbox_with_boolean_pref
from utilities.prefs_constants import Potential_energy_tracefile_prefs_key
from utilities.prefs_constants import electrostaticsForDnaDuringDynamics_prefs_key
from utilities.debug_prefs import debug_pref, Choice_boolean_False
from utilities.qt4transition import qt4todo
from utilities.TimeUtilities import timeStamp
from utilities.icon_utilities import geticon
# class FakeMovie:
#
# wware 060406 bug 1471 (sticky dialog params) - don't need a real movie, just need to hold the sim parameters
# If the sim parameters change, they might need to be updated everywhere a comment says "SIMPARAMS".
#
#bruce 060601 moving this here, since it's really an aspect of this dialog
# (in terms of what params to store, when to store them, etc);
# also fixing bug 1840 (like 1471 but work even after a sim was not aborted),
# and making the stickyness survive opening of a new file rather than being stored in the assy.
class FakeMovie:
    """
    Lightweight stand-in for a real Movie: snapshots just the simulation
    parameters of the given movie so they can be reused as sticky default
    settings for the next Run Dynamics dialog (bugs 1471, 1840).
    """
    def __init__(self, realmovie):
        # Snapshot only the parameter attributes; hold no reference to
        # the real movie itself. (The real movie may serve watch_motion
        # via __getattr__; here it becomes a plain attribute.)
        for attrname in ('totalFramesRequested', 'temp', 'stepsper',
                         'watch_motion', '_update_data', 'update_cond',
                         'print_energy'):
            setattr(self, attrname, getattr(realmovie, attrname))

    def fyi_reusing_your_moviefile(self, moviefile):
        # A real movie would react to its file being reused; nothing to do.
        return None

    def might_be_playable(self):
        # A fake movie never corresponds to a playable file.
        return False
    pass
_stickyParams = None # sometimes this is a FakeMovie object
class SimSetup(QDialog, Ui_SimSetupDialog): # before 050325 this class was called runSim
    """
    The "Run Dynamics" dialog class for setting up and launching a simulator run.
    """
    # NOTE(review): indentation in this file was reconstructed; nesting was
    # inferred from data flow (e.g. errorcode/partdir usage) -- confirm
    # against revision history if behavior looks wrong.
    def __init__(self, win, part, previous_movie = None, suffix = ""):
        """
        use previous_movie (if passed) for default values,
        otherwise use the same ones last ok'd by user
        (whether or not that sim got aborted), or default values if that never happened in this session;
        on success or failure, make a new Movie and store it as self.movie

        @param win: the NE1 main window (used as the dialog parent)
        @param part: the Part to be simulated
        @param suffix: appended to the generated movie filename for uniqueness
        """
        QDialog.__init__(self, win) # win is parent.
        self.setupUi(self)
        self.setWindowIcon(geticon('ui/border/RunDynamics.png'))
        self.whatsthis_btn.setIcon(
            geticon('ui/actions/Properties Manager/WhatsThis.png'))
        self.whatsthis_btn.setIconSize(QSize(22, 22))
        self.whatsthis_btn.setToolTip('Enter "What\'s This?" help mode')
        self.connect(self.whatsthis_btn,
                     SIGNAL("clicked()"),
                     QWhatsThis.enterWhatsThisMode)
        # Collect the radio buttons inside the watch_motion groupbox into a
        # mutually-exclusive button group.
        self.watch_motion_buttongroup = QButtonGroup()
        self.watch_motion_buttongroup.setExclusive(True)
        for obj in self.watch_motion_groupbox.children():
            if isinstance(obj, QAbstractButton):
                self.watch_motion_buttongroup.addButton(obj)
        self.connect(self.run_sim_btn,SIGNAL("clicked()"),self.createMoviePressed)
        self.connect(self.cancel_btn,SIGNAL("clicked()"),self.close)
        qt4todo('self.connect(self.watch_motion_groupbox,SIGNAL("toggled(bool)"),self.setEnabled) ???')
        self.watch_motion_groupbox.setEnabled(True)
        ## self.part = part
            # not yet needed, though in future we might display info
            # about this Part in the dialog, to avoid confusion
            # if it's not the main Part.
        # Keep these checkboxes in sync with their preference values.
        connect_checkbox_with_boolean_pref(self.potential_energy_checkbox,
                                           Potential_energy_tracefile_prefs_key)
        connect_checkbox_with_boolean_pref(
            self.electrostaticsForDnaDuringDynamics_checkBox,
            electrostaticsForDnaDuringDynamics_prefs_key)
        self.assy = part.assy # used only for assy.filename
        self.suffix = suffix
        self.previous_movie = previous_movie or _stickyParams or Movie(self.assy) # used only for its parameter settings
            # note: as of bruce 060601 fixing bug 1840, previous_movie is no longer ever passed by caller.
        self.movie = Movie(self.assy) # public attr used by client code after we return; always a Movie even on failure.
            # (we need it here since no extra method runs on failure, tho that could probably be fixed)
        # bruce 050325 changes:
        # We make a new Movie here (but only when we return with success).
        # But we use default param settings from prior movie.
        # Caller should pass info about default filename (including uniqueness
        # when on selection or in clipboard item) -- i.e. the suffix.
        # We should set the params and filename using a Movie method, or warn it we did so,
        # or do them in its init... not yet cleaned up. ###@@@
        # self.movie is now a public attribute.
        #bruce 050329 comment: couldn't we set .movie to None, until we learn we succeeded? ###e ###@@@
        self.setup()
        self.watch_motion_groupbox.setWhatsThis(
            """<b>Watch motion in real time</b>
<p>
Enables real time graphical updates during simulation runs.
""")
        self.update_number_spinbox.setWhatsThis(
            """<b>Update every <i>n units.</u></b>
<p>
Specify how often to update the model during the simulation.
This allows the user to monitor simulation results while the
simulation is running.
</p>""")
        self.update_units_combobox.setWhatsThis(
            """<b>Update every <i>n units.</u></b>
<p>
Specify how often to update the model during the simulation.
This allows the user to monitor simulation results while the
simulation is running.
</p>""")
        self.update_every_rbtn.setWhatsThis(
            """<b>Update every <i>n units.</u></b>
<p>
Specify how often to update the model during the simulation.
This allows the user to monitor simulation results while the
simulation is running.</p>""")
        self.update_asap_rbtn.setWhatsThis(
            """<b>Update as fast as possible</b>
<p>
Update every 2 seconds, or faster (up to 20x/sec) if it doesn't
slow down the simulation by more than 20%.
</p>""")
        self.temperatureSpinBox.setWhatsThis(
            """<b>Temperature</b>
<p>
The temperature of the simulation in Kelvin
(300 K = room temperature)</p>""")
        self.totalFramesSpinBox.setWhatsThis(
            """<b>Total frames</b>
<p>
The total number of (movie) frames to create for the simulation run.
</p>""")
        self.stepsPerFrameDoubleSpinBox.setWhatsThis(
            """<b>Steps per frame</b>
<p>
The time duration between frames in femtoseconds.
</p>""")
        self.setWhatsThis(
            """<b>Run Dynamics</b>
<p>
The is the main dialog for configuring and launching a
Molecular Dynamics simulation run. Specify the simulation parameters
and click <b>Run Simulation</b> to launch.</p>
<p>
<img source=\"ui/actions/Simulation/PlayMovie.png\"><br>
The <b>Play Movie</b> command can be used to play back the
simulation.
</p>""")
        if not debug_pref("GROMACS: Enable for Run Dynamics", Choice_boolean_False,
                          prefs_key=True):
            # Hide the Simulation engine groupbox altogether.
            self.md_engine_groupbox.setHidden(True)
        # Modal: blocks here until the user runs or cancels the dialog.
        self.exec_()

    def setup(self):
        """
        Initialize the dialog's widgets from the previous movie's parameters.
        """
        self.movie.cancelled = True # We will assume the user will cancel
        #bruce 050324: fixed KnownBug item 27 by making these call setValue, not assign to it:
        # If the sim parameters change, they need to be updated in all places marked "SIMPARAMS"
        # Movie.__init__ (movie.py), toward the end
        # SimSetup.setup (SimSetup.py)
        # FakeMovie.__init (runSim.py)
        self.totalFramesSpinBox.setValue( self.previous_movie.totalFramesRequested )
        self.temperatureSpinBox.setValue( self.previous_movie.temp )
        self.stepsPerFrameDoubleSpinBox.setValue( self.previous_movie.stepsper / 10.0 )
            # stepsper is stored as 10x the widget value; see createMoviePressed,
            # which multiplies by 10.0 when reading the widget back.
        # self.timestepSB.setValue( self.previous_movie.timestep ) # Not supported in Alpha
        # new checkboxes for Alpha7, circa 060108
        #self.create_movie_file_checkbox.setChecked( self.previous_movie.create_movie_file )
            # whether to store movie file (see NFR/bug 1286). [bruce & mark 060108]
            # create_movie_file_checkbox removed for A7 (bug 1729). mark 060321
        ##e the following really belongs in the realtime_update_controller,
        # and the update_cond is not the best thing to set this from;
        # but we can leave it here, then let the realtime_update_controller override it if it knows how. [now it does]
        self.watch_motion_groupbox.setChecked( self.previous_movie.watch_motion ) # whether to move atoms in realtime
        try:
            #bruce 060705 use new common code, if it works
            from widgets.widget_controllers import realtime_update_controller
            self.ruc = realtime_update_controller(
                ( self.watch_motion_buttongroup, self.update_number_spinbox, self.update_units_combobox ),
                self.watch_motion_groupbox
                # no prefs key for checkbox
            )
            self.ruc.set_widgets_from_update_data( self.previous_movie._update_data ) # includes checkbox
        except:
            # fallback: set the widgets directly from the stored update data
            # (the typo "simsetep" below is in the original message string)
            print_compact_traceback( "bug; reverting to older code in simsetep setup: ")
            if self.previous_movie._update_data:
                update_number, update_units, update_as_fast_as_possible_data, watchjunk = self.previous_movie._update_data
                self.watch_motion_groupbox.setChecked(watchjunk) ###060705
                self.watch_motion_groupbox.setButton( update_as_fast_as_possible_data)
                self.update_number_spinbox.setValue( update_number)
                self.update_units_combobox.setCurrentText( update_units)
                    #k let's hope this changes the current choice, not the popup menu item text for the current choice!
        return

    def createMoviePressed(self):
        """
        Creates a DPB (movie) file of the current part.
        [Actually only saves the params and filename which should be used
        by the client code (in writemovie?) to create that file.]
        The part does not have to be saved as an MMP file first, as it used to.
        """
        ###@@@ bruce 050324 comment: Not sure if/when user can rename the file.
        QDialog.accept(self)
        if self.simulation_engine_combobox.currentIndex() == 1:
            # GROMACS was selected as the simulation engine.
            #
            # NOTE: This code is just for demo and prototyping purposes - the
            # real approach will be architected and utilize plugins.
            #
            # Brian Helfrich 2007-04-06
            #
            from simulation.GROMACS.GROMACS import GROMACS
            gmx = GROMACS(self.assy.part)
            gmx.run("md")
            # note: self.movie.cancelled stays True on this path, so the
            # caller treats the (unused) movie as not created.
        else:
            # NanoDynamics-1 was selected as the simulation engine
            #
            errorcode, partdir = self.assy.find_or_make_part_files_directory()
            self.movie.cancelled = False # This is the only way caller can tell we succeeded.
            self.movie.totalFramesRequested = self.totalFramesSpinBox.value()
            self.movie.temp = self.temperatureSpinBox.value()
            self.movie.stepsper = self.stepsPerFrameDoubleSpinBox.value() * 10.0
            self.movie.print_energy = self.potential_energy_checkbox.isChecked()
            # self.movie.timestep = self.timestepSB.value() # Not supported in Alpha
            #self.movie.create_movie_file = self.create_movie_file_checkbox.isChecked()
                # removed for A7 (bug 1729). mark 060321
            self.movie.create_movie_file = True
            # compute update_data and update_cond, using new or old code
            try:
                # try new common code for this, bruce 060705
                ruc = self.ruc
                update_cond = ruc.get_update_cond_from_widgets()
                assert update_cond or (update_cond is False) ###@@@ remove when works, and all the others like this
                # note, if those widgets are connected to env.prefs, that's not handled here or in ruc;
                # I'm not sure if they are. Ideally we'd tell ruc the prefs_keys and have it handle that too,
                # perhaps making it a long-lived object (though that might not be necessary).
                update_data = ruc.get_update_data_from_widgets() # redundant, but we can remove it when ruc handles prefs
            except:
                print_compact_traceback("bug using realtime_update_controller in SimSetup, will use older code instead: ")
                # this older code can be removed after A8 if we don't see that message
                #bruce 060530 use new watch_motion rate parameters
                self.movie.watch_motion = self.watch_motion_groupbox.isChecked() # [deprecated for setattr as of 060705]
                if env.debug():
                    print "debug fyi: sim setup watch_motion = %r" % (self.movie.watch_motion,)
                # This code works, but I'll try to replace it with calls to common code (above). [bruce 060705]
                # first grab them from the UI
                update_as_fast_as_possible_data = self.watch_motion_groupbox.selectedId() # 0 means yes, 1 means no (for now)
                    # ( or -1 means neither, but that's prevented by how the button group is set up, at least when it's enabled)
                update_as_fast_as_possible = (update_as_fast_as_possible_data != 1)
                update_number = self.update_number_spinbox.value() # 1, 2, etc (or perhaps 0??)
                update_units = str(self.update_units_combobox.currentText()) # 'frames', 'seconds', 'minutes', 'hours'
                # for sake of propogating them to the next sim run:
                update_data = update_number, update_units, update_as_fast_as_possible_data, self.movie.watch_motion
                ## if env.debug():
                ##     print "stored _update_data %r into movie %r" % (self.movie._update_data, self.movie)
                ##     print "debug: self.watch_motion_groupbox.selectedId() = %r" % (update_as_fast_as_possible_data,)
                ##     print "debug: self.update_number_spinbox.value() is %r" % self.update_number_spinbox.value() # e.g. 1
                ##     print "debug: combox text is %r" % str(self.update_units_combobox.currentText()) # e.g. 'frames'
                # Now figure out what these user settings mean our realtime updating algorithm should be,
                # as a function to be used for deciding whether to update the 3D view when each new frame is received,
                # which takes as arguments the time since the last update finished (simtime), the time that update took (pytime),
                # and the number of frames since then (nframes, 1 or more), and returns a boolean for whether to draw this new frame.
                # Notes:
                # - The Qt progress update will be done independently of this, at most once per second (in runSim.py).
                # - The last frame we expect to receive will always be drawn. (This func may be called anyway in case it wants
                #   to do something else with the info like store it somewhere, or it may not (check runSim.py for details #k),
                #   but its return value will be ignored if it's called for the last frame.)
                # The details of these functions (and the UI feeding them) might be revised.
                # This code for setting update_cond is duplicated (inexactly) in Minimize_CommandRun.doMinimize()
                if update_as_fast_as_possible:
                    # This radiobutton might be misnamed; it really means "use the old code,
                    # i.e. not worse than 20% slowdown, with threshholds".
                    # It's also ambiguous -- does "fast" mean "fast progress"
                    # or "often" (which are opposites)? It sort of means "often".
                    update_cond = ( lambda simtime, pytime, nframes:
                                    simtime >= max(0.05, min(pytime * 4, 2.0)) )
                elif update_units == 'frames':
                    update_cond = ( lambda simtime, pytime, nframes, _nframes = update_number: nframes >= _nframes )
                elif update_units == 'seconds':
                    update_cond = ( lambda simtime, pytime, nframes, _timelimit = update_number: simtime + pytime >= _timelimit )
                elif update_units == 'minutes':
                    update_cond = ( lambda simtime, pytime, nframes, _timelimit = update_number * 60: simtime + pytime >= _timelimit )
                elif update_units == 'hours':
                    update_cond = ( lambda simtime, pytime, nframes, _timelimit = update_number * 3600: simtime + pytime >= _timelimit )
                else:
                    print "don't know how to set update_cond from (%r, %r)" % (update_number, update_units)
                    update_cond = None
                # revision in this old code, 060705:
                if not self.movie.watch_motion:
                    update_cond = False
                    del self.movie.watch_motion # let getattr do it
            # now do this, however we got update_data and update_cond:
            self.movie._update_data = update_data # for propogating them to the next sim run
            self.movie.update_cond = update_cond # used this time
            # end of 060705 changes
            suffix = self.suffix
            tStamp = timeStamp()
            if self.assy.filename and not errorcode: # filename could be an MMP or PDB file.
                import shutil
                dir, fil = os.path.split(self.assy.filename)
                fil, ext = os.path.splitext(fil)
                self.movie.filename = os.path.join(partdir, fil + '.' + tStamp + suffix + '.dpb')
                self.movie.origfile = os.path.join(partdir, fil + '.' + tStamp + '.orig' + ext)
                # keep a copy of the input file next to the movie output
                shutil.copy(self.assy.filename, self.movie.origfile)
            else:
                self.movie.filename = os.path.join(self.assy.w.tmpFilePath, "Untitled.%s%s.dpb" % (tStamp, suffix))
                # Untitled parts usually do not have a filename
            #bruce 060601 fix bug 1840, also make params sticky across opening of new files
            global _stickyParams
            _stickyParams = FakeMovie(self.movie) # these will be used as default params next time, whether or not this gets aborted
        return

    pass # end of class SimSetup

# end
| NanoCAD-master | cad/src/simulation/SimSetup.py |
# Copyright 2005-2007 Nanorex, Inc. See LICENSE file for details.
"""
SimServer.py - hold attributes needed to connect to and run a SimJob.
(Appears to be specific to GAMESS in some ways.)
[bruce 071217 guess at description]
@author: Mark
@version: $Id$
@copyright: 2005-2007 Nanorex, Inc. See LICENSE file for details.
History:
By Mark. A lot of changes made by Huaicai.
"""
import sys
import foundation.env as env
class SimServer:
    """
    a SimServer has all the attributes needed to connect to and run a SimJob.

    Each entry of self.parms is also mirrored as an instance attribute
    (e.g. self.hostname) for convenient access.
    """
    # Class-level default parameters; each instance works on its own copy.
    server_parms = {
        'hostname' : 'localhost',
        'ipaddress' : '127.0.0.1',
        'method' : 'Local access',
        'engine' : 'PC GAMESS',
        'program' : 'C:\\PCGAMESS\\gamess.exe',
        'tmpdir' : 'C:\\PCGAMESS\\',
        'platform' : 'Windows',
        'username' : 'nanorex',
        'password' : '',
    }

    def __init__(self):
        """
        Create a server with default parameters.
        @note: If you want to change properties of the server,
        call set_parms() instead.
        """
        self.server_id = env.prefs.get('server_id')
        if not self.server_id:
            self.server_id = 66
        else:
            self.server_id += 1
        env.prefs['server_id'] = self.server_id
        # Bug fix: copy the class-level defaults instead of aliasing them,
        # so the per-platform tweaks below don't mutate the dict shared by
        # the class (and thereby every other instance).
        self.parms = dict(SimServer.server_parms)
        if sys.platform == 'linux2':
            self.parms['platform'] = 'Linux'
            self.parms['program'] = '/home/huaicai/gamess/rungms'
            self.parms['engine'] = 'GAMESS'
        elif sys.platform == 'darwin':
            self.parms['program'] = 'rungms'
            self.parms['platform'] = 'Mac Os'
            self.parms['engine'] = 'GAMESS'
        # (Removed a former "self.parms.keys().sort()" call: it sorted a
        # temporary list and discarded it, so it had no effect.)
        ### WARNING: Bugs will be caused if any of SimJob's own methods or
        # instance variables had the same name as any of the parameter ('k') values.
        for k in self.parms:
            self.__dict__[k] = self.parms[k]
        self.edit_cntl = None

    def __getstate__(self):
        """
        Called by pickle: serialize as (server_id, parms, edit_cntl).
        """
        return self.server_id, self.parms, self.edit_cntl

    def __setstate__(self, state):
        """
        Called by unpickle: restore from a (server_id, parms, edit_cntl)
        tuple, re-mirroring parms as instance attributes.
        """
        self.server_id, self.parms, self.edit_cntl = state
        self.set_parms(self.parms)

    def set_parms(self, parms):
        """
        Replace the parameter dict and mirror each entry as an instance
        attribute.
        """
        self.parms = parms
        for k in parms:
            self.__dict__[k] = parms[k]

    def write_parms(self, f): # deprecated method
        """
        [deprecated method]
        Write server parms to file f
        """
        # NOTE(review): get_comment_character() is not defined in this class;
        # presumably supplied by a subclass or mixed in elsewhere -- confirm.
        rem = self.get_comment_character()
        f.write (rem + '\n' + rem + 'Server Parameters\n' + rem + '\n')
        for k in self.parms:
            phrase = rem + k + ': ' + str(self.__dict__[k])
            f.write (phrase + '\n')
        f.write (rem+'\n')
    pass

# end
# end
| NanoCAD-master | cad/src/simulation/SimServer.py |
NanoCAD-master | cad/src/simulation/__init__.py |
|
# Copyright 2004-2008 Nanorex, Inc. See LICENSE file for details.
"""
PyrexSimulator.py - Wrapper around the pyrex/C ND-1 simulator code.
Responsible for maintaining references to strings which are passed
to the C code, and which end up referenced by variables defined in
src/sim/globals.c.
@version: $Id$
@copyright: 2004-2008 Nanorex, Inc. See LICENSE file for details.
"""
import os
import foundation.env as env
from platform_dependent.PlatformDependent import find_plugin_dir
_thePyrexSimulator = None
class _PyrexSimulator(object):
def __init__(self):
global _thePyrexSimulator
assert (_thePyrexSimulator is None)
_thePyrexSimulator = self
import sim # this import must not be done at toplevel
self.sim = sim.theSimulator()
ok, nd1_plugin_path = find_plugin_dir("NanoDynamics-1")
if (not ok):
env.history.redmsg("Error: can't find " + nd1_plugin_path)
nd1_plugin_path = "."
self.system_parameters_file = os.path.join(nd1_plugin_path, "sim-params.txt")
self.amber_bonded_parameters_file = os.path.join(nd1_plugin_path, "ffamber03bon.itp")
self.amber_nonbonded_parameters_file = os.path.join(nd1_plugin_path, "ffamber03nb.itp")
self.amber_charges_file = os.path.join(nd1_plugin_path, "ffamber03charge.itp")
def reInitialize(self):
self.sim.reinitGlobals()
self.sim.SystemParametersFileName = self.system_parameters_file
self.sim.AmberBondedParametersFileName = self.amber_bonded_parameters_file
self.sim.AmberNonbondedParametersFileName = self.amber_nonbonded_parameters_file
self.sim.AmberChargesFileName = self.amber_charges_file
def setup(self, mflag, filename):
self.inputFileName = filename
self.outputFileName = None
self.reInitialize()
if (mflag):
self.sim.ToMinimize = 1
self.sim.DumpAsText = 1
else:
self.sim.ToMinimize = 0
self.sim.DumpAsText = 0
self.sim.PrintFrameNums = 0
self.sim.InputFileName = self.inputFileName
def setOutputFileName(self, filename):
self.outputFileName = filename
def run(self, frame_callback = None, trace_callback = None):
if (self.outputFileName is None):
if (self.sim.DumpAsText):
outputExtension = "xyz"
else:
outputExtension = "dpb"
if (self.inputFileName.endswith(".mmp")):
self.outputFileName = self.inputFileName[:-3] + outputExtension
else:
self.outputFileName = self.inputFileName + "." + outputExtension
self.sim.OutputFileName = self.outputFileName
if (self.sim.DumpAsText):
self.sim.OutputFormat = 0
else:
self.sim.OutputFormat = 1
self.sim.go(frame_callback, trace_callback)
def getEquilibriumDistanceForBond(self, element1, element2, order):
self.reInitialize()
return self.sim.getEquilibriumDistanceForBond(element1, element2, order)
def thePyrexSimulator():
    """
    Return the singleton _PyrexSimulator instance, constructing it on
    first use. (Construction also registers itself in the module global,
    so the assignment here is belt-and-braces.)
    """
    global _thePyrexSimulator
    if _thePyrexSimulator is None:
        _thePyrexSimulator = _PyrexSimulator()
    return _thePyrexSimulator
| NanoCAD-master | cad/src/simulation/PyrexSimulator.py |
# Copyright 2004-2008 Nanorex, Inc. See LICENSE file for details.
"""
runSim.py -- setting up and running the simulator, for Simulate or Minimize
(i.e. the same code that would change if the simulator interface changed),
and part of the implementation of user-visible commands for those operations.
@version: $Id$
@copyright: 2004-2008 Nanorex, Inc. See LICENSE file for details.
History:
Mark created a file of this name, but that was renamed to SimSetup.py
by bruce on 050325.
Bruce 050324 pulled in lots of existing code for running the simulator
(and some code for reading its results) into this file, since that fits
its name. That existing code was mostly by Mark and Huaicai, and was
partly cleaned up by Bruce, who also put some of it into subclasses
of the experimental CommandRun class. (CommandRun and its subclasses
were subsequently moved into another file, sim_commandruns.py.)
Bruce 050331 is splitting writemovie into several methods in more than
one subclass (eventually) of a new SimRunner class.
Bruce 051115 some comments and code cleanup; add #SIMOPT wherever a
simulator executable command-line flag is hardcoded.
Bruce 051231 partly-done code for using pyrex interface to sim; see use_dylib
[and much more, by many developers, not recorded]
Bruce 080321 split out sim_commandruns.py and sim_aspect.py into their
own files.
"""
from utilities.debug import print_compact_traceback
import widgets.DebugMenuMixin as DebugMenuMixin
# DebugMenuMixin needs refactoring
# to move this variable (sim_params_set) (and related code?) out of it;
# see its module docstring for more info [bruce 080104 comment]
from utilities import debug_flags
from platform_dependent.PlatformDependent import fix_plurals
from platform_dependent.PlatformDependent import find_or_make_Nanorex_subdir
from platform_dependent.PlatformDependent import hhmmss_str
from platform_dependent.PlatformDependent import find_plugin_dir
import os, sys, time
from math import sqrt
from time import sleep
from datetime import datetime
from PyQt4.Qt import QApplication, QCursor, Qt, QStringList
from PyQt4.Qt import QProcess, QObject, QFileInfo, SIGNAL
from utilities.Log import redmsg, greenmsg, orangemsg, quote_html, _graymsg
import foundation.env as env
from foundation.env import seen_before
from geometry.VQT import A, vlen
import re
from model.chunk import Chunk
from model.elements import Singlet
from utilities.debug_prefs import debug_pref, Choice, Choice_boolean_True, Choice_boolean_False
from utilities.constants import filesplit
from processes.Process import Process
from processes.Plugins import checkPluginPreferences, verifyExecutable
from widgets.StatusBar import AbortHandler, FileSizeProgressReporter
from simulation.PyrexSimulator import thePyrexSimulator
from simulation.SimulatorParameters import SimulatorParameters
from simulation.YukawaPotential import YukawaPotential
from simulation.GromacsLog import GromacsLog
from utilities.prefs_constants import electrostaticsForDnaDuringAdjust_prefs_key
from utilities.prefs_constants import electrostaticsForDnaDuringMinimize_prefs_key
from utilities.prefs_constants import electrostaticsForDnaDuringDynamics_prefs_key
from utilities.prefs_constants import neighborSearchingInGromacs_prefs_key
from utilities.prefs_constants import gromacs_enabled_prefs_key
from utilities.prefs_constants import gromacs_path_prefs_key
from utilities.prefs_constants import cpp_enabled_prefs_key
from utilities.prefs_constants import cpp_path_prefs_key
from utilities.prefs_constants import nv1_enabled_prefs_key
from utilities.prefs_constants import nv1_path_prefs_key
from utilities.GlobalPreferences import pref_create_pattern_indicators
# some non-toplevel imports too (of which a few must remain non-toplevel)
# ==
# Module-level debug switches; all must stay off in committed code.
debug_sim_exceptions = 0 # DO NOT COMMIT WITH 1 -- set this to reproduce a bug mostly fixed by Will today #bruce 060111
debug_all_frames = 0 # DO NOT COMMIT with 1
debug_all_frames_atom_index = 1 # index of atom to print in detail, when debug_all_frames
DEBUG_SIM = False # DO NOT COMMIT with True
debug_pyrex_prints = 0 # prints to stdout the same info that gets shown transiently in statusbar
debug_timing_loop_on_sbar = 0
_USE_PYREX_SIM = True
    # Use pyrex sim by default. Use debug menu to use the standalone sim. mark 060314.
if debug_sim_exceptions:
    # reproducing that bug requires per-frame debug output
    debug_all_frames = 1
FAILURE_ALREADY_DOCUMENTED = -10101
    # NOTE(review): presumably an error code meaning the failure was already
    # reported to the user; confirm at the call sites (not visible here).
# ==
def _timestep_flag_and_arg( mflag = False): #bruce 060503
    """
    Return (use_timestep_arg, timestep) where timestep is the dynamics
    timestep in seconds chosen via a debug pref, and use_timestep_arg says
    whether it should actually be passed to the simulator.
    """
    # Menu items are strings because the float 0.1 would render in menu
    # text as 0.100000000000000001 or similar.
    timestep_fs_str = debug_pref("dynamics timestep (fs)",
                                 Choice(["0.1", "0.2", "0.5", "1.0"]),
                                 non_debug = True )
    timestep_fs = float(timestep_fs_str)
    timestep = timestep_fs * 1e-15 # femtoseconds -> seconds
    # Only supply the arg when not minimizing AND a non-default value was
    # chosen (in case the code to supply it has a bug, or supplies it to
    # the sim in the wrong format).
    use_timestep_arg = not mflag and timestep_fs != 0.1
    return use_timestep_arg, timestep
##_timestep_flag_and_arg()
## # Exercise the debug_pref so it shows up in the debug menu
## # before the first sim/min run...
## # Oops, this doesn't work from here, since this module is not imported
## # until it's needed! Never mind for now, since it won't be an issue
## # later when timestep is again supported as a movie attribute.
def _verifyGromppAndMdrunExecutables(gromacs_plugin_path):
    """
    Check that the grompp and mdrun executables exist next to the given
    GROMACS plugin executable.

    @return: an error message string from verifyExecutable, or None if
             both executables check out.
    """
    gromacs_bin_dir = os.path.split(gromacs_plugin_path)[0]
    if sys.platform == 'win32':
        dot_exe = ".exe"
    else:
        dot_exe = ""
    # grompp is checked first, matching the order errors are reported in.
    for toolname in ("grompp", "mdrun"):
        tool_path = os.path.join(gromacs_bin_dir, "%s%s" % (toolname, dot_exe))
        message = verifyExecutable(tool_path)
        if message:
            return message
    return None
# ==
class GromacsProcess(Process):
    """
    Process subclass for running GROMACS tools (grompp, mdrun); optionally
    echoes their output for debugging, and surfaces mdrun "ERROR:" lines
    to the NE1 history widget.
    """
    # Set True to enable the debug echoes of GROMACS output below.
    verboseGromacsOutput = False

    def standardOutputLine(self, line):
        # Let the base Process class do its normal line handling first.
        Process.standardOutputLine(self, line)
        if (self.verboseGromacsOutput):
            # The "and False" below deliberately disables these echoes even
            # in verbose mode; flip to True when debugging stdout capture.
            if (self.runningGrompp and False):
                print "grompp stdout: " + line.rstrip()
            if (self.runningMdrun and False):
                print "mdrun stdout: " + line.rstrip()

    def standardErrorLine(self, line):
        Process.standardErrorLine(self, line)
        if (self.verboseGromacsOutput):
            if (self.runningGrompp and False):
                # disabled echo; see note in standardOutputLine
                print "grompp stderr: " + line.rstrip()
            if (self.runningMdrun):
                print "mdrun stderr: " + line.rstrip()
        # Report GROMACS errors to the user via the history widget.
        # NOTE(review): nesting reconstructed -- confirm this check is
        # unconditional (not gated on verboseGromacsOutput).
        if (line.startswith("ERROR:")):
            msg = redmsg("Gromacs " + line.rstrip().rstrip("-"))
            env.history.message(msg)

    def prepareForGrompp(self):
        # Tag subsequent output lines as coming from grompp.
        self.runningGrompp = True
        self.runningMdrun = False

    def prepareForMdrun(self):
        # Tag subsequent output lines as coming from mdrun.
        self.runningGrompp = False
        self.runningMdrun = True
class SimRunner:
"""
Class for running the simulator.
[subclasses can run it in special ways, maybe]
"""
#bruce 050330 making this from writemovie and maybe some of Movie/SimSetup;
# experimental, esp. since i don't yet know how much to factor the
# input-file writing, process spawning, file-growth watching, file reading,
# file using. Surely in future we'll split out file using into separate
# code, and maybe file-growth watching if we run processes remotely
# (or we might instead be watching results come in over a tcp stream,
# frames mixed with trace records).
# So for now, let's make the minimal class for running the sim,
# up to having finished files to look at but not looking at them;
# then the old writemovie might call this class to do most of its work
# but also call other classes to use the results.
# wware 060406 bug 1263 - provide a mechanism to be notified when the
# program is exiting. This is set to True in ops_files.py. This is a class
# (not instance) variable, which matters because ops_files.py can set this
# without a reference to the currently active SimRunner instance.
PREPARE_TO_CLOSE = False
used_atoms = None
def __init__(self, part, mflag,
             simaspect = None,
             use_dylib_sim = _USE_PYREX_SIM,
             cmdname = "Simulator",
             cmd_type = 'Minimize',
             useGromacs = False,
             background = False,
             hasPAM = False,
             useAMBER = False,
             typeFeedback = False):
    """
    Record the part we'll operate on and the options controlling how this
    simulation run will behave. The mflag arg is kept since someday it'll
    specify the subclass to use.
    """
    # References into the model being simulated.
    self.assy = assy = part.assy # needed?
    self.win = assy.w # might be used only for self.win.progressbar.launch
    self.part = part # needed?

    # Scope and mode options for this run.
    self.mflag = mflag # see docstring
    self.simaspect = simaspect # None for entire part, or an object
        # describing what aspect of it to simulate
    self.cmdname = cmdname
    self.cmd_type = cmd_type
    self.useGromacs = useGromacs
    self.background = background
    self.hasPAM = hasPAM
    self.useAMBER = useAMBER
    self.typeFeedback = typeFeedback

    # Run-state attributes, updated as the simulation proceeds.
    self.errcode = 0 # public attr used after we're done;
        # 0 or None = success (so far), >0 = error (msg emitted)
    self.said_we_are_done = False
    self.pyrexSimInterrupted = False
        # if interrupted we don't need so many warnings (bug 1725)
    self.gromacsLog = None
    self.tracefileProcessor = None

    # A debug pref can force use of the standalone (command line) sim
    # instead of the pyrex dylib sim.
    if debug_pref("force use of standalone sim",
                  Choice_boolean_False,
                  prefs_key = 'use-standalone-sim',
                  non_debug = True ):
        use_dylib_sim = False
    self.use_dylib_sim = use_dylib_sim
    if not use_dylib_sim:
        msg = "Using the standalone simulator (not the pyrex simulator)"
        env.history.message(greenmsg(msg))
    return
def verifyNanoVision1Plugin(self):
    """
    Verify the NanoVision-1 plugin, remembering its executable path
    on success.

    @return: True if NanoVision-1 is properly enabled.
    @rtype: boolean
    """
    errorcode, errortext_or_path = \
        checkPluginPreferences("NanoVision-1",
                               (nv1_enabled_prefs_key, nv1_path_prefs_key),
                               insure_executable = True)
    if errorcode:
        env.history.message(
            redmsg("Verify Plugin: %s (code %d)" % (errortext_or_path,
                                                    errorcode)))
        return False
    # On success the second return value is the executable path.
    self.nv1_executable_path = errortext_or_path
    return True
def verifyGromacsPlugin(self):
    """
    Verify the GROMACS plugin, plus the CPP plugin it depends on.
    On success, remembers the GROMACS bin directory and the cpp
    executable path.

    @return: True if GROMACS is properly enabled.
    @rtype: boolean
    """
    # First, the GROMACS install itself (grompp/mdrun must both exist).
    errorcode, errortext_or_path = \
        checkPluginPreferences("GROMACS",
                               (gromacs_enabled_prefs_key,
                                gromacs_path_prefs_key),
                               extra_check = _verifyGromppAndMdrunExecutables)
    if errorcode:
        env.history.message(
            redmsg("Verify Plugin: %s (code %d)" % (errortext_or_path,
                                                    errorcode)))
        return False
    # Keep only the directory holding the GROMACS executables.
    self.gromacs_bin_dir, junk_exe = os.path.split(errortext_or_path)

    # Second, the C preprocessor GROMACS needs for topology files.
    errorcode, errortext_or_path = \
        checkPluginPreferences("CPP",
                               (cpp_enabled_prefs_key, cpp_path_prefs_key),
                               insure_executable = True)
    if errorcode:
        env.history.message(
            redmsg("Verify Plugin: %s (code %d)" % (errortext_or_path,
                                                    errorcode)))
        return False
    self.cpp_executable_path = errortext_or_path
    return True
def mdrunPollFunction(self):
    """
    Poll callback used while mdrun runs: tail the mdrun log file and feed
    each complete line to self.gromacsLog. A partial (newline-less) line
    is kept in self.mdrunLogLineBuffer until the rest of it arrives.
    """
    if not self.mdrunLogFile:
        # mdrun may not have created its log file yet; if the open fails,
        # just try again on a later poll.
        try:
            self.mdrunLogFile = open(self.mdrunLogFileName, 'rU')
        except IOError:
            return
        self.mdrunLogLineBuffer = ""
    while True:
        chunk = self.mdrunLogFile.readline()
        if not chunk:
            # Nothing more available right now; resume on the next poll.
            return
        self.mdrunLogLineBuffer += chunk
        if self.mdrunLogLineBuffer.endswith('\n'):
            self.gromacsLog.addLine(self.mdrunLogLineBuffer)
            self.mdrunLogLineBuffer = ""
def run_using_old_movie_obj_to_hold_sim_params(self, movie):
    """
    Run the simulation, using the given movie object as the holder for the
    sim parameters, alist, and output filenames. Sets self.errcode
    (0 or None means success) and emits history messages for errors.
    For GROMACS runs (mflag == 1 and useGromacs) this also runs grompp
    and mdrun, and optionally launches the NanoVision-1 viewer.
    """
    self._movie = movie # general kluge for old-code compat
        # (lots of our methods still use this and modify it)
    # note, this movie object (really should be a simsetup object?)
    # does not yet know a proper alist (or any alist, I hope) [bruce 050404]
    self.errcode = self.set_options_errQ( )
        # set movie alist, output filenames, sim executable pathname (verify it exists)
        #obs comment [about the options arg i removed?? or smth else?]
        # options include everything that affects the run except the set of atoms and the part
    if self.errcode: # used to be a local var 'r'
        # bruce 051115 comment: more than one reason this can happen;
        # one is sim executable missing
        return
    self.sim_input_file = self.sim_input_filename()
        # might get name from options or make up a temporary filename
    launchNV1 = debug_pref("GROMACS: Launch NV1", Choice_boolean_False)
    if (self.mflag == 1 and self.useGromacs):
        # GROMACS minimize: verify the needed plugins before starting.
        if (not self.verifyGromacsPlugin()):
            self.errcode = FAILURE_ALREADY_DOCUMENTED
            return
        if (self.background and launchNV1):
            if (not self.verifyNanoVision1Plugin()):
                self.errcode = FAILURE_ALREADY_DOCUMENTED
                return
    self.set_waitcursor(True)
    progressBar = self.win.statusBar().progressBar
    # Disable some QActions (menu items/toolbar buttons) while the sim is running.
    self.win.disable_QActions_for_sim(True)
    try: #bruce 050325 added this try/except wrapper, to always restore cursor
        self.write_sim_input_file()
            # for Minimize, this uses simaspect to write file;
            # puts it into movie.alist too, via writemovie
        self.simProcess = None #bruce 051231
        sp = SimulatorParameters()
        self.yukawaRCutoff = sp.getYukawaRCutoff()
        self.spawn_process()
            # spawn_process is misnamed since it can go thru either
            # interface (pyrex or exec OS process), since it also monitors
            # progress and waits until it's done, and insert results back
            # into part, either in real time or when done.
            # result error code (or abort button flag) stored in self.errcode
        if (self.mflag == 1 and self.useGromacs):
            # The sim wrote a GROMACS topology; now preprocess it with
            # grompp and run the minimization with mdrun.
            ok, gromacs_plugin_path = find_plugin_dir("GROMACS")
            if (not ok):
                msg = redmsg(gromacs_plugin_path)
                env.history.message(self.cmdname + ": " + msg)
                self.errcode = -11112
                return
            # Indeterminate (busy) progress bar while grompp runs.
            progressBar.setRange(0, 0)
            progressBar.reset()
            progressBar.show()
            if (sys.platform == 'win32'):
                dot_exe = ".exe"
            else:
                dot_exe = ""
            sim_bin_dir = self.sim_bin_dir_path()
            grompp = \
                os.path.join(self.gromacs_bin_dir, "grompp%s" % dot_exe)
            mdrun = os.path.join(self.gromacs_bin_dir, "mdrun%s" % dot_exe)
            gromacsFullBaseFileName = self._movie.filename
            gromacsFullBaseFileInfo = QFileInfo(gromacsFullBaseFileName)
            gromacsWorkingDir = gromacsFullBaseFileInfo.dir().absolutePath()
            gromacsBaseFileName = gromacsFullBaseFileInfo.fileName()
            env.history.message("%s: GROMACS files at %s%s%s.*" %
                                (self.cmdname, gromacsWorkingDir, os.sep,
                                 gromacsFullBaseFileInfo.completeBaseName()))
            gromacsProcess = GromacsProcess()
            gromacsProcess.setProcessName("grompp")
            gromacsProcess.prepareForGrompp()
            gromacsProcess.redirect_stdout_to_file("%s-grompp-stdout.txt" %
                                                   gromacsFullBaseFileName)
            gromacsProcess.redirect_stderr_to_file("%s-grompp-stderr.txt" %
                                                   gromacsFullBaseFileName)
            # grompp combines the .mdp/.gro/.top/.ndx inputs into a .tpr
            # run file for mdrun.
            gromppArgs = [
                "-f", "%s.mdp" % gromacsBaseFileName,
                "-c", "%s.gro" % gromacsBaseFileName,
                "-p", "%s.top" % gromacsBaseFileName,
                "-n", "%s.ndx" % gromacsBaseFileName,
                "-o", "%s.tpr" % gromacsBaseFileName,
                "-po", "%s-out.mdp" % gromacsBaseFileName,
                ]
            gromacsProcess.setWorkingDirectory(gromacsWorkingDir)
            # Point GMXLIB at the topology library of the GROMACS install:
            # strip the trailing 4 chars ("/bin") from the bin dir, then
            # append share/gromacs/top.
            gromacs_topo_dir = \
                self.gromacs_bin_dir[0:len(self.gromacs_bin_dir) - 4]
            gromacs_topo_dir = \
                os.path.join(gromacs_topo_dir, "share", "gromacs", "top")
            environmentVariables = gromacsProcess.environment()
            environmentVariables += "GMXLIB=%s" % gromacs_topo_dir
            gromacsProcess.setEnvironment(environmentVariables)
            abortHandler = AbortHandler(self.win.statusBar(), "grompp")
            errorCode = gromacsProcess.run(grompp, gromppArgs, False,
                                           abortHandler)
            abortHandler = None
            if (errorCode != 0):
                msg = redmsg("Gromacs minimization failed, grompp returned %d" % errorCode)
                env.history.message(self.cmdname + ": " + msg)
                self.errcode = 2;
            else:
                # grompp succeeded; set up and launch mdrun.
                progressBar.setRange(0, 0)
                progressBar.reset()
                gromacsProcess.setProcessName("mdrun")
                gromacsProcess.prepareForMdrun()
                trajectoryOutputFile = None
                if (self.background and launchNV1):
                    # NV1 will watch an HDF5 (.nh5) trajectory instead.
                    trajectoryOutputFile = "%s/%s.%s" % \
                        (gromacsFullBaseFileInfo.absolutePath(),
                         gromacsFullBaseFileInfo.completeBaseName(), "nh5")
                else:
                    if (not self.background):
                        progressBar.show()
                    gromacsProcess.redirect_stdout_to_file("%s-mdrun-stdout.txt" %
                                                           gromacsFullBaseFileName)
                    gromacsProcess.redirect_stderr_to_file("%s-mdrun-stderr.txt" %
                                                           gromacsFullBaseFileName)
                    trajectoryOutputFile = \
                        "%s.%s" % (gromacsFullBaseFileName, "trr")
                mdrunArgs = None
                if (self.background):
                    # Background run goes through the mdrunner wrapper script.
                    fullBaseFilename = gromacsFullBaseFileName
                    if (sys.platform == 'win32'):
                        fullBaseFilename = "\"" + fullBaseFilename + "\""
                    mdrunArgs = [
                        os.path.join(gromacs_plugin_path, "mdrunner.bat"),
                        gromacs_topo_dir,
                        mdrun,
                        fullBaseFilename
                        ]
                else:
                    # Foreground run: we tail mdrun's log via
                    # mdrunPollFunction, so remove any stale log first.
                    self.mdrunLogFile = None
                    self.mdrunLogFileName = "%s-mdrun.log" % gromacsFullBaseFileName
                    try:
                        os.remove(self.mdrunLogFileName)
                    except:
                        # Ignore the error that it isn't there. We just want it gone.
                        pass
                    mdrunArgs = [
                        "-s", "%s.tpr" % gromacsFullBaseFileName,
                        "-o", "%s" % trajectoryOutputFile,
                        "-e", "%s.edr" % gromacsFullBaseFileName,
                        "-c", "%s-out.gro" % gromacsFullBaseFileName,
                        "-g", self.mdrunLogFileName,
                        ]
                if (self.hasPAM):
                    # PAM models need the NE1-generated user-defined
                    # (Yukawa) potential tables passed to mdrun.
                    tableFile = "%s.xvg" % gromacsFullBaseFileName
                    yp = YukawaPotential(sp)
                    yp.writeToFile(tableFile)
                    mdrunArgs += [ "-table", tableFile,
                                   "-tablep", tableFile ]
                if (self.background):
                    abortHandler = None
                    scriptSuffix = None
                    if (sys.platform == 'win32'):
                        scriptSuffix = "bat"
                    else:
                        scriptSuffix = "sh"
                    # Detach: the wrapper script runs mdrun outside NE1.
                    os.spawnv(os.P_NOWAIT,
                              os.path.join(gromacs_plugin_path,
                                           "mdrunner.%s" % scriptSuffix),
                              mdrunArgs);
                else:
                    self.gromacsLog = GromacsLog()
                    abortHandler = \
                        AbortHandler(self.win.statusBar(), "mdrun")
                    errorCode = \
                        gromacsProcess.run(mdrun, mdrunArgs,
                                           self.background,
                                           abortHandler,
                                           self.mdrunPollFunction)
                    abortHandler = None
                    if (errorCode != 0):
                        msg = redmsg("GROMACS minimization failed, mdrun returned %d" % errorCode)
                        env.history.message(self.cmdname + ": " + msg)
                        self.errcode = 3;
                if (self.background and errorCode == 0):
                    # Note: in this branch errorCode is still grompp's
                    # exit status; mdrun was detached via spawnv above.
                    if (launchNV1):
                        hdf5DataStoreDir = \
                            gromacsWorkingDir + os.sep + \
                            gromacsFullBaseFileInfo.completeBaseName()
                        os.mkdir(hdf5DataStoreDir)
                    sleep(1) # Give GMX/HDF5 a chance to write basic info
                    # Determine the GMX process id (pid) for passing to nv1.
                    #
                    # (Py)QProcess.pid() doesn't return anything useable
                    # for new, non-child processes, read the pid from the
                    # mdrun log file.
                    mdrunLogFileName = \
                        "%s-mdrun.log" % gromacsFullBaseFileName
                    pid = None
                    fileOpenAttemptIndex = 0
                    while (fileOpenAttemptIndex < 3):
                        try:
                            logFile = open(mdrunLogFileName, 'r')
                            for line in logFile:
                                index = line.find(" pid: ");
                                if (index != -1):
                                    pid = line[index+6:]
                                    pid = pid.split(" ")[0];
                                    break
                            logFile.close()
                            fileOpenAttemptIndex = 99
                        except:
                            # Log not written yet; retry up to 3 times.
                            fileOpenAttemptIndex += 1
                            env.history.message(self.cmdname + ": Waiting for GROMACS process identifier availability...")
                            sleep(1)
                    # Write the input file into the HDF5 data store
                    # directory. (It is part of data store.)
                    if (launchNV1):
                        inputFileName = hdf5DataStoreDir + os.sep + "input.mmp"
                        env.history.message(self.cmdname + ": Writing input.mmp file to HDF5 data store directory.")
                        all_atoms = {}
                        self.part.writemmpfile(inputFileName,
                                               add_atomids_to_dict = all_atoms)
                    # Write a file that maps the ids of the atoms actually
                    # used for simulation to the atom ids of the complete
                    # structure stored in the MMP file above.
                    if (launchNV1):
                        used_atoms = self.used_atoms
                        assert used_atoms is not None, \
                               "self.used_atoms didn't get stored"
                        mapFilename = \
                            hdf5DataStoreDir + os.sep + "trajAtomIdMap.txt"
                        self.writeTrajectoryAtomIdMapFile(mapFilename,
                                                          used_atoms, all_atoms)
                    # Launch the NV1 process
                    if (launchNV1):
                        nv1 = self.nv1_executable_path
                        nv1Process = Process()
                        nv1Args = [
                            "-f", hdf5DataStoreDir + ".nh5",
                            "-p", "GMX", "%s" % pid,
                            ]
                        nv1Process.setStandardOutputPassThrough(True)
                        nv1Process.setStandardErrorPassThrough(True)
                        nv1Process.setProcessName("nv1")
                        env.history.message(self.cmdname + ": Launching NanoVision-1...")
                        nv1Process.run(nv1, nv1Args, True)
                    else:
                        if (pid != None):
                            env.history.message(self.cmdname + ": GROMACS process " + pid + " has been launched.")
                        else:
                            env.history.message(self.cmdname + ": GROMACS process has been launched; unable to determine its identifier.")
    except:
        print_compact_traceback("bug in simulator-calling code: ")
        self.errcode = -11111
    # Always restore UI state, whether the run succeeded or not.
    self.set_waitcursor(False)
    self.win.disable_QActions_for_sim(False)
    if not self.errcode:
        return # success
    if self.errcode == 1: # User pressed Abort button in progress dialog.
        msg = redmsg("Aborted.")
        env.history.message(self.cmdname + ": " + msg)
        if self.simProcess: #bruce 051231 added condition (since won't be there when use_dylib)
            ##Tries to terminate the process the nice way first, so the process
            ## can do whatever clean up it requires. If the process
            ## is still running after 2 seconds (a kludge). it terminates the
            ## process the hard way.
            #self.simProcess.tryTerminate()
            #QTimer.singleShot( 2000, self.simProcess, SLOT('kill()') )
            # The above does not work, so we'll hammer the process with SIGKILL.
            # This works. Mark 050210
            self.simProcess.kill()
    elif not self.pyrexSimInterrupted and \
         self.errcode != FAILURE_ALREADY_DOCUMENTED: # wware 060323 bug 1725
        # Something failed...
        msg = "Simulation failed: exit code or internal error code %r " % \
              self.errcode #e identify error better!
        env.history.message(self.cmdname + ": " + redmsg(msg))
            #fyi this was 'cmd' which was wrong, it says 'Simulator'
            # even for Minimize [bruce 060106 comment, fixed it now]
    self.said_we_are_done = True
        # since saying we aborted or had an error is good enough...
        ###e revise if kill can take time.
    return # caller should look at self.errcode
    # semi-obs comment? [by bruce few days before 050404, partly expresses an intention]
    # results themselves are a separate object (or more than one?) stored in attrs... (I guess ###k)
    # ... at this point the caller probably extracts the results object and uses it separately
    # or might even construct it anew from the filename and params
    # depending on how useful the real obj was while we were monitoring the progress
    # (since if so we already have it... in future we'll even start playing movies as their data comes in...)
    # so not much to do here! let caller care about res, not us.
def set_options_errQ(self): #e maybe split further into several setup methods? #bruce 051115 removed unused 'options' arg
    """
    Set movie alist (from simaspect or entire part);
    debug-msg if it was already set (and always ignore old value).
    Figure out and set filenames, including sim executable path.
    All inputs and outputs are self attrs or globals or other obj attrs...
    except, return error code if sim executable missing
    or on other errors detected by subrs.

    @return: None on success, or a nonzero error code after an error
             message has already been emitted to history.

    old docstring:
    Caller should specify the options for this simulator run
    (including the output file name);
    these might affect the input file we write for it
    and/or the arguments given to the simulator executable.
    Temporary old-code compatibility: use self._movie
    for simsetup params and other needed params, and store new ones into it.
    """
    part = self.part
    movie = self._movie
    # set up alist (list of atoms for sim input and output files, in order)
    if movie.alist is not None:
        # this movie object is being reused, which is a bug.
        # complain... and try to work around.
        if debug_flags.atom_debug:
            # since I expect this is possible for "save movie file" until fixed...
            # [bruce 050404] (maybe not? it had assert 0)
            print "BUG (worked around??): movie object being reused unexpectedly"
        movie.alist = None
    movie.alist_fits_entire_part = False # might be changed below
    if not self.simaspect:
        # No prescribed subset of atoms to minimize. Use all atoms in the part.
        # Make sure some chunks are in the part.
        if not part.molecules: # Nothing in the part to minimize.
            msg = redmsg("Can't create movie. No chunks in part.")
                ####@@@@ is this redundant with callers? yes for simSetup,
                # don't know about minimize, or the weird fileSave call in MWsem.
            env.history.message(msg)
            return -1
        movie.set_alist_from_entire_part(part) ###@@@ needs improvement, see comments in it
        for atom in movie.alist:
            assert atom.molecule.part == part ###@@@ remove when works
        movie.alist_fits_entire_part = True # permits optims... but note it won't be valid
            # anymore if the part changes! it's temporary... not sure it deserves to be an attr
            # rather than local var or retval.
    else:
        # the simaspect should know what to minimize...
        alist = self.simaspect.atomslist()
        movie.set_alist(alist)
        for atom in movie.alist: # redundant with set_alist so remove when works
            assert atom.molecule.part == part
    # Set up filenames.
    # We use the process id to create unique filenames for this instance of the program
    # so that if the user runs more than one program at the same time, they don't use
    # the same temporary file names.
    # We now include a part-specific suffix [mark 051030]]
    # [This will need revision when we can run more than one sim process
    #  at once, with all or all but one in the "background" [bruce 050401]]
    # simFilesPath = "~/Nanorex/SimFiles". Mark 051028.
    simFilesPath = find_or_make_Nanorex_subdir('SimFiles')
    # Create temporary part-specific filename, for example:
    # "partname-minimize-pid1000".
    # We'll be appending various extensions to tmp_file_prefix to make temp
    # file names for sim input and output files as needed (e.g. mmp, xyz,
    # etc.)
    junk, basename, ext = filesplit(self.assy.filename)
    if not basename: # The user hasn't named the part yet.
        basename = "Untitled"
    timestampString = ""
    if (self.background):
        # Add a timestamp to the pid so that multiple backgrounded
        # calculations don't clobber each other's files.
        timestamp = datetime.today()
        timestampString = timestamp.strftime(".%y%m%d%H%M%S")
    self.tmp_file_prefix = \
        os.path.join(simFilesPath,
                     "%s-minimize-pid%d%s" % (basename, os.getpid(),
                                              timestampString))
    r = self.old_set_sim_output_filenames_errQ( movie, self.mflag)
    if r: return r
    # don't call sim_input_filename here, that's done later for some reason
    # prepare to spawn the process later (and detect some errors now)
    bin_dir = self.sim_bin_dir_path()
    # Make sure the simulator exists (as dylib or as standalone program)
    if self.use_dylib_sim:
        #bruce 051230 experimental code
        self.dylib_path = bin_dir
            # this works for developers if they set up symlinks... might not be right...
        worked = self.import_dylib_sim(self.dylib_path)
        if not worked:
            # Fall back to the command-line simulator below.
            # The dylib filename on Windows can be either sim.dll or sim.pyd -- should we mention them both?
            # If the imported name is not the usual one, or if two are present, should we print a warning?
            ##e Surely this message text (and the other behavior suggested above) should depend on the platform
            # and be encapsulated in some utility function for loading dynamic libraries. [bruce 060104]
            msg = redmsg("The simulator dynamic library [sim.so or sim.dll, in " + self.dylib_path +
                         "] is missing or could not be imported. Trying command-line simulator.")
            env.history.message(self.cmdname + ": " + msg)
            ## return -1
            self.use_dylib_sim = False
            ####@@@@ bug report: even after this, it will find tracefile from prior run (if one exists) and print its warnings.
            # probably we should remove that before this point?? [bruce 051230] [hmm, did my later removal of the old tracefile
            # fix this, or is it not removed until after this point?? bruce question 060102]
    if not self.use_dylib_sim:
        # "program" is the full path to the simulator executable.
        if sys.platform == 'win32':
            program = os.path.join(bin_dir, 'simulator.exe')
        else:
            program = os.path.join(bin_dir, 'simulator')
        if not os.path.exists(program):
            msg = redmsg("The simulator program [" + program + "] is missing. Simulation aborted.")
            env.history.message(self.cmdname + ": " + msg)
            return -1
        self.program = program
    return None # no error
def sim_bin_dir_path(self): #bruce 060102 split this out
    """
    Return the pathname of the bin directory that ought to contain the
    simulator executable and/or dynamic library.
    (Doesn't check whether it exists.)
    """
    # The directory NE-1 is currently running from; "bin" sits beside it.
    appDir = os.path.dirname(os.path.abspath(sys.argv[0]))
    return os.path.normpath(os.path.join(appDir, '..', 'bin'))
def import_dylib_sim(self, dylib_path): #bruce 051230 experimental code
    """
    Try to import the dynamic library version of the simulator, under the
    module name 'sim', located in dylib_path. Return a success flag.
    """
    import sys
    if sys.modules.has_key('sim'):
        # Already imported during this session; assume it's usable.
        success = True
    else:
        # Temporarily put dylib_path at the front of sys.path so the
        # import finds the dylib there, then restore the old path.
        # (Keeping the old path entries too, in case the import has to
        # pull in anything else behind the scenes.)
        saved_path = sys.path
        sys.path = [dylib_path] + saved_path
        try:
            import sim
            assert sys.modules.has_key('sim')
            success = True
        except:
            # If this fails, retrying later probably won't help until the
            # app is restarted, so no fancy recovery is attempted here.
            print_compact_traceback("error trying to import dylib sim: ")
            success = False
        sys.path = saved_path
    if success:
        # Also make sure the module exports the entry point we need.
        try:
            from sim import theSimulator
        except:
            success = False
            print_compact_traceback("error trying to import Minimize and Dynamics from dylib sim: ")
    return success
def old_set_sim_output_filenames_errQ(self, movie, mflag):
    """
    Old code, not yet much cleaned up. Uses and/or sets movie.filename,
    with movie serving to hold desired sim parameters
    (more like a SimSetup object than a Movie object in purpose).
    Stores shell command option for using tracefile (see code, needs cleanup).
    Returns error code (nonzero means error return needed from entire SimRunner.run,
    and means it already emitted an error message).
    """
    # mflag semantics (from the branches below): 1 = minimize writing a
    # single-frame .xyz, 2 = minimize writing a multi-frame .dpb,
    # false = dynamics movie (filename assumed already set on movie).
    #
    # figure out filename for trajectory or final-snapshot output from simulator
    # (for sim-movie or minimize op), and store it in movie.moviefile
    # (in some cases it's the name that was found there).
    if mflag == 1: # single-frame XYZ file
        if movie.filename and debug_flags.atom_debug:
            print "atom_debug: warning: ignoring filename %r, bug??" % movie.filename
        movie.filename = self.tmp_file_prefix + ".xyz" ## "sim-%d.xyz" % pid
    if mflag == 2: #multi-frame DPB file
        if movie.filename and debug_flags.atom_debug:
            print "atom_debug: warning: ignoring filename %r, bug??" % movie.filename
        movie.filename = self.tmp_file_prefix + ".dpb" ## "sim-%d.dpb" % pid
    if movie.filename:
        moviefile = movie.filename
    else:
        msg = redmsg("Can't create movie. Empty filename.")
        env.history.message(self.cmdname + ": " + msg)
        return -1
    # Check that the moviefile has a valid extension.
    ext = moviefile[-4:]
    if ext not in ['.dpb', '.xyz']:
        # Don't recognize the moviefile extension.
        msg = redmsg("Movie [" + moviefile + "] has unsupported extension.")
        env.history.message(self.cmdname + ": " + msg)
        print "writeMovie: " + msg
        return -1
    movie.filetype = ext #bruce 050404 added this
    # Figure out tracefile name, store in self.traceFileName,
    # and come up with sim-command argument for it, store that in self.traceFileArg.
    if mflag:
        #bruce 050407 comment: mflag true means "minimize" (value when true means output filetype).
        # Change: Always write tracefile, so Minimize can see warnings in it.
        # But let it have a different name depending on the output file extension,
        # so if you create xxx.dpb and xxx.xyz, the trace file names differ.
        # (This means you could save one movie and one minimize output for the same xxx,
        # and both trace files would be saved too.) That change is now in movie.get_trace_filename().
        self.traceFileName = movie.get_trace_filename()
            # (same as in other case, but retval differs due to movie.filetype)
    else:
        # The trace filename will be the same as the movie filename, but with "-trace.txt" tacked on.
        self.traceFileName = movie.get_trace_filename() # presumably uses movie.filename we just stored
            # (I guess this needn't know self.tmp_file_prefix except perhaps via movie.filename [bruce 050401])
    if self.traceFileName:
        self.traceFileArg = "-q" + self.traceFileName #SIMOPT
    else:
        self.traceFileArg = ""
    # This was the old tracefile - obsolete as of 2005-03-08 - Mark
    ## traceFileArg = "-q"+ os.path.join(self.tmpFilePath, "sim-%d-trace.txt" % pid) #SIMOPT
    return None # no error
def sim_input_filename(self):
    """
    Figure out the simulator input filename
    (previously set options might specify it or imply how to make it up;
    if not, make up a suitable temp name)
    and return it; don't record it (caller does that),
    and no need to be deterministic (only called once if that matters).
    """
    # We always save the current part to an MMP file before starting the
    # simulator, so the input name is just the temp prefix plus ".mmp".
    # (Reusing assy.filename when it's already an unmodified MMP file was
    # once considered, but is unlikely to ever be wanted.)
    return "%s.mmp" % self.tmp_file_prefix ## "sim-%d.mmp" % pid
def write_sim_input_file(self):
    """
    Write the appropriate data from self.part (as modified by self.simaspect)
    to an input file for the simulator (presently always in mmp format)
    using the filename self.sim_input_file
    (overwriting any existing file of the same name).

    Side effect: fills self.used_atoms with the ids of the atoms actually
    written (used later to map trajectory atoms back to the full part).
    """
    part = self.part
    mmpfile = self.sim_input_file # the filename to write to
    movie = self._movie # old-code compat kluge
    assert movie.alist is not None #bruce 050404
    self.used_atoms = {} #bruce 080325 [review: redundant with movie.alist??]
    if not self.simaspect: ## was: if movie.alist_fits_entire_part:
        # Whole-part case: write the entire part.
        # note: as of 080325, this case certainly runs for Run Dynamics,
        # and probably runs for all whole-part Adjust, Minimize, or
        # Dynamics operations. [bruce 080325 comment]
        if DEBUG_SIM:
            print "part.writemmpfile(%r)" % (mmpfile,)
        stats = {}
        part.writemmpfile( mmpfile,
                           leave_out_sim_disabled_nodes = True,
                           sim = True,
                           dict_for_stats = stats,
                           add_atomids_to_dict = self.used_atoms
                         )
        #bruce 051209 added options (used to be hardcoded in files_mmp), plus a new one, dict_for_stats
        # As of 051115 this is still called for Run Sim [Run Dynamics].
        # As of 050412 this didn't yet turn singlets into H;
        # but as of long before 051115 it does (for all calls -- so it would not be good to use for Save Selection!).
        #bruce 050811 added sim = True to fix bug 254 for sim runs, for A6.
        # (and 051209, according to a longer comment now removed [by bruce 080321],
        #  added dict_for_stats to complete that fix)
        nsinglets_H = stats.get('nsinglets_H', 0)
        if nsinglets_H: #bruce 051209 this message code is approximately duplicated elsewhere in this file
            info = fix_plurals( "(Treating %d bondpoint(s) as Hydrogens, during simulation)" % nsinglets_H )
            env.history.message( info)
    else:
        # Subset case: let the simaspect write just its atoms.
        # note: as of 080325, this case certainly runs for Adjust 1 atom
        # (from its glpane cmenu), and probably runs for all part-subset
        # Adjust, Minimize, or Dynamics operations. [bruce 080325 comment]
        if DEBUG_SIM:
            print "simaspect.writemmpfile(%r)" % (mmpfile,)
        # note: simaspect has already been used to set up movie.alist; simaspect's own alist copy is used in following:
        self.simaspect.writemmpfile( mmpfile, add_atomids_to_dict = self.used_atoms)
        # this also turns singlets into H
    # obs comments:
    # bruce 050325 revised this to use whatever alist was asked for above (set of atoms, and order).
    # But beware, this might only be ok right away for minimize, not simulate (since for sim it has to write all jigs as well).
    ## movie.natoms = natoms = len(movie.alist) # removed by bruce 050404 since now done in set_alist etc.
    ###@@@ why does that trash a movie param? who needs that param? it's now redundant with movie.alist
    return
def set_waitcursor(self, on_or_off):
    """
    For on_or_off True, set the main window waitcursor.
    For on_or_off False, revert to the prior cursor.
    [It might be necessary to always call it in matched pairs,
     I don't know. #k]
    """
    # [WARNING: this code is duplicated in at least one other place,
    #  as of 060705]
    if not on_or_off:
        # Restore whatever cursor was in effect before.
        QApplication.restoreOverrideCursor()
    else:
        # Change the cursor application-wide to the Wait (hourglass)
        # cursor; more appropriate than changing only one window's cursor.
        QApplication.setOverrideCursor( QCursor(Qt.WaitCursor) )
    return
def spawn_process(self): # misnamed, since (1) also includes monitor_progress, and (2) doesn't always use a process
"""
Actually spawn the process [or the extension class object],
making its args [or setting its params] based on some of self's attributes.
Wait til we're done with this simulation, then record results in other self attributes.
"""
if DEBUG_SIM:
#bruce 051115 confirmed this is always called for any use of sim (Minimize or Run Sim)
print "calling spawn_process"
# First figure out process arguments
# [bruce 050401 doing this later than before, used to come before writing sim-input file]
self.setup_sim_args() # stores them in an attribute, whose name and value depends on self.use_dylib_sim
# Now run the sim to completion (success or fail or user abort),
# as well as whatever updates we do at the same time in the cad code
# (progress bar, showing movie in real time [nim but being added circa 051231], ...)
if self.use_dylib_sim:
self.sim_loop_using_dylib() #bruce 051231 wrote this anew
else:
self.sim_loop_using_standalone_executable() #bruce 051231 made this from last part of old spawn_process code
return
def setup_sim_args(self): #bruce 051231 split this out of spawn_process, added dylib case
"""
Set up arguments for the simulator, using one of two different interfaces:
either constructing a command line for the standalone executable simulator,
or creating and setting up an instance of an extension class defined in the
sim module (a dynamic library). (But don't start it running.)
We use the same method to set up both kinds of interface, so that it will
be easier to keep them in sync as the code evolves.
WARNING: We also set a few attributes of self which cause side effects later;
in one case, the attribute looks just like a sim-executable command line option
(purely for historical reasons).
"""
# set one of the sim-interface-format flags
use_dylib = self.use_dylib_sim
use_command_line = not self.use_dylib_sim
# (The rest of this method would permit both of these flags to be set together, if desired;
# that might be useful if we want to try one interface, and if it fails, try the other.)
movie = self._movie # old-code compat kluge
self.totalFramesRequested = movie.totalFramesRequested
self.update_cond = movie.update_cond
moviefile = movie.filename
if use_command_line:
program = self.program
outfileArg = "-o%s" % moviefile #SIMOPT
traceFileArg = self.traceFileArg
infile = self.sim_input_file
ext = movie.filetype #bruce 050404 added movie.filetype
mflag = self.mflag
# "formarg" = File format argument -- we need this even when use_dylib,
# since it's also used as an internal flag via self._formarg
if ext == ".dpb":
formarg = ''
elif ext == ".xyz":
formarg = "-x" #SIMOPT (value also used as internal flag)
else:
assert 0
self._formarg = formarg # kluge
# the use_dylib code for formarg is farther below
self._simopts = self._simobj = self._arguments = None # appropriate subset of these is set below
use_timestep_arg = False
if 1: ##@@ bruce 060503: add debug_pref to let user vary simulator timestep
# (we also read the value on import, in separate code above, to make sure it gets into the debug menu right away)
use_timestep_arg, timestep = _timestep_flag_and_arg(mflag)
# boolean and float (timestep in seconds)
if use_timestep_arg:
env.history.message(orangemsg("Note: using experimental non-default dynamics timestamp of %r femtoseconds" % (timestep * 1e15)))
if use_command_line:
# "args" = arguments for the simulator.
#SIMOPT -- this appears to be the only place the entire standalone simulator command line is created.
if mflag:
#argument to enable or disable electrostatics
electrostaticArg = '--enable-electrostatic='
if self.cmd_type == 'Adjust' or self.cmd_type == 'Adjust Atoms':
electrostaticFlag = self.getElectrostaticPrefValueForAdjust()
else:
electrostaticFlag = self.getElectrostaticPrefValueForMinimize()
## electrostaticArg.append(str(electrostaticFlag))
electrostaticArg += str(electrostaticFlag) #bruce 070601 bugfix
if (self.useGromacs):
gromacsArgs = ["--write-gromacs-topology",
moviefile,
"--path-to-cpp",
self.cpp_executable_path
]
else:
gromacsArgs = []
if (self.hasPAM):
# vdw-cutoff-radius in nm, and must match the
# user potential function table passed to
# mdrun. See GROMACS user manual section
# 6.6.2
gromacsArgs += [ "--vdw-cutoff-radius=%f" % self.yukawaRCutoff ]
# [bruce 05040 infers:] mflag true means minimize; -m tells this to the sim.
# (mflag has two true flavors, 1 and 2, for the two possible output filetypes for Minimize.)
# [later, bruce 051231: I think only one of the two true mflag values is presently supported.]
args = [program, '-m', str(formarg),
traceFileArg, outfileArg,
electrostaticArg,
infile] + gromacsArgs #SIMOPT
else:
# THE TIMESTEP ARGUMENT IS MISSING ON PURPOSE.
# The timestep argument "-s + (movie.timestep)" is not supported for Alpha. #SIMOPT
electrostaticArg = '--enable-electrostatic='
electrostaticFlag = self.getElectrostaticPrefValueForDynamics()
## electrostaticArg.append(str(electrostaticFlag))
electrostaticArg += str(electrostaticFlag) #bruce 070601 bugfix
args = [program,
'-f' + str(movie.totalFramesRequested), #SIMOPT
'-t' + str(movie.temp), #SIMOPT
'-i' + str(movie.stepsper), #SIMOPT
'-r', #SIMOPT
electrostaticArg,
str(formarg),
traceFileArg,
outfileArg,
infile]
if use_timestep_arg: #bruce 060503; I'm guessing that two separate arguments are needed for this, and that %f will work
args.insert(1, '--time-step')
args.insert(2, '%f' % timestep)
args += [ "--system-parameters", self.system_parameters_file ]
if DEBUG_SIM:
print "program = ",program
print "Spawnv args are %r" % (args,) # note: we didn't yet remove args equal to "", that's done below
arguments = QStringList()
for arg in args:
# wware 051213 sim's getopt doesn't like empty arg strings
if arg != "":
arguments.append(arg)
self._arguments = arguments
del args, arguments
if use_dylib:
sim = thePyrexSimulator()
sim.setup(mflag, infile)
simobj = sim.sim
if DebugMenuMixin.sim_params_set:
for attr, value in DebugMenuMixin.sim_param_values.items():
setattr(simobj, attr, value)
simopts = simobj
# order of set of remaining options should not matter;
# for correspondence see sim/src files sim.pyx, simhelp.c, and simulator.c
if formarg == '-x':
simopts.DumpAsText = 1 # xyz rather than dpb, i guess
else:
assert formarg == ''
simopts.DumpAsText = 0
if movie.print_energy:
simopts.PrintPotentialEnergy = 1
if self.traceFileName:
simopts.TraceFileName = self.traceFileName # note spelling diff, 'T' vs 't' (I guess I like this difference [b 060102])
#k not sure if this would be ok to do otherwise, since C code doesn't turn "" into NULL and might get confused
sim.setOutputFileName(moviefile)
if not mflag:
# The timestep argument "-s + (movie.timestep)" or Dt is not supported for Alpha...
if use_timestep_arg: #bruce 060503
simopts.Dt = timestep
simopts.NumFrames = movie.totalFramesRequested # SIMPARAMS
simopts.Temperature = movie.temp
simopts.IterPerFrame = movie.stepsper
simopts.PrintFrameNums = 0
simopts.EnableElectrostatic = self.getElectrostaticPrefValueForDynamics()
if mflag:
self.set_minimize_threshhold_prefs(simopts)
if self.cmd_type == 'Adjust' or self.cmd_type == 'Adjust Atoms':
simopts.EnableElectrostatic = self.getElectrostaticPrefValueForAdjust()
simopts.NeighborSearching = 0
else:
simopts.EnableElectrostatic = self.getElectrostaticPrefValueForMinimize()
simopts.NeighborSearching = self.getNeighborSearchingPrefValue()
if (self.useGromacs):
simopts.GromacsOutputBaseName = moviefile
simopts.PathToCpp = self.cpp_executable_path
if (self.hasPAM):
# vdw-cutoff-radius in nm, and must match the
# user potential function table passed to
# mdrun. See GROMACS user manual section
# 6.6.2
simopts.VanDerWaalsCutoffRadius = self.yukawaRCutoff
if (self.useAMBER):
simopts.UseAMBER = self.useAMBER
simopts.TypeFeedback = self.typeFeedback
#e we might need other options to make it use Python callbacks (nim, since not needed just to launch it differently);
# probably we'll let the later sim-start code set those itself.
self._simopts = simopts
self._simobj = simobj
# return whatever results are appropriate -- for now, we stored each one in an attribute (above)
return # from setup_sim_args
def getElectrostaticPrefValueForAdjust(self):
#ninad20070509
#int EnableElectrostatic =1 implies electrostatic is enabled
#and 0 implies it is disabled. This sim arg is defined in sim.pyx in sim/src
if self.useGromacs and env.prefs[electrostaticsForDnaDuringAdjust_prefs_key]:
val = 1
else:
val = 0
return val
def getElectrostaticPrefValueForMinimize(self):
#ninad20070509
# int EnableElectrostatic =1 implies electrostatic is enabled
#and 0 implies it is disabled. This sim arg is defined in sim.pyx in sim/src
if self.useGromacs and env.prefs[electrostaticsForDnaDuringMinimize_prefs_key]:
val = 1
else:
val = 0
return val
def getNeighborSearchingPrefValue(self):
if env.prefs[neighborSearchingInGromacs_prefs_key]:
val = 1
else:
val = 0
return val
def getElectrostaticPrefValueForDynamics(self):
#ninad20070509
# int EnableElectrostatic =1 implies electrostatic is enabled
#and 0 implies it is disabled. This sim arg is defined in sim.pyx in sim/src
if env.prefs[electrostaticsForDnaDuringDynamics_prefs_key]:
val = 1
else:
val = 0
return val
    def set_minimize_threshhold_prefs(self, simopts): #bruce 060628, revised 060705
        """
        Read the convergence-criteria preferences (endRMS, endMax,
        cutoverRMS, cutoverMax) appropriate for self.cmd_type, repair any
        disallowed value combinations (warning the user via orange history
        messages), then store the final values as attributes on simopts.
        Catches all exceptions internally (warns instead of raising).
        """
        def warn(msg):
            # helper: orange history warning with html-quoting
            env.history.message(orangemsg("Warning: ") + quote_html(msg))
        try:
            if env.debug():
                print "debug: running set_minimize_threshhold_prefs"
            ###obs design scratch:
            # we'll probably use different prefs keys depending on an arg that tells us which command-class to use,
            # Adjust, Minimize, or Adjust Atoms; maybe some function in prefs_constants will return the prefs_key,
            # so all the UI code can call it too. [bruce 060705]
            from utilities.prefs_constants import Adjust_endRMS_prefs_key, Adjust_endMax_prefs_key
            from utilities.prefs_constants import Adjust_cutoverRMS_prefs_key, Adjust_cutoverMax_prefs_key
            from utilities.prefs_constants import Minimize_endRMS_prefs_key, Minimize_endMax_prefs_key
            from utilities.prefs_constants import Minimize_cutoverRMS_prefs_key, Minimize_cutoverMax_prefs_key
            # kluge for A8 -- ideally these prefs keys or their prefs values
            # would be set as movie object attrs like all other sim params
            # Select which prefs keys apply to this command type.
            cmd_type = self.cmd_type
            if cmd_type == 'Adjust' or cmd_type == 'Adjust Atoms' or cmd_type == 'Check AtomTypes':
                endRMS_prefs_key = Adjust_endRMS_prefs_key
                endMax_prefs_key = Adjust_endMax_prefs_key
                cutoverRMS_prefs_key = Adjust_cutoverRMS_prefs_key
                cutoverMax_prefs_key = Adjust_cutoverMax_prefs_key
            elif cmd_type == 'Minimize':
                endRMS_prefs_key = Minimize_endRMS_prefs_key
                endMax_prefs_key = Minimize_endMax_prefs_key
                cutoverRMS_prefs_key = Minimize_cutoverRMS_prefs_key
                cutoverMax_prefs_key = Minimize_cutoverMax_prefs_key
            else:
                assert 0, "don't know cmd_type == %r" % (cmd_type,)
            # The following are partly redundant with the formulas,
            # which is intentional, for error checking of the formulas.
            # Only the first (endRMS) values are independent.
            if cmd_type == 'Adjust' or cmd_type == 'Check AtomTypes':
                defaults = (100.0, 500.0, 100.0, 500.0) # also hardcoded in prefs_constants.py
            elif cmd_type == 'Adjust Atoms':
                defaults = (50.0, 250.0, 50.0, 250.0)
            elif cmd_type == 'Minimize':
                defaults = (1.0, 5.0, 50.0, 250.0) # revised 060705, was (1.0, 10.0, 50.0, 300.0); also hardcoded in prefs_constants.py
            endRMS = env.prefs[endRMS_prefs_key]
            endMax = env.prefs[endMax_prefs_key]
            cutoverRMS = env.prefs[cutoverRMS_prefs_key]
            cutoverMax = orig_cutoverMax = env.prefs[cutoverMax_prefs_key]
            # -1 means left blank, use default; any 0 or negative value entered explicitly will have the same effect.
            # For an explanation of the logic of these formulas, see email from bruce to nanorex-all of 060619,
            # "test UI for minimizer thresholds". These are mainly for testing -- for final release (A8 or maybe A8.1)
            # we are likely to hide all but the first from the UI by default, with the others always being -1.
            # Revising formulas for A8 release, bruce 060705.
            if cmd_type == 'Adjust Atoms':
                # kluge, because it doesn't have its own prefs values, and has its own defaults, but needs to be adjustable:
                # use fixed values, but if Adjust prefs are made stricter, let those limit these fixed values too
                endRMS = min( endRMS, defaults[0] )
                endMax = min( endMax, defaults[1] )
                cutoverRMS = min( cutoverRMS, defaults[2] )
                cutoverMax = min( cutoverMax, defaults[3] )
            # Fill in blanks and repair inconsistent value combinations,
            # warning the user whenever an explicit value had to be overridden.
            if endRMS <= 0:
                endRMS = defaults[0] # e.g. 1.0; note, no other defaults[i] needs to appear in these formulas
            if endMax <= 0:
                endMax = 5.0 * endRMS # revised 060705 (factor was 10, now 5)
            elif endMax < endRMS:
                warn("endMax < endRMS is not allowed, using endMax = endRMS")
                endMax = endRMS # sim C code would use 5.0 * endRMS if we didn't fix this here
            if cutoverRMS <= 0:
                cutoverRMS = max( 50.0, endRMS ) # revised 060705
            if cutoverMax <= 0:
                cutoverMax = 5.0 * cutoverRMS # revised 060705, was 300.0
            if cutoverRMS < endRMS:
                warn("cutoverRMS < endRMS is not allowed, using cutoverRMS,Max = endRMS,Max")
                cutoverRMS = endRMS
                cutoverMax = endMax
            elif cutoverMax < endMax:
                warn("cutoverMax < endMax is not allowed, using cutoverRMS,Max = endRMS,Max")
                cutoverRMS = endRMS
                cutoverMax = endMax
            if cutoverMax < cutoverRMS:
                if orig_cutoverMax <= 0:
                    warn("cutoverMax < cutoverRMS is not allowed, using cutoverMax = 5.0 * cutoverRMS")
                        # revised 060705 (factor was 6, now 5)
                    cutoverMax = 5.0 * cutoverRMS # sim C code would use 5.0 * cutoverRMS if we didn't fix this here
                else:
                    warn("cutoverMax < cutoverRMS is not allowed, using cutoverMax = cutoverRMS")
                    cutoverMax = cutoverRMS # sim C code would use 5.0 * cutoverRMS if we didn't fix this here
            # Report the final criteria in history (always when non-default,
            # and also when ATOM_DEBUG is set).
            if (endRMS, endMax, cutoverRMS, cutoverMax) != defaults or env.debug():
                msg = "convergence criteria: endRMS = %0.2f, endMax = %0.2f, cutoverRMS = %0.2f, cutoverMax = %0.2f" % \
                      (endRMS, endMax, cutoverRMS, cutoverMax)
                if (endRMS, endMax, cutoverRMS, cutoverMax) == defaults:
                    msg += " (default values -- only printed since ATOM_DEBUG is set)"
                msg = _graymsg( msg)
                env.history.message( msg)
            simopts.MinimizeThresholdEndRMS = endRMS # for sim.so, but also grabbed from here later by other code in this file
            simopts.MinimizeThresholdEndMax = endMax # ditto
            simopts.MinimizeThresholdCutoverRMS = cutoverRMS
            simopts.MinimizeThresholdCutoverMax = cutoverMax
            ## # only some of the following are needed elsewhere; maybe they could be grabbed from simopts but I'm not sure
            ## self.endRMS = endRMS
            ## self.endMax = endMax
            ## self.cutoverRMS = cutoverRMS
            ## self.cutoverMax = cutoverMax
        except:
            print_compact_traceback("error in set_minimize_threshhold_prefs (the ones from the last run might be used): ")
            warn("internal error setting convergence criteria; the wrong ones might be used.")
            pass
        return
def sim_loop_using_standalone_executable(self): #bruce 051231 made this from part of spawn_process; compare to sim_loop_using_dylib
"#doc"
movie = self._movie
arguments = self._arguments
#bruce 050404 let simProcess be instvar so external code can abort it [this is still used as of 051231]
self.simProcess = None
try:
self.remove_old_moviefile(movie.filename) # can raise exceptions #bruce 051230 split this out
self.remove_old_tracefile(self.traceFileName)
## Start the simulator in a different process
self.simProcess = QProcess()
simProcess = self.simProcess
if DEBUG_SIM: #bruce 051115 revised this debug code
# wware 060104 Create a shell script to re-run simulator
outf = open("args", "w")
# On the Mac, "-f" prevents running .bashrc
# On Linux it disables filename wildcards (harmless)
outf.write("#!/bin/sh -f\n")
for a in arguments:
outf.write(str(a) + " \\\n")
outf.write("\n")
outf.close()
def blabout():
print "stdout:", simProcess.readStdout()
def blaberr():
print "stderr:", simProcess.readStderr()
QObject.connect(simProcess, SIGNAL("readyReadStdout()"), blabout)
QObject.connect(simProcess, SIGNAL("readyReadStderr()"), blaberr)
simProcess.setArguments(arguments)
###BUG: the above line may have never been ported to Qt4; for me it's saying AttributeError: setArguments.
# (One way to make it happen is to remove sim.so but leave the simulator executable accessible.)
# [bruce 070601 comment]
if self._movie.watch_motion:
env.history.message(orangemsg("(watch motion in real time is only implemented for pyrex interface to simulator)"))
# note: we have no plans to change that; instead, the pyrex interface will become the usual one
# except for background or remote jobs. [bruce 060109]
if not self._movie.create_movie_file:
env.history.message(orangemsg("(option to not create movie file is not yet implemented)")) # for non-pyrex sim
# NFR/bug 1286 not useful for non-pyrex sim, won't be implemented, this msg will be revised then
# to say "not supported for command-line simulator"
start = time.time() #bruce 060103 compute duration differently
simProcess.start()
# Launch the progress bar, and let it monitor and show progress and wait until
# simulator is finished or user aborts it.
self.monitor_progress_by_file_growth(movie) #bruce 060103 made this no longer help us compute duration
duration = time.time() - start
movie.duration = duration #bruce 060103 (more detailed comment in other place this occurs)
except: # We had an exception.
print_compact_traceback("exception in simulation; continuing: ")
if simProcess:
#simProcess.tryTerminate()
simProcess.kill()
simProcess = None
self.errcode = -1 # simulator failure
# now sim is done (or abort was pressed and it has not yet been killed)
# and self.errcode is error code or (for a specific hardcoded value)
# says abort was pressed.
# what all cases have in common is that user wants us to stop now
# (so we might or might not already be stopped, but we will be soon)
# and self.errcode says what's going on.
# [bruce 050407:]
# For now:
# Since we're not always stopped yet, we won't scan the tracefile
# for error messages here... let the caller do that.
# Later:
# Do it continuously as we monitor progress (in fact, that will be
# *how* we monitor progress, rather than watching the filesize grow).
return
def remove_old_moviefile(self, moviefile): #bruce 051230 split this out of spawn_process
"remove the moviefile if it exists, after warning existing Movie objects that we'll do so; can raise exceptions"
if os.path.exists(moviefile):
#bruce 050428: do something about this being the moviefile for an existing open movie.
try:
## print "calling apply2movies",moviefile
self.assy.apply2movies( lambda movie: movie.fyi_reusing_your_moviefile( moviefile) )
# note that this is only correct if we're sure it won't be called for the new Movie
# we're making right now! For now, this is true. Later we might need to add an "except for this movie" arg.
except:
#e in future they might tell us to lay off this way... for now it's a bug, but we'll ignore it.
print_compact_traceback("exception in preparing to reuse moviefile for new movie ignored: ")
pass
#bruce 050407 moving this into the try, since it can fail if we lack write permission
# (and it's a good idea to give up then, so we're not fooled by an old file)
if DEBUG_SIM:
print "deleting moviefile: [",moviefile,"]"
os.remove (moviefile) # Delete before spawning simulator.
return
#bruce 051231: here is an old comment related to remove_old_moviefile;
# I don't know whether it's obsolete regarding the bug it warns about:
# delete old moviefile we're about to write on, and warn anything that might have it open
# (only implemented for the same movie obj, THIS IS A BUG and might be partly new... ####@@@@)
def remove_old_tracefile(self, tracefile): #bruce 060101
"remove the tracefile if it exists, after warning anything that might care [nim]; can raise exceptions"
if os.path.exists(tracefile):
os.remove(tracefile) # can raise exception, e.g. due to directory permission error
return
    def monitor_progress_by_file_growth(self, movie): #bruce 051231 split this out of sim_loop_using_standalone_executable
        """
        Show a progressbar (with stop button) tracking the growth of the
        movie output file toward its estimated final size, wait until the
        simulator finishes or the user aborts, and store the resulting
        code in self.errcode. Also emits one history message (from
        old_guess_filesize_and_progbartext).
        """
        filesize, pbarCaption, pbarMsg = self.old_guess_filesize_and_progbartext( movie)
            # only side effect: history message [bruce 060103 comment]
        # pbarCaption and pbarMsg are not used any longer. [mark 060105 comment]
        # (but they or similar might be used again soon, eg for cmdname in tooltip -- bruce 060112 comment)
        statusBar = self.win.statusBar()
        progressReporter = FileSizeProgressReporter(movie.filename, filesize)
        self.errcode = statusBar.show_progressbar_and_stop_button(
            progressReporter,
            cmdname = self.cmdname, #bruce 060112
            showElapsedTime = True )
            # that 'launch' method is misnamed, since it also waits for completion;
            # its only side effects [as of bruce 060103] are showing/updating/hiding progress dialog, abort button, etc.
        return
def old_guess_filesize_and_progbartext(self, movie):
"#doc [return a triple of useful values for a progressbar, and emit a related history msg]"
#bruce 060103 added docstring
#bruce 050401 now calling this after spawn not before? not sure... note it emits a history msg.
# BTW this is totally unclean, all this info should be supplied by the subclass
# or caller that knows what's going on, not guessed by this routine
# and the filesize tracking is bogus for xyz files, etc etc, should be
# tracking status msgs in trace file. ###@@@
formarg = self._formarg # old-code kluge
mflag = self.mflag
natoms = len(movie.alist)
moviefile = movie.filename
# We cannot determine the exact final size of an XYZ trajectory file.
# This formula is an estimate. "filesize" must never be larger than the
# actual final size of the XYZ file, or the progress bar will never hit 100%,
# even though the simulator finished writing the file.
# - Mark 050105
#bruce 050407: apparently this works backwards from output file file format and minimizeQ (mflag)
# to figure out how to guess the filesize, and the right captions and text for the progressbar.
if formarg == "-x": #SIMOPT (used as internal flag, review if we change how this is passed to sim executable!)
# Single shot minimize.
if mflag: # Assuming mflag = 2. If mflag = 1, filesize could be wrong. Shouldn't happen, tho.
filesize = natoms * 16 # single-frame xyz filesize (estimate)
pbarCaption = "Adjust" # might be changed below
#bruce 050415: this string used to be tested in ProgressBar.py, so it couldn't have "All" or "Selection".
# Now it can have them (as long as it starts with Minimize, for now) --
# so we change it below (to caption from caller), or use this value if caller didn't provide one.
pbarMsg = "Adjusting..."
# Write XYZ trajectory file.
else:
filesize = movie.totalFramesRequested * ((natoms * 28) + 25) # multi-frame xyz filesize (estimate)
pbarCaption = "Save File" # might be changed below
pbarMsg = "Saving XYZ trajectory file " + os.path.basename(moviefile) + "..."
else:
# Multiframe minimize
if mflag:
filesize = (max(100, int(sqrt(natoms))) * natoms * 3) + 4
pbarCaption = "Adjust" # might be changed below
pbarMsg = None #bruce 050401 added this
# Simulate
else:
filesize = (movie.totalFramesRequested * natoms * 3) + 4
pbarCaption = "Simulator" # might be changed below
pbarMsg = "Creating movie file " + os.path.basename(moviefile) + "..."
msg = "Simulation started: Total Frames: " + str(movie.totalFramesRequested)\
+ ", Steps per Frame: " + str(movie.stepsper)\
+ ", Temperature: " + str(movie.temp)
env.history.message(self.cmdname + ": " + msg)
#bruce 050415: let caller specify caption via movie object's _cmdname
# (might not be set, depending on caller) [needs cleanup].
# For important details see same-dated comment above.
try:
caption_from_movie = movie._cmdname
except AttributeError:
caption_from_movie = None
if caption_from_movie:
pbarCaption = caption_from_movie
return filesize, pbarCaption, pbarMsg
#bruce 060103 pared this old comment down to its perhaps-useful parts:
## handle abort button (in progress bar or maybe elsewhere, maybe a command key)
## (btw abort or sim-process-crash does not imply failure, since there might be
## usable partial results, even for minimize with single-frame output);
## process other user events (or some of them) (maybe);
## and eventually return when the process is done,
## whether by abort, crash, or success to end;
## return True if there are any usable results,
## and have a results object available in some public attribute.
    def sim_loop_using_dylib(self): #bruce 051231; compare to sim_loop_using_standalone_executable
        # 051231 6:29pm: works, except no trace file is written so results in history come from prior one (if any)
        """
        Run the pyrex/dylib simulator on the already-configured
        self._simopts / self._simobj, driving a progressbar and abort
        button, handling per-frame and tracefile callbacks, and recording
        elapsed time in movie.duration. Sets self.errcode on abort,
        program-exit, or failure; user aborts arrive as the
        sim.SimulatorInterrupted exception.
        """
        movie = self._movie
        if debug_flags.atom_debug and movie.duration:
            print "atom_debug: possible bug: movie.duration was already set to", movie.duration
        movie.duration = 0.0 #k hopefully not needed
        # provide a reference frame for later movie-playing (for complete fix of bug 1297) [bruce 060112]
        movie.ref_frame = (self.__frame_number, A(map(lambda a: a.sim_posn(), movie.alist))) # see similar code in class Movie
            #e this could be slow, and the simobj already knows it, but I don't think getFrame has access to it [bruce 060112]
        simopts = self._simopts
        simobj = self._simobj
        if self.mflag:
            # minimize: frame count is open-ended, so no determinate progressbar range
            numframes = 0
        else:
            numframes = simopts.NumFrames
        progressBar = self.win.statusBar().progressBar
        progressBar.reset()
        progressBar.setRange(0, numframes)
        progressBar.setValue(0)
        progressBar.show()
        self.abortHandler = AbortHandler(self.win.statusBar(), self.cmdname)
        # Best-effort cleanup of old output files; failures are reported but
        # deliberately don't prevent the upcoming sim run. [bruce 060705]
        try:
            self.remove_old_moviefile(movie.filename) # can raise exceptions #bruce 051230 split this out
        except:
            #bruce 060705 do this here -- try not to prevent the upcoming sim
            print_compact_traceback("problem removing old moviefile, continuing anyway: ")
            env.history.message(orangemsg("problem removing old moviefile, continuing anyway"))
        try:
            self.remove_old_tracefile(self.traceFileName)
        except:
            #bruce 060705 do this here -- try not to prevent the upcoming sim
            print_compact_traceback("problem removing old tracefile, continuing anyway: ")
            env.history.message(orangemsg("problem removing old tracefile, continuing anyway"))
        try:
            if not self._movie.create_movie_file:
                env.history.message(orangemsg("(option to not create movie file is not yet implemented)")) # for pyrex sim
                # NFR/bug 1286; other comments describe how to implement it; it would need a warning
                # (esp if both checkboxes unchecked, since no frame output in that case, tho maybe tracef warnings alone are useful)
            editwarning = "Warning: editing structure while watching motion causes tracebacks; cancelling an abort skips some real time display time"
            if self._movie.watch_motion: #bruce 060705 added this condition
                if not seen_before(editwarning): #bruce 060317 added this condition
                    env.history.message(orangemsg( editwarning ))
            env.call_qApp_processEvents() # so user can see that history message
            ###@@@ SIM CLEANUP desired: [bruce 060102]
            # (items 1 & 2 & 4 have been done)
            # 3. if callback caller in C has an exception from callback, it should not *keep* calling it, but reset it to NULL
            # wware 060309, bug 1343
            self.startTime = start = time.time()
            if self.abortHandler.getPressCount() < 1:
                # checked here since above processEvents can take time, include other tasks
                # do these before entering the "try" clause
                # note: we need the frame callback even if not self._movie.watch_motion,
                # since it's when we check for user aborts and process all other user events.
                frame_callback = self.sim_frame_callback
                trace_callback = self.tracefile_callback
                minflag = movie.minimize_flag
                ###@@@ should we merge this logic with how we choose the simobj class? [bruce 060112]
                self.tracefileProcessor = TracefileProcessor(self, minimize = minflag, simopts = simopts)
                    # so self.tracefile_callback does something [bruce 060109]
                from sim import SimulatorInterrupted #bruce 060112 - not sure this will work here vs outside 'def' ###k
                self.sim_frame_callback_prep()
                if DebugMenuMixin.sim_params_set:
                    # report any sim params that didn't survive into the sim object
                    for attr, expected in DebugMenuMixin.sim_param_values.items():
                        found = getattr(simobj, attr)
                        if found != expected:
                            env.history.message(orangemsg(attr + ' expected=' + str(expected) + ' found=' + str(found)))
                try:
                    thePyrexSimulator().run( frame_callback = frame_callback, trace_callback = trace_callback )
                    # note: if this calls a callback which raises an exception, that exception gets
                    # propogated out of this call, with correct traceback info (working properly as of sometime on 060111).
                    # If a callback sets simobj.Interrupted (but doesn't raise an exception),
                    # this is turned into an exception like "sim.SimulatorInterrupted: simulator was interrupted".
                    # It also generates a tracefile line "# Warning: minimizer run was interrupted "
                    # (presumably before that exception gets back to here,
                    # which means a tracefile callback would presumably see it if we set one --
                    # but as of 060111 there's a bug in which that doesn't happen since all callbacks
                    # are turned off by Interrupted).
                    if debug_flags.atom_debug:
                        print "atom_debug: pyrex sim: returned normally"
                except SimulatorInterrupted:
                    self.pyrexSimInterrupted = True # wware 060323 bug 1725
                    # This is the pyrex sim's new usual exit from a user abort, as of sometime 060111.
                    # Before that it was RuntimeError, but that could overlap with exceptions raised by Python callbacks
                    # (in fact, it briefly had a bug where all such exceptions turned into RuntimeErrors).
                    #
                    # I didn't yet fully clean up this code for the new exception. [bruce 060112] ####@@@@
                    if debug_sim_exceptions: #bruce 060111
                        print_compact_traceback("fyi: sim.go aborted with this: ")
                    # following code is wrong unless this was a user abort, but I'm too lazy to test for that from the exception text,
                    # better to wait until it's a new subclass of RuntimeError I can test for [bruce 060111]
                    env.history.statusbar_msg("Aborted")
                    if debug_flags.atom_debug:
                        print "atom_debug: pyrex sim: aborted"
                    if self.PREPARE_TO_CLOSE:
                        # wware 060406 bug 1263 - exiting the program is an acceptable way to leave this loop
                        self.errcode = -1
                    elif self.abortHandler.getPressCount() < 1:
                        if not debug_sim_exceptions:
                            #bruce 060712
                            print_compact_traceback("fyi: sim.go aborted with this: ")
                        msg3 = "possible bug in simulator: abort not caused by abortbutton"
                        env.history.message(redmsg(msg3)) #bruce 060712
                        print "error: abort without abortbutton doing it (did a subtask intervene and finish it?)"
                        print "    (or this can happen due to sim bug in which callback exceptions turn into RuntimeErrors)"####@@@@
                        self.abortHandler.finish()
                        self.abortHandler = None
                    ## bug: this fails to cause an abort to be reported by history. might relate to bug 1303.
                    # or might only occur due to current bugs in the pyrex sim, since I think user abort used to work. [bruce 060111]
                    # Initial attempt to fix that -- need to improve errcode after reviewing them all
                    # (check for errorcode spelling error too? or rename it?) ####@@@@
                    if not self.errcode:
                        print "self.errcode was not set, using -1"
                        self.errcode = -1 # simulator failure [wrong errorcode for user abort, fix this]
                    pass
                pass
            if 1: # even if aborting
                duration = time.time() - start
                #e capture and print its stdout and stderr [not yet possible via pyrex interface]
                movie.duration = duration #bruce 060103
        except: # We had an exception.
            print_compact_traceback("exception in simulation; continuing: ")
            ##e terminate it, if it might be in a different thread; destroy object; etc
            # show the exception message in the history window - wware 060314
            type, value, traceback = sys.exc_info()
            msg = redmsg("%s: %s" % (type, value))
            env.history.message(msg)
            self.errcode = FAILURE_ALREADY_DOCUMENTED
            self.abortHandler.finish() # whether or not there was an exception and/or it aborted
            self.abortHandler = None
            return
        env.history.progress_msg("") # clear out elapsed time messages
        env.history.statusbar_msg("Done.") # clear out transient statusbar messages
        self.abortHandler.finish() # whether or not there was an exception and/or it aborted
        self.abortHandler = None
        return
__last_3dupdate_time = -1
__last_progress_update_time = -1
__frame_number = 0 # starts at 0 so incrementing it labels first frame as 1 (since initial frame is not returned)
#k ought to verify that in sim code -- seems correct, looking at coords and total number of frames
# note: we never need to reset __frame_number since this is a single-use object.
# could this relate to bug 1297? [bruce 060110] (apparently not [bruce 060111])
## __sim_work_time = 0.05 # initial value -- we'll run sim_frame_callback_worker 20 times per second, with this value
__last_3dupdate_frame = 0
__last_pytime = 0.03 # guess (this is a duration)
def sim_frame_callback_prep(self):
self.__last_3dupdate_time = self.__last_progress_update_time = time.time()
def sim_frame_callback_update_check(self, simtime, pytime, nframes):
"[#doc is in SimSetup.py and in caller]"
#bruce 060705 revised this, so self.update_cond of None is not an error, so it can be the usual way to say "never update"
res = True # whether to update this time
use_default_cond = False
if self.update_cond == '__default__':
use_default_cond = True
elif self.update_cond:
try:
res = self.update_cond(simtime, pytime, nframes) # res should be a boolean value
except:
self.update_cond = '__default__' # was None
print_compact_traceback("exception in self.update_cond ignored, reverting to default cond: ")
use_default_cond = True
else:
res = False # was: use_default_cond = True
if use_default_cond:
try:
res = (simtime >= max(0.05, min(pytime * 4, 2.0)))
except:
print_compact_traceback("exception in default cond, just always updating: ")
res = True
## if res and debug_flags.atom_debug: # DO NOT COMMIT THIS, even with 'if res' -- might print too often and slow it down
## print "debug: %d sim_frame_callback_update_check returns %r, args" % (self.__frame_number,res), \
## simtime, pytime, nframes #bruce 060712
return res
def sim_frame_callback(self, last_frame):
    """
    Per-frame callback function for simulator object.

    Called once per computed frame (and with last_frame true for the final
    frame). Increments self.__frame_number, asks
    self.sim_frame_callback_update_check whether to update the 3D view,
    optionally updates the progress indicators, and maintains the timing
    state (__last_3dupdate_time, __last_pytime, __last_3dupdate_frame,
    __last_progress_update_time) that future calls use for that decision.

    @param last_frame: true when the simulator reports this is the final
                       frame; forces a 3D view update.
    @raise SimulatorInterrupted: raised to stop the simulator when the
                                 application is closing (self.PREPARE_TO_CLOSE).
    """
    from sim import SimulatorInterrupted
    if last_frame and env.debug():
        print "debug: last_frame is true" #bruce 060712
    # Note: this was called 3550 times for minimizing a small C3 sp3 hydrocarbon... better check the elapsed time quickly.
    #e Maybe we should make this into a lambda, or even code it in C, to optimize it.
    if self.PREPARE_TO_CLOSE:
        # wware 060406 bug 1263 - if exiting the program, interrupt the simulator
        from sim import SimulatorInterrupted
        raise SimulatorInterrupted
    self.__frame_number += 1
    if debug_all_frames:
        from sim import theSimulator
        if debug_sim_exceptions:
            # intentionally buggy code
            print "frame %d" % self.__frame_number, self._simobj.getTheFrame() # this is a bug, that attr should not exist
        else:
            # correct code
            print "frame %d" % self.__frame_number, theSimulator().getFrame()[debug_all_frames_atom_index]
        pass
    try:
        # Decide whether to update the 3D view and/or the progress indicators.
        # Original code: let sim use up most of the real time used, measuring redraw timing in order to let that happen.
        # see below for more info.
        #bruce 060530 generalizing this to ask self.update_cond how to decide.
        now = time.time() # real time
        simtime = now - self.__last_3dupdate_time # time the sim has been churning away since the last update was completed
        pytime = self.__last_pytime # python/redraw time used by the previous update (measured at end of this method)
        nframes = self.__frame_number - self.__last_3dupdate_frame # frames computed since the last 3D update
        update_3dview = self.sim_frame_callback_update_check( simtime, pytime, nframes ) # call this even if later code overrides it
        # always show the last frame - wware 060314
        if last_frame or debug_all_frames:
            update_3dview = True
        # now we know whether we want to update the 3d view (and save new values for the __last variables used above).
        if update_3dview:
            if debug_pyrex_prints:
                print "sim hit frame %d in" % self.__frame_number, simtime
                #e maybe let frame number be an arg from C to the callback in the future?
            self.__last_3dupdate_frame = self.__frame_number
            self.__last_3dupdate_time = now_start = now
                # this gets set again below, and again [060712] after all time spent in this function when update_3dview is true;
                # this set is probably not needed, but it may help with debugging or exceptions sometimes;
                # the later intermediate one is the same, except it's more likely that it may help with those things.
                # [bruce 060712 revised this comment & related code]
            try:
                self.sim_frame_callback_worker( self.__frame_number) # might call self.abort_sim_run() or set self.need_process_events
            except:
                print_compact_traceback("exception in sim_frame_callback_worker, aborting run: ")
                self.abort_sim_run("exception in sim_frame_callback_worker(%d)" % self.__frame_number ) # sets flag inside sim object
            self.__last_3dupdate_time = time.time() # this will be set yet again (see comment above)
                # [following comment might be #obs, but I don't understand the claim of an effect on abortability -- bruce 060712]
                # use this difference to adjust 0.05 above, for the upcoming period of sim work;
                # note, in current code this also affects abortability
            # pytime code moved from here to end of method, bruce 060712, to fix bad logic bug introduced 060601,
            # which caused A8 watch realtime "as fast as possible" to be far slower than in A7, due to rendering time
            # being counted as simtime (which was because rendering was moved out of sim_frame_callback_worker on 060601)
            # update 'now' for use in progress_update decision
            now = self.__last_3dupdate_time
            pass
        # NOTE precedence: 'or' binds looser than 'and', so this means
        # "at least 1.0s since last progress update, OR (updating 3d view AND at least 0.2s)".
        if now >= self.__last_progress_update_time + 1.0 or update_3dview and now >= self.__last_progress_update_time + 0.2:
            # update progressbar [wware 060310, bug 1343]
            # [optim by bruce 060530 -- at most once per second when not updating 3d view, or 5x/sec when updating it often]
            self.need_process_events = True
            self.__last_progress_update_time = now
            msg = None
            # wware 060309, bug 1343, 060628, bug 1898
            tp = self.tracefileProcessor
            if tp:
                pt = tp.progress_text()
                if pt:
                    msg = self.cmdname + ": " + pt
            if msg is not None:
                env.history.statusbar_msg(msg)
            if self.mflag:
                # Minimization, give "Elapsed Time" message
                msg = "Elapsed time: " + hhmmss_str(int(time.time() - self.startTime))
            else:
                # Dynamics, give simulation frame number, total frames, and time, wware 060419
                msg = (("Frame %d/%d, T=" % (self.__frame_number, self.totalFramesRequested)) +
                       hhmmss_str(int(time.time() - self.startTime)))
            env.history.progress_msg(msg)
            if self.mflag:
                self.win.statusBar().progressBar.setValue(0)
            else:
                self.win.statusBar().progressBar.setValue(self.__frame_number)
            pass
        # do the Qt redrawing for either the GLPane or the status bar (or anything else that might need it),
        # only if something done above set a flag requesting it
        self.sim_frame_callback_updates() # checks/resets self.need_process_events, might call call_qApp_processEvents
            #bruce 060601 bug 1970
        if update_3dview:
            #bruce 060712 fix logic bug introduced on 060601 [for Mac/Linux A8, though the bug surely affects Windows A8 too] --
            # measure pytime only now, so it includes GLPane redraw time as it needs to.
            # (This also means it includes sbar updates and redraw, but only when update_3dview occurred;
            #  that makes sense, since what it controls is the frequency of the redraws of all kinds that happen then,
            #  but not the frequency of the progress_update sbar redraws that sometimes happen not then (at most one per second).)
            self.__last_3dupdate_time = time.time() # this is the last time we set this, in this method run
            pytime = self.__last_3dupdate_time - now_start
            self.__last_pytime = pytime
            if debug_pyrex_prints:
                print "python stuff when update_3dview took", pytime
                # old results of that, before we did nearly so much sbar updating:
                # python stuff took 0.00386619567871 -- for when no real work done, just overhead; small real egs more like 0.03
            if debug_timing_loop_on_sbar:
                # debug: show timing loop properties on status bar
                msg = "sim took %0.3f, hit frame %03d, py took %0.3f" % \
                      (simtime, self.__frame_number, pytime)
                env.history.statusbar_msg(msg)
            pass
        pass
    except SimulatorInterrupted, e:
        # With the precautions on the sim side, in sim.pyx and simhelp.c, the only time we'll
        # ever get a SimulatorInterrupted exception is as the result of an actual interruption
        # of the simulator, not as a result of any exception thrown by a Python callback or by
        # any anomalous occurrence in the simulator C code.  We don't want a traceback printed
        # for a simulator interruption so in this event, just ignore the exception.
        # wware, bug 2022, 060714
        pass
    except:
        #bruce 060530 -- ideally we'd propogate the exception up to our caller the sim,
        # and it would propogate it back to the python calling code in this object,
        # so there would be no need to print it here. But that seems to be broken now,
        # whether in the sim or in the calling Python I don't know, so I'll print it here too.
        # But then I'll reraise it for when that gets fixed, and since even now it does succeed
        # in aborting the sim.
        print_compact_traceback("exception in sim_frame_callback (will be propogated to sim): ")
        raise
    return # from sim_frame_callback
# Per-run state flags (class-level defaults; instance attributes shadow these
# during a run). 'aborting' is set True by abort_sim_run once an abort begins;
# 'need_process_events' is set True whenever Qt should process events (progress
# bar updates, gl_update) and is checked/cleared by sim_frame_callback_updates.
aborting = False #bruce 060601
need_process_events = False #bruce 060601
def sim_frame_callback_worker(self, frame_number): #bruce 060102
    """
    Do whatever should be done on frame_callbacks that don't return immediately
    (due to not enough time passing), EXCEPT for Qt-related progress updates other than gl_update --
    caller must do those separately in sim_frame_callback_updates, if this method sets self.need_process_events.
    Might raise exceptions -- caller should protect itself from them until the sim does.
    + stuff new frame data into atom positions
    +? fix singlet positions, if not too slow
    + gl_update

    @param frame_number: the sim frame number this callback is for; recorded
                         on the movie and used in abort messages.
    """
    if not self.aborting: #bruce 060601 replaced 'if 1'
        if self.abortHandler and self.abortHandler.getPressCount() > 0:
            # extra space to distinguish which line got it -- this one is probably rarer, mainly gets it if nested task aborted(??)
            self.abort_sim_run("got real abort at frame %d" % frame_number) # this sets self.aborting flag
        ## # mflag == 1 => minimize, user preference determines whether we watch it in real time
        ## # mflag == 0 => dynamics, watch_motion (from movie setup dialog) determines real time
        ## elif ((not self.mflag and self._movie.watch_motion) or
        ##       (self.mflag and env.prefs[Adjust_watchRealtimeMinimization_prefs_key])):
        elif self._movie.watch_motion:
            # user asked to watch the motion in real time: grab the current
            # frame from the sim and push it into the model's atom positions
            from sim import theSimulator
            frame = theSimulator().getFrame()
            # stick the atom posns in, and adjust the singlet posns
            newPositions = frame
            movie = self._movie
            #bruce 060102 note: following code is approximately duplicated somewhere else in this file.
            try:
                movie.moveAtoms(newPositions)
            except ValueError: #bruce 060108
                # wrong number of atoms in newPositions (only catches a subset of possible model-editing-induced errors)
                self.abort_sim_run("can't apply frame %d, model has changed" % frame_number)
            else:
                if 1: #bruce 060108 part of fixing bug 1273
                    movie.realtime_played_framenumber = frame_number
                    movie.currentFrame = frame_number
                self.part.changed() #[bruce 060108 comment: moveAtoms should do this ###@@@]
                self.part.gl_update()
                # end of approx dup code
                self.need_process_events = True #bruce 060601
    return
def sim_frame_callback_updates(self): #bruce 060601 split out of sim_frame_callback_worker so it can be called separately
"""
Do Qt-related updates which are needed after something has updated progress bar displays or done gl_update
or printed history messages, if anything has set self.need_process_events to indicate it needs this
(and reset that flag):
- tell Qt to process events
- see if user aborted, if so, set flag in simulator object so it will abort too
(but for now, separate code will also terminate the sim run in the usual way,
reading redundantly from xyz file)
"""
if self.need_process_events:
# tell Qt to process events (for progress bar, its abort button, user moving the dialog or window, changing display mode,
# and for gl_update)
self.need_process_events = False
env.call_qApp_processEvents()
self.need_process_events = False # might not be needed; precaution in case of recursion
#e see if user aborted
if self.abortHandler and self.abortHandler.getPressCount() > 0:
self.abort_sim_run("frame %d" % self.__frame_number) # this also sets self.aborting [bruce 06061 revised text]
return
def tracefile_callback(self, line): #bruce 060109, revised 060112; needs to be fast; should optim by passing step method to .go
tp = self.tracefileProcessor
if tp:
tp.step(line)
def abort_sim_run(self, why = "(reason not specified by internal code)" ): #bruce 060102
"#doc"
wasaborting = self.aborting
self.aborting = True #bruce 060601
self.need_process_events = True #bruce 060601 precaution; might conceivably improve bugs in which abort confirm dialog is not taken down
self._simopts.Interrupted = True
if not self.errcode:
self.errcode = -1
####@@@@ temporary kluge in case of bugs in RuntimeError from that or its handler;
# also needed until we clean up our code to use the new sim.SimulatorInterrupt instead of RuntimeError [bruce 060111]
if not wasaborting: #bruce 060601 precaution
env.history.message( redmsg( "aborting sim run: %s" % why ))
return
tracefileProcessor = None
def print_sim_warnings(self): #bruce 050407; revised 060109, used whether or not we're not printing warnings continuously
    """
    Print warnings and errors from tracefile (if this was not already done);
    then print summary/finishing info related to tracefile.
    Note: this might change self.said_we_are_done to False or True, or leave it alone.

    Reads self.traceFileName; creates self.tracefileProcessor if it doesn't
    exist yet, feeds it every line of the trace file, then calls its finish().
    """
    # Note: this method is sometimes called after errors, and that is usually a bug but might sometimes be good;
    # caller needs cleanup about this.
    # Meanwhile, possible bug -- not sure revisions of 060109 (or prior state) is fully safe when called after errors.
    if not self.tracefileProcessor:
        # we weren't printing tracefile warnings continuously -- print them now
        try:
            simopts = self._simopts
        except:
            # I don't know if this can happen, no time to find out, not safe for A8 to assume it can't [bruce 060705]
            print "no _simopts"
            simopts = None
        self.tracefileProcessor = TracefileProcessor(self, simopts = simopts)
            # this might change self.said_we_are_done and/or use self.traceFileName, now and/or later
    try:
        tfile = self.traceFileName
    except AttributeError:
        return # sim never ran (not always an error, I suspect)
    if not tfile:
        return # no trace file was generated using a name we provide
               # (maybe the sim wrote one using a name it made up... nevermind that here)
    try:
        ff = open(tfile, "rU") # "U" probably not needed, but harmless
    except:
        #bruce 051230 fix probably-unreported bug when sim program is missing
        # (tho ideally we'd never get into this method in that case)
        print_compact_traceback("exception opening trace file %r: " % tfile)
        env.history.message( redmsg( "Error: simulator trace file not found at [%s]." % tfile ))
        self.tracefileProcessor.mentioned_sim_trace_file = True #k not sure if this is needed or has any effect
        return
    lines = ff.readlines()
        ## remove this in case those non-comment lines matter for the summary (unlikely, so add it back if too slow) [bruce 060112]
        ## lines = filter( lambda line: line.startswith("#"), lines )
        ##     # not just an optimization, since TracefileProcessor tracks non-# lines for status info
    ff.close()
    for line in lines:
        self.tracefileProcessor.step(line)
    # print summary/done
    self.tracefileProcessor.finish()
    return
def writeTrajectoryAtomIdMapFile(self, filename, used_atoms, all_atoms):
"""
Write a file that maps the ids of the atoms actually used for simulation
(used_atoms) to the atom ids of the same atoms within the complete
structure (a Part) as it was stored in an MMP file (all_atoms).
@param filename: pathname of file to create and write to.
@param used_atoms: dict of atoms used (atom.key -> atom id used for sim)
@param all_atoms: dict of all atoms in one Part (atom.key -> atom id
used when writing the file for that Part)
"""
#brian & bruce 080325
print "writeTrajectoryAtomIdMapFile", filename, \
len(used_atoms), len(all_atoms) # remove when works @@@@@
try:
fileHandle = open(filename, 'w')
header1 = "# Format: simulation_atom_id mmp_file_atom_id\n"
# format uses -1 for missing atom errors (should never happen)
fileHandle.write(header1)
header2 = "# (%d atoms used, %d atoms in all)\n" % \
( len(used_atoms), len(all_atoms) )
fileHandle.write(header2)
# compute the data
data = {}
for key, used_atom_id in used_atoms.iteritems():
all_atoms_id = all_atoms.get(key, -1)
if all_atoms_id == -1:
print "error: atom %r is in used_atoms (id %r) " \
"but not all_atoms" % (key, used_atom_id)
# todo: if this ever happens, also print
# a red summary message to history
data[used_atom_id] = all_atoms_id
continue
items = data.items()
items.sort()
# write the data
for used_atom_id, all_atoms_id in items:
fileHandle.write("%s %s\n" % (used_atom_id, all_atoms_id))
fileHandle.write("# end\n")
fileHandle.close()
except:
msg = self.cmdname + ": Failed to write [%s] " \
"(the simulation atom id to mmp file atom id map file)." % \
filename
env.history.message(redmsg(msg))
return
pass # end of class SimRunner
# ==
_print_sim_comments_to_history = False
"""
Date: 12 Jan 2006
From: ericm
To: bruce
Subject: Minimize trace file format
Here's the code that writes the trace file during minimize:
write_traceline("%4d %20f %20f %s %s\n", frameNumber, rms, max_force, callLocation, message);
You can count on the first three not changing.
Note that with some debugging flags on you get extra lines of this
same form that have other info in the same places. I think you can
just use the rms value for progress and it will do strange things if
you have that debugging flag on. If you want to ignore those lines,
you can only use lines that have callLocation=="gradient", and that
should work well.
-eric
"""
class TracefileProcessor: #bruce 060109 split this out of SimRunner to support continuous tracefile line processing
    """
    Helper object to filter tracefile lines and print history messages as they come and at the end.

    Lines starting with "#" are formatted comments (warnings, errors, Done
    summaries, pattern lines); other lines carry per-frame progress data
    (used by progress_text, when minimize is true).
    """
    # regexes for picking forces out of a "# Done:" line
    findRmsForce = re.compile("rms ([0-9.]+) pN")
    findHighForce = re.compile("high ([0-9.]+) pN")
    # matches formatted comment lines of the form "# <keyword>:<rest>"
    formattedCommentRegex = re.compile(r'^(# [^:]+:)(.*)')

    def __init__(self, owner, minimize = False, simopts = None):
        """
        store owner in self, so we can later set owner.said_we_are_done = True; also start

        @param owner: a SimRunner object; has attrs like part, _movie (with alist), used_atoms, ...
                      (the ones we set or use here are said_we_are_done, traceFileName, _movie, part)
        @param minimize: whether to check for line syntax specific to Minimize
        @param simopts: sim options object; supplies MinimizeThresholdEndRMS/EndMax in gotDone
        """
        self.owner = owner
        self.simopts = simopts #bruce 060705 for A8
        self.minimize = minimize # whether to check for line syntax specific to Minimize
        self.__last_plain_line_words = None # or words returned from string.split(None, 4)
        self.start() # too easy for client code to forget to do this
        self._pattern_atom_id_cache = {} # note: this cache and its associated methods
            # might be moved to another object, like self.owner
        return

    def start(self):
        """
        prepare to loop over lines
        """
        self.seen = {} # whether we saw each known error or warning tracefile-keyword
        self.donecount = 0 # how many Done keywords we saw in there
        self.mentioned_sim_trace_file = False # public, can be set by client code
        self.currentPatternName = "" # name of the most recently matched pattern, set by gotPatternMatch
        self.PAM5_handles = [] # list of [atom1, atom2, ks, r0] for PAM5-basepair-handle bonds

    def step(self, line): #k should this also be called by __call__ ? no, that would slow down its use as a callback.
        """
        do whatever should be done immediately with this line, and save things to do later;
        this bound method might be used directly as a trace_callback [but isn't, for clarity, as of 060109]
        """
        if not line.startswith("#"):
            # this happens a lot, needs to be as fast as possible
            if self.minimize:
                # check for "gradient" seems required based on current syntax (and will usually be true)
                # (as documented in email from ericm today) (if too slow, deferring until line used is tolerable,
                #  but might result in some missed lines, at least if sim internal debug flags are used) [bruce 060112]
                words = line.split(None, 4) # split in at most 4 places
                if len(words) >= 4 and words[3] == 'gradient': # 4th word -- see also self.progress_text()
                    self.__last_plain_line_words = words
                elif debug_flags.atom_debug:
                    print("atom_debug: weird tracef line: %s" % (line,)) ####@@@@ remove this? it happens normally at the end of many runs
            return
        if _print_sim_comments_to_history: #e add checkbox or debug-pref for this??
            env.history.message("tracefile: " + line)
        # don't discard initial "#" or "# "
        m = self.formattedCommentRegex.match(line)
        if (m):
            start = m.group(1)
            rest = m.group(2)
            if (start == "# Warning:" or start == "# Error:"):
                self.gotWarningOrError(start, line)
            elif start == "# Done:":
                self.gotDone(start, rest)
            elif start.startswith("# Pattern "):
                self.gotPattern(start, rest)
            ## else:
            ##     print "other formatted trace line: " + line.rstrip()
        return

    def gotWarningOrError(self, start, line):
        """
        Print a warning (orange) or error (red) tracefile line to history,
        prefixing the whole batch with an intro message the first time.
        """
        self.owner.said_we_are_done = False # not needed if lines come in their usual order
        if not self.seen:
            env.history.message( "Messages from simulator trace file:") #e am I right to not say this just for Done:?
            self.mentioned_sim_trace_file = True
        if start == "# Warning:":
            cline = orangemsg(line)
        else:
            cline = redmsg(line)
        env.history.message( cline) # leave in the '#' I think
        self.seen[start] = True

    def gotDone(self, start, rest):
        """
        Process a "# Done:" line: count it, and if it carries a message,
        print it to history, colored red/orange if errors/warnings were seen
        or the reported forces exceed the minimize end-condition thresholds.
        """
        # "Done:" line - emitted iff it has a message on it; doesn't trigger mention of tracefile name
        # if we see high forces, color the Done message orange, bug 1238, wware 060323
        #bruce 060705
        simopts = self.simopts
        try:
            endRMS = simopts.MinimizeThresholdEndRMS
        except AttributeError:
            # fix: the original printed the literal "%r" -- the format
            # argument was missing; apply it so simopts is actually shown
            print("simopts %r had no MinimizeThresholdEndRMS" % (simopts,))
            endRMS = 1.0 # was 2.0
        try:
            endMax = simopts.MinimizeThresholdEndMax
        except AttributeError:
            print("simopts %r had no MinimizeThresholdEndMax" % (simopts,)) # fix: same missing format argument
            endMax = 5.0 # was 2.0
        epsilon = 0.000001 # guess; goal is to avoid orangemsg due to roundoff when printing/reading values
        foundRms = self.findRmsForce.search(rest)
        if foundRms:
            foundRms = float(foundRms.group(1))
        foundHigh = self.findHighForce.search(rest)
        if foundHigh:
            foundHigh = float(foundHigh.group(1))
        highForces = ((foundRms is not None and foundRms > endRMS + epsilon) or
                      (foundHigh is not None and foundHigh > endMax + epsilon))
        self.donecount += 1
        text = rest.strip()
        if text:
            line = start + " " + text
            if "# Error:" in self.seen:
                line = redmsg(line)
            elif highForces or ("# Warning:" in self.seen):
                line = orangemsg(line)
            env.history.message(line) #k is this the right way to choose the color?
        # but don't do this, we want the main Done too: [bruce 050415]:
        ## self.owner.said_we_are_done = True
        return

    def gotPattern(self, start, rest):
        """
        Dispatch a "# Pattern <type>:" line to the matching handler, then
        optionally create graphical indicators for it (debug_pref).
        """
        if (start == "# Pattern match:"):
            self.gotPatternMatch(rest)
        elif (start == "# Pattern makeVirtualAtom:"):
            self.gotPatternMakeVirtualAtom(rest)
        elif (start == "# Pattern makeBond:"):
            self.gotPatternMakeBond(rest)
        elif (start == "# Pattern setStretchType:"):
            self.gotPatternSetStretchType(rest)
        elif (start == "# Pattern makeVanDerWaals:"):
            self.gotPatternMakeVanDerWaals(rest)
        elif (start == "# Pattern setType:"):
            self.gotPatternSetType(rest)
        else:
            print("gotPattern(): unknown type:  %s %s" % (start, rest))
        # if debug_pref is set, create graphical indicators for it
        # (possibly using info created by the always-on processing of the line)
        if pref_create_pattern_indicators():
            self.createPatternIndicator( start, rest)
        return

    # Pattern match: [31] (PAM5-basepair-handle) 2 6 22 13
    # [match number]
    # (pattern name)
    # atoms matched...
    def gotPatternMatch(self, rest):
        """
        Record the name of the pattern just matched (used by later lines).
        """
        line = rest.rstrip().split()
        # pattern match number = line[0]
        self.currentPatternName = line[1]
        # actual atoms matched follow

    # Pattern makeVirtualAtom: [5] {41} 3 1 5 20 11 x 0.814144 0.147775 0.000000
    # [match number]
    # {new atom id}
    # number of parent atoms
    # GROMACS function number
    # parent1, parent2, parent3, parent4
    # parameterA, parameterB, parameterC
    def gotPatternMakeVirtualAtom(self, rest):
        # nothing to do in the always-on processing; see createPatternIndicator
        pass

    # Pattern makeBond: [5] {47} {48} 1.046850 834.100000
    # [match number]
    # atom1, atom2 ({} indicates atom created by ND1)
    # ks, r0
    def gotPatternMakeBond(self, rest):
        """
        For PAM5-basepair-handle bonds, remember (atom1, atom2, ks, r0)
        so newAtomPositions can report the force on the handle.
        """
        # note: similar code is present in createPatternIndicator
        line = rest.rstrip().split()
        if (self.currentPatternName == "(PAM5-basepair-handle)"):
            atom1 = self._atomID(line[1])
            atom2 = self._atomID(line[2])
            ks = float(line[3])
            r0 = float(line[4])
            self.PAM5_handles += [[atom1, atom2, ks, r0]]

    # Pattern setStretchType: [9] 12 11 1.000000 509.000000
    # [match number]
    # atom1, atom2 ({} indicates atom created by ND1)
    # ks, r0
    def gotPatternSetStretchType(self, rest):
        # no always-on processing needed
        pass

    def gotPatternMakeVanDerWaals(self, rest):
        # no always-on processing needed
        pass

    def gotPatternSetType(self, rest):
        """
        Set overlay text on the referenced atom to the given type string.
        """
        line = rest.rstrip().split()
        atom = self.interpret_pattern_atom_id(line[1])
        atom.setOverlayText(line[2])

    def newAtomPositions(self, positions):
        """
        Given the latest frame's atom positions (angstroms, indexed by
        atom id - 1), report the current force on each recorded PAM5 handle.
        """
        for handle in self.PAM5_handles:
            atom1 = handle[0]
            atom2 = handle[1]
            ks = handle[2] # N/m
            r0 = handle[3] # pm
            pos1 = positions[atom1-1] # angstroms
            pos2 = positions[atom2-1] # angstroms
            delta = 100.0 * vlen(A(pos1) - A(pos2)) # pm
            force = abs((delta - r0) * ks) # pN
            env.history.message("Force on handle %d: %f pN" % (atom2, force))

    def _atomID(self, idString):
        """
        Parse an atom id string of the form "23" or "{23}" to an int.
        """
        if (idString.startswith("{")):
            s = idString[1:-1]
            return int(s)
        return int(idString)

    def progress_text(self): ####@@@@ call this instead of printing that time stuff
        """
        Return some brief text suitable for periodically displaying on statusbar to show progress
        """
        words = self.__last_plain_line_words
        if not words:
            return ""
        if len(words) == 4: #k needed?
            words = list(words) + [""]
        try:
            frameNumber, rms, max_force, callLocation, message = words
            assert callLocation == 'gradient'
        except:
            return "?"
        return "frame %s: rms force = %s; high force = %s" % (frameNumber, rms, max_force)
            # 'high' instead of 'max' is to match Done line syntax (by experiment as of 060112)

    def finish(self):
        """
        Print summary/finishing history messages: warn if no "# Done:" was
        seen, and mention the tracefile location if it was referred to
        (once per location, with general advice once per session).
        """
        if not self.donecount:
            self.owner.said_we_are_done = False # not needed unless other code has bugs
            # Note [bruce 050415]: this happens when user presses Abort,
            # since we don't abort the sim process gently enough. This should be fixed.
            #bruce 051230 changed following from redmsg to orangemsg
            env.history.message( orangemsg( "Warning: simulator trace file should normally end with \"# Done:\", but it doesn't."))
            self.mentioned_sim_trace_file = True
        if self.mentioned_sim_trace_file:
            # sim trace file was mentioned; user might wonder where it is...
            # but [bruce 050415] only say this if the location has changed since last time we said it,
            # and only include the general advice once per session.
            global last_sim_tracefile
            tfile = self.owner.traceFileName #bruce 060110 try to fix bug 1299
            if last_sim_tracefile != tfile:
                preach = (last_sim_tracefile is None)
                last_sim_tracefile = tfile
                msg = "(The simulator trace file was [%s]." % tfile
                if preach:
                    msg += " It might be overwritten the next time you run a similar command."
                msg += ")"
                env.history.message( msg)
        return

    def createPatternIndicator( self, start, rest): #bruce 080520
        """
        Create graphical indicator objects (virtual site atoms / struts)
        in the model for one tracefile pattern line.
        """
        ### TODO: add exception protection to caller.
        # start looks like "# Pattern <patterntype>:"
        patterntype = start[:-1].strip().split()[2]
        assy = self.owner.part.assy
        if patterntype == "makeVirtualAtom":
            # for format details see:
            #
            # http://www.nanoengineer-1.net/mediawiki/index.php?title=Tracefile_pattern_lines
            #
            # which says:
            #
            # rest looks like
            #
            #   [4] {22} 3 1 1 2 3 x -0.284437 0.710930 0.000000
            #
            # i.e.
            #
            #   [match sequence] {atom ID} num_parents
            #   function_id parentID1 parentID2 parentID3 parentID4 A B C
            #
            # In this case, there are only three parents, so parentID4 is "x"
            # instead of a number. Function_id 1 with 3 parents only uses two
            # parameters (A and B), so C is zero.
            #
            # For a three parent virtual site with function_id 1, here is how you
            # find the location of the site:
            #
            # Multiply the vector (parentID2 - parentID1) * A
            # Multiply the vector (parentID3 - parentID1) * B
            # Add the above two vectors to parentID1
            #
            # This is the only style of virtual site currently in use.  See the
            # GROMACS user manual for the definition of other types of virtual sites.
            words = rest.strip().split()
            ( matchseq, site_atom_id, num_parents, function_id,
              parentID1, parentID2, parentID3, parentID4,
              A, B, C, ) = words
            if 'killing old site_atoms before this point is nim': #### always-true marker string (placeholder condition)
                site_atom = self.interpret_pattern_atom_id( site_atom_id, ok_to_not_exist = True)
                if site_atom is not None:
                    site_atom.kill()
                    # review: will this automatically clear out the dict entry which maps site_atom_id to site_atom??
                pass
            num_parents = int(num_parents)
            function_id = int(function_id)
            parent_atoms = list(map( self.interpret_pattern_atom_id,
                                     [parentID1, parentID2, parentID3, parentID4][:num_parents] ))
            A, B, C = map(float, [A, B, C])
            if (num_parents, function_id) == (3, 1):
                # the only style of virtual site currently in use (as of 20080501)
                from model.virtual_site_indicators import add_virtual_site
                site_params = ( function_id, A, B)
                mt_name = "%s %s %0.2f %0.2f" % (matchseq, site_atom_id, A, B)
                site_atom = add_virtual_site(assy, parent_atoms, site_params,
                                             MT_name = mt_name
                                             )
                self.define_new_pattern_atom_id(site_atom_id, site_atom)
                assy.w.win_update() ### IMPORTANT OPTIM: do this only once, later (not in this method)
                ## self.needs_win_update = True -- how often to check this and do a real update??
            else:
                print("unrecognized kind of virtual site: %s" % (start + " " + rest.strip(),))
            pass
        elif patterntype == "makeBond":
            # note: similar code is present in gotPatternMakeBond
            #
            # Pattern makeBond: [5] {47} {48} 1.046850 834.100000
            # [match number]
            # atom1, atom2 ({} indicates atom created by ND1)
            # ks, r0
            words = rest.strip().split()
            ( matchseq, atom_id1, atom_id2, ks_s, r0_s, ) = words
            atom1 = self.interpret_pattern_atom_id(atom_id1)
            atom2 = self.interpret_pattern_atom_id(atom_id2)
            ks = float(ks_s) # N/m
            r0 = float(r0_s) # pm
            ## print "strut: r0: %f ks: %f" % (r0, ks), atom1, "-", atom2
            # create a strut between atom1 and atom2, length r0, stiffness ks.
            atoms = [atom1, atom2]
            bond_params = ( ks, r0 )
            mt_name = "%s %s-%s" % (matchseq, atom1, atom2) # ks and r0 too?
                # future: not always used, only used if atoms are not virtual sites
            from model.virtual_site_indicators import add_virtual_bond
            add_virtual_bond( assy, atoms, bond_params, MT_name = mt_name)
            pass
        return # from createPatternIndicator

    def interpret_pattern_atom_id(self, id_string, ok_to_not_exist = False):
        """
        Interpret a pattern atom id string of the form "23" or "{23}"
        as a real atom (using self.owner._movie.alist to map atom id number
        (as index in that list) to atom). Cache interpretations in self,
        for efficiency and so new atoms can be added without modifying alist.

        @return: the atom, or None if it doesn't exist and ok_to_not_exist.
        """
        # note: this method & its cache may need to be moved to another object.
        try:
            return self._pattern_atom_id_cache[ id_string ]
        except KeyError:
            atom_id_num = self._atomID( id_string)
            atom_id_index = atom_id_num - 1
            alist = self.owner._movie.alist
            if not (0 <= atom_id_index < len(alist)):
                # atom does not exist in alist
                if ok_to_not_exist:
                    return None
                else:
                    assert 0, "atom_id_num %d not found, only %d atoms" % \
                              ( atom_id_num, len(alist))
                    return None
                pass
            res = alist[atom_id_index]
            self._pattern_atom_id_cache[ id_string ] = res
            return res
        pass

    def define_new_pattern_atom_id( self, id_string, atom):
        """
        Record a newly created (virtual site) atom under its pattern id,
        killing any previous atom recorded under the same id.
        """
        if id_string in self._pattern_atom_id_cache:
            old_atom = self._pattern_atom_id_cache[ id_string ]
            print("killing old_atom %s" % (old_atom,)) # should not happen by the time we're done, maybe never
            old_atom.kill() ###k
        self._pattern_atom_id_cache[ id_string ] = atom
        print("defined %s %s" % (id_string, atom)) #####
        return

    pass # end of class TracefileProcessor
# this global needs to preserve its value when we reload!
# (records the last tracefile path mentioned to the user, so finish()
#  only repeats the location when it changes; None means "never mentioned")
try:
    last_sim_tracefile
except NameError:
    # fix: narrowed from a bare except -- only "name not yet defined"
    # (i.e. first definition, not a reload) should trigger initialization
    last_sim_tracefile = None
def part_contains_pam_atoms(part, kill_leftover_sim_feedback_atoms = False):
    """
    Returns non-zero if the given part contains any pam atoms.
    Returns less than zero if the part contains a mixture of pam and
    other atoms, or more than one type of pam atom. Singlets
    and "sim feedback atoms" (role == 'virtual-site') don't count.

    @param kill_leftover_sim_feedback_atoms: if true, do that as a side effect.
    """
    # probably written by EricM
    #bruce 080520 added kill_leftover_sim_feedback_atoms option,
    # and made this function non-private
    from utilities.constants import MODEL_PAM5, MODEL_PAM3
    found = { 'pam3': False, 'pam5': False, 'other': False }
    doomed_atoms = []
    def scan_node(node):
        # classify the atoms of one node; only Chunks hold atoms
        if not isinstance(node, Chunk):
            return
        for atom in node.atoms.itervalues():
            element = atom.element
            if element is Singlet:
                continue
            if element.role == 'virtual-site':
                if kill_leftover_sim_feedback_atoms:
                    doomed_atoms.append(atom)
            elif element.pam == MODEL_PAM3:
                found['pam3'] = True
            elif element.pam == MODEL_PAM5:
                found['pam5'] = True
            else:
                found['other'] = True
        return
    part.topnode.apply2all(scan_node)
    # killing is deferred, since it can't be done during apply2all:
    for atom in doomed_atoms:
        atom.kill()
    has_pam3 = found['pam3']
    has_pam5 = found['pam5']
    has_other = found['other']
    if has_pam3 and has_pam5:
        return -2 # mixture of PAM3 and PAM5
    if (has_pam3 or has_pam5) and has_other:
        return -1 # mixture of PAM and other atoms
    if has_pam3 or has_pam5:
        return 1 # purely PAM3 or purely PAM5
    return 0 # just other (or empty)
# ==
# writemovie used to be here, but is now split into methods
# of class SimRunner above [bruce 050401]
# ... but here's a compatibility stub... i guess
#obs comment:
# Run the simulator and tell it to create a dpb or xyz trajectory file.
# [bruce 050324 moved this here from fileIO.py. It should be renamed to run_simulator,
# since it does not always try to write a movie, but always tries to run the simulator.
# In fact (and in spite of not always making a movie file),
# maybe it should be a method of the Movie object,
# which is used before the movie file is made to hold the params for making it.
# (I'm not sure how much it's used when we'll make an .xyz file for Minimize.)
# If it's not made a Movie method, then at least it should be revised
# to accept the movie to use as an argument; and, perhaps, mainly called by a Movie method.
# For now, I renamed assy.m -> assy.current_movie, and never grab it here at all
# but let it be passed in instead.] ###@@@
def writemovie(part,
               movie,
               mflag = 0,
               simaspect = None,
               print_sim_warnings = False,
               cmdname = "Simulator",
               cmd_type = 'Minimize',
               useGromacs = False,
               background = False,
               useAMBER = False,
               typeFeedback = False):
    #bruce 060106 added cmdname
    """
    Write an input file for the simulator, then run the simulator,
    in order to create a moviefile (.dpb file), or an .xyz file containing all
    frames(??), or an .xyz file containing what would have
    been the moviefile's final frame. The name of the file it creates is found in
    movie.filename (it's made up here for mflag != 0, but must be inserted by caller
    for mflag == 0 ###k). The movie is created for the atoms in the movie's alist,
    or the movie will make a new alist from part if it doesn't have one yet
    (for Minimize Selection, it will probably already have one when this is called ###@@@).
    (This should be thought of as a Movie method even though it isn't one yet.)

    DPB = Differential Position Bytes (binary file)
    XYZ = XYZ trajectory file (text file)

    mflag: [note: mflag is called mtype in some of our callers!]
    0 = default, runs a full simulation using parameters stored in the movie object.
    1 = run the simulator with -m and -x flags, creating a single-frame XYZ file.
    2 = run the simulator with -m flags, creating a multi-frame DPB moviefile.

    @param part: the Part whose atoms are simulated.
    @param movie: Movie object holding sim parameters; also receives playing
                  state (currentFrame etc.) and the SimRunner kluge reference.
    @param simaspect: optional "aspect" restricting the sim to a subset of part.
    @param useGromacs, useAMBER, background: passed through to SimRunner to
           select the engine / force field / execution mode.

    Return value: false on success, true (actually an error code but no caller uses that)
    on failure (error message already emitted).
    Either way (success or not), also copy errors and warnings from tracefile to history,
    if print_sim_warnings = True. Someday this should happen in real time;
    for now [as of 050407] it happens once when we're done.
    """
    #bruce 050325 Q: why are mflags 0 and 2 different, and how? this needs cleanup.
    hasPAM = part_contains_pam_atoms(part)
        # returns a negative value for unsupported atom mixtures (see below),
        # otherwise a truth value for "contains PAM atoms".
        # POSSIBLE BUG: this check is done on entire Part
        # even if we're only minimizing a subset of it.
    if (hasPAM < 0):
        if (hasPAM < -1):
            msg = "calculations with mixed PAM3 and PAM5 atoms are not supported"
        else:
            msg = "calculations with mixed PAM and other atoms are not supported"
        env.history.message(orangemsg(msg))
        # note: no return statement (intentional?)
    hasPAM = not not hasPAM # normalize to a plain boolean before passing on
    simrun = SimRunner(part,
                       mflag,
                       simaspect = simaspect,
                       cmdname = cmdname,
                       cmd_type = cmd_type,
                       useGromacs = useGromacs,
                       background = background,
                       hasPAM = hasPAM,
                       useAMBER = useAMBER,
                       typeFeedback = typeFeedback)
        #e in future mflag should choose subclass (or caller should)
    movie._simrun = simrun #bruce 050415 kluge... see also the related movie._cmdname kluge
    movie.currentFrame = 0 #bruce 060108 moved this here, was in some caller's success cases
    movie.realtime_played_framenumber = 0 #bruce 060108
    movie.minimize_flag = not not mflag # whether we're doing some form of Minimize [bruce 060112]
    # wware 060420 - disable atom/bond highlighting while simulating, improves simulator performance
    part.assy.o.is_animating = True
    simrun.run_using_old_movie_obj_to_hold_sim_params(movie)
    part.assy.o.is_animating = False # re-enable highlighting even if the sim failed
    if 1:
        #bruce 060108 part of fixing bug 1273
        fn = movie.realtime_played_framenumber
        if fn:
            if not movie.minimize_flag: #bruce 060112
                #e a more accurate condition would be something like "if we made a movie file and bragged about it"
                msg = "(current atom positions correspond to movie frame %d)" % fn
                env.history.message(greenmsg(msg))
        assert movie.currentFrame == fn
    if print_sim_warnings and simrun.errcode != FAILURE_ALREADY_DOCUMENTED:
        # If there was a clear error then don't print a lot of lower-priority less urgent stuff
        # after the bright red error message.
        try:
            simrun.print_sim_warnings()
            #bruce 051230 comment: this runs even if sim executable was not found; why?? ####@@@@
            # guess: need to check error code from run_using_old_movie_obj_to_hold_sim_params;
            # that's done by checking simrun.errcode, but I wonder if for some values (like user aborted sim)
            # we should still print the warnings? So I'll refrain from not trying to print them on errcode, for now.
            # Instead I made it (print_sim_warnings) handle the error of not finding the trace file,
            # instead of raising an exception then.
        except:
            # best-effort: a bug in warning-printing must not mask the sim result
            print_compact_traceback("bug in print_sim_warnings, ignored: ")
    return simrun.errcode
# ==
#bruce 050324 moved readxyz here from fileIO, added filename and alist args,
# removed assy arg (though soon we'll need it or a history arg),
# standardized indentation, revised docstring [again, 050404] and some comments.
#bruce 050404 reworded messages & revised their printed info,
# and changed error return to return the error message string
# (so caller can print it to history if desired).
# The original in fileIO was by Huaicai shortly after 050120.
#bruce 050406 further revisions (as commented).
def readxyz(filename, alist):
"""
Read a single-frame XYZ file created by the simulator, typically for
minimizing a part. Check file format, check element types against those
in alist (the number of atoms and order of their elements must agree).
[As of 050406, also permit H in the file to match a singlet in alist.]
This test will probably fail unless the xyz file was created
using the same atoms (in the same order) as in alist. If the atom set
is the same (and the same session, or the same chunk in an mmp file,
is involved), then the fact that we sort atoms by key when creating
alists for writing sim-input mmp files might make this order likely to match.
On error, print a message to stdout and also return it to the caller.
On success, return a list of atom new positions
in the same order as in the xyz file (hopefully the same order as in alist).
"""
from model.elements import Singlet
xyzFile = filename ## was assy.m.filename
lines = open(xyzFile, "rU").readlines()
if len(lines) < 3: ##Invalid file format
msg = "readxyz: %s: File format error (fewer than 3 lines)." % xyzFile
print msg
return msg
atomList = alist ## was assy.alist, with assy passed as an arg
# bruce comment 050324: this list or its atoms are not modified in this function
## stores the new position for each atom in atomList
newAtomsPos = []
try:
numAtoms_junk = int(lines[0])
rms_junk = float(lines[1][4:])
except ValueError:
msg = "readxyz: %s: File format error in Line 1 and/or Line 2" % xyzFile
print msg
return msg
atomIndex = 0
for line in lines[2:]:
words = line.split()
if len(words) != 4:
msg = "readxyz: %s: Line %d format error." % (xyzFile, lines.index(line) + 1)
#bruce 050404 fixed order of printfields, added 1 to index
print msg
return msg
try:
if words[0] != atomList[atomIndex].element.symbol:
if words[0] == 'H' and atomList[atomIndex].element == Singlet:
#bruce 050406 permit this, to help fix bug 254 by writing H to sim for Singlets in memory
pass
else:
msg = "readxyz: %s: atom %d (%s) has wrong element type." % (xyzFile, atomIndex+1, atomList[atomIndex])
#bruce 050404: atomIndex is not very useful, so I added 1
# (to make it agree with likely number in mmp file)
# and the atom name from the model.
###@@@ need to fix this for H vs singlet (then do we revise posn here or in caller?? probably in caller)
print msg
return msg
newAtomsPos += [map(float, words[1:])]
except ValueError:
msg = "readxyz: %s: atom %d (%s) position number format error." % (xyzFile, atomIndex+1, atomList[atomIndex])
#bruce 050404: same revisions as above.
print msg
return msg
except:
#bruce 060108 added this case (untested) since it looks necessary to catch atomList[atomIndex] attributeerrors
msg = "readxyz: %s: error (perhaps fewer atoms in model than in xyz file)" % (xyzFile,)
print msg
return msg
atomIndex += 1
if (len(newAtomsPos) != len(atomList)): #bruce 050225 added some parameters to this error message
msg = "readxyz: The number of atoms from %s (%d) is not matching with the current model (%d)." % \
(xyzFile, len(newAtomsPos), len(atomList))
print msg
return msg #bruce 050404 added error return after the above print statement; not sure if its lack was new or old bug
return newAtomsPos
def readGromacsCoordinates(filename, atomList, tracefileProcessor = None):
    """
    Read a coordinate (.gro) file created by GROMACS, typically for
    minimizing a part.

    @param filename: path of the .gro file; a sibling ".translate" file
                     (if present) supplies an origin offset in nm.
    @param atomList: the model's atoms; only the first len(atomList)
                     records are returned (virtual sites trail them).
    @param tracefileProcessor: if given, receives ALL positions (including
                               virtual sites) via newAtomPositions().

    On error, print a message to stdout and also return it to the caller.
    On success, return a list of atom new positions
    in the same order as in the xyz file (hopefully the same order as in alist).
    """
    translateFileName = None
    if (filename.endswith("-out.gro")):
        translateFileName = filename[:-8] + ".translate"
    elif (filename.endswith(".gro")):
        translateFileName = filename[:-4] + ".translate"
    # GROMACS works in nm near the origin; the .translate file holds the nm
    # offset needed to map results back into model space (times 10 -> angstroms).
    dX = 0.0
    dY = 0.0
    dZ = 0.0
    if (translateFileName is not None):
        # Bugfix: previously a filename without a .gro suffix left
        # translateFileName == None and open(None) raised an uncaught
        # TypeError; now we simply skip the translation lookup.
        try:
            translateFile = open(translateFileName, "rU")
            try:
                dX = float(translateFile.readline()) * 10.0
                dY = float(translateFile.readline()) * 10.0
                dZ = float(translateFile.readline()) * 10.0
            finally:
                translateFile.close()
        except IOError:
            # Ok for file not to exist, assume no translation
            dX = 0.0
            dY = 0.0
            dZ = 0.0
    try:
        coordinateFile = open(filename, "rU")
        try:
            lines = coordinateFile.readlines()
        finally:
            coordinateFile.close()
    except IOError:
        msg = "readGromacsCoordinates: %s: Can't open or read file." % filename
        print(msg)
        return msg
    except:
        msg = "readGromacsCoordinates: %s: Exception opening or reading file" % filename
        print_compact_traceback(msg + ": ")
        return msg + " (see console prints)."
    if len(lines) < 3: ##Invalid file format
        msg = "readGromacsCoordinates: %s: File format error (fewer than 3 lines)." % filename
        print(msg)
        return msg
    newAtomsPos = []
    allAtomPositions = []
    try:
        # line 1 is a title, line 2 the atom count; the last line is the box.
        numAtoms_junk = int(lines[1])
    except ValueError:
        msg = "readGromacsCoordinates: %s: File format error in Line 2" % filename
        print(msg)
        return msg
    atomIndex = 0
    for line in lines[2:-1]:
        # Fixed-column .gro record layout (20 chars of ids, then 3 x 8-char
        # coordinates in nm):
        #          1         2         3         4
        #01234567890123456789012345678901234567890123456789
        #    1xxx            A1    1   9.683   9.875   0.051
        xstr = line[20:28]
        ystr = line[28:36]
        zstr = line[36:44]
        extraString = line[44:]
        if (extraString.strip() != ""):
            return "GROMACS minimize returned malformed results (output overflow?)"
        if (xstr == "     nan" or ystr == "     nan" or zstr == "     nan"):
            return "GROMACS minimize returned undefined results"
        try:
            x = float(xstr) * 10.0 + dX
            y = float(ystr) * 10.0 + dY
            z = float(zstr) * 10.0 + dZ
        except ValueError:
            return "Error parsing GROMACS minimize results: [%s][%s][%s]" % (xstr, ystr, zstr)
        atomIndex += 1
        if (atomIndex <= len(atomList)):
            # coordinates of virtual sites are reported at end of
            # list, and we want to ignore them.
            newAtomsPos += [[x, y, z]]
        allAtomPositions += [[x, y, z]]
    if (len(newAtomsPos) != len(atomList)):
        msg = "readGromacsCoordinates: The number of atoms from %s (%d) is not matching with the current model (%d)." % \
            (filename, len(newAtomsPos), len(atomList))
        print(msg)
        return msg
    if (tracefileProcessor):
        tracefileProcessor.newAtomPositions(allAtomPositions)
    return newAtomsPos
# end
| NanoCAD-master | cad/src/simulation/runSim.py |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'SimSetupDialog.ui'
#
# Created: Fri Jun 06 12:02:42 2008
# by: PyQt4 UI code generator 4.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_SimSetupDialog(object):
    """
    Auto-generated (pyuic4, from SimSetupDialog.ui) layout class for the
    'Run Dynamics' setup dialog.

    Do not edit by hand; regenerate from the .ui file instead, since manual
    changes will be lost (see the warning in this file's header). The dialog
    contains three group boxes (simulation parameters, watch-motion options,
    MD engine choice) over a Cancel / Run Simulation button row.
    """
    def setupUi(self, SimSetupDialog):
        """
        Create and lay out all widgets on SimSetupDialog (a QDialog-like
        widget supplied by the caller) and wire the Cancel button to close.
        """
        SimSetupDialog.setObjectName("SimSetupDialog")
        SimSetupDialog.setResize(QtCore.QSize(QtCore.QRect(0,0,250,350).size()).expandedTo(SimSetupDialog.minimumSizeHint())) if False else SimSetupDialog.resize(QtCore.QSize(QtCore.QRect(0,0,250,350).size()).expandedTo(SimSetupDialog.minimumSizeHint()))
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Policy(5),QtGui.QSizePolicy.Policy(3))
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(SimSetupDialog.sizePolicy().hasHeightForWidth())
        SimSetupDialog.setSizePolicy(sizePolicy)
        SimSetupDialog.setMinimumSize(QtCore.QSize(0,350))
        SimSetupDialog.setModal(True)
        self.gridlayout = QtGui.QGridLayout(SimSetupDialog)
        self.gridlayout.setMargin(9)
        self.gridlayout.setSpacing(6)
        self.gridlayout.setObjectName("gridlayout")
        # --- bottom row: whats-this / spacer / Cancel / Run Simulation ---
        self.hboxlayout = QtGui.QHBoxLayout()
        self.hboxlayout.setMargin(0)
        self.hboxlayout.setSpacing(6)
        self.hboxlayout.setObjectName("hboxlayout")
        self.whatsthis_btn = QtGui.QToolButton(SimSetupDialog)
        self.whatsthis_btn.setObjectName("whatsthis_btn")
        self.hboxlayout.addWidget(self.whatsthis_btn)
        spacerItem = QtGui.QSpacerItem(21,25,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Minimum)
        self.hboxlayout.addItem(spacerItem)
        self.cancel_btn = QtGui.QPushButton(SimSetupDialog)
        self.cancel_btn.setDefault(False)
        self.cancel_btn.setObjectName("cancel_btn")
        self.hboxlayout.addWidget(self.cancel_btn)
        self.run_sim_btn = QtGui.QPushButton(SimSetupDialog)
        self.run_sim_btn.setDefault(True)
        self.run_sim_btn.setObjectName("run_sim_btn")
        self.hboxlayout.addWidget(self.run_sim_btn)
        self.gridlayout.addLayout(self.hboxlayout,4,0,1,1)
        spacerItem1 = QtGui.QSpacerItem(20,16,QtGui.QSizePolicy.Minimum,QtGui.QSizePolicy.Expanding)
        self.gridlayout.addItem(spacerItem1,3,0,1,1)
        # --- "Simulation parameters" group: frames / steps / temperature ---
        self.parms_grpbox = QtGui.QGroupBox(SimSetupDialog)
        self.parms_grpbox.setObjectName("parms_grpbox")
        self.vboxlayout = QtGui.QVBoxLayout(self.parms_grpbox)
        self.vboxlayout.setMargin(4)
        self.vboxlayout.setSpacing(4)
        self.vboxlayout.setObjectName("vboxlayout")
        self.hboxlayout1 = QtGui.QHBoxLayout()
        self.hboxlayout1.setMargin(0)
        self.hboxlayout1.setSpacing(4)
        self.hboxlayout1.setObjectName("hboxlayout1")
        self.vboxlayout1 = QtGui.QVBoxLayout()
        self.vboxlayout1.setMargin(0)
        self.vboxlayout1.setSpacing(4)
        self.vboxlayout1.setObjectName("vboxlayout1")
        self.textLabel5 = QtGui.QLabel(self.parms_grpbox)
        self.textLabel5.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.textLabel5.setObjectName("textLabel5")
        self.vboxlayout1.addWidget(self.textLabel5)
        self.textLabel2 = QtGui.QLabel(self.parms_grpbox)
        self.textLabel2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.textLabel2.setObjectName("textLabel2")
        self.vboxlayout1.addWidget(self.textLabel2)
        self.textLabel3 = QtGui.QLabel(self.parms_grpbox)
        self.textLabel3.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
        self.textLabel3.setObjectName("textLabel3")
        self.vboxlayout1.addWidget(self.textLabel3)
        self.hboxlayout1.addLayout(self.vboxlayout1)
        self.vboxlayout2 = QtGui.QVBoxLayout()
        self.vboxlayout2.setMargin(0)
        self.vboxlayout2.setSpacing(4)
        self.vboxlayout2.setObjectName("vboxlayout2")
        self.totalFramesSpinBox = QtGui.QSpinBox(self.parms_grpbox)
        self.totalFramesSpinBox.setMaximum(1000000)
        self.totalFramesSpinBox.setMinimum(1)
        self.totalFramesSpinBox.setSingleStep(15)
        self.totalFramesSpinBox.setProperty("value",QtCore.QVariant(900))
        self.totalFramesSpinBox.setObjectName("totalFramesSpinBox")
        self.vboxlayout2.addWidget(self.totalFramesSpinBox)
        self.stepsPerFrameDoubleSpinBox = QtGui.QDoubleSpinBox(self.parms_grpbox)
        self.stepsPerFrameDoubleSpinBox.setDecimals(2)
        self.stepsPerFrameDoubleSpinBox.setSingleStep(0.1)
        self.stepsPerFrameDoubleSpinBox.setProperty("value",QtCore.QVariant(1.0))
        self.stepsPerFrameDoubleSpinBox.setObjectName("stepsPerFrameDoubleSpinBox")
        self.vboxlayout2.addWidget(self.stepsPerFrameDoubleSpinBox)
        self.temperatureSpinBox = QtGui.QSpinBox(self.parms_grpbox)
        self.temperatureSpinBox.setMaximum(99999)
        self.temperatureSpinBox.setProperty("value",QtCore.QVariant(300))
        self.temperatureSpinBox.setObjectName("temperatureSpinBox")
        self.vboxlayout2.addWidget(self.temperatureSpinBox)
        self.hboxlayout1.addLayout(self.vboxlayout2)
        spacerItem2 = QtGui.QSpacerItem(40,20,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Minimum)
        self.hboxlayout1.addItem(spacerItem2)
        self.vboxlayout.addLayout(self.hboxlayout1)
        self.potential_energy_checkbox = QtGui.QCheckBox(self.parms_grpbox)
        self.potential_energy_checkbox.setObjectName("potential_energy_checkbox")
        self.vboxlayout.addWidget(self.potential_energy_checkbox)
        self.gridlayout.addWidget(self.parms_grpbox,0,0,1,1)
        # --- "Watch motion in real time" group (checkable) ---
        self.watch_motion_groupbox = QtGui.QGroupBox(SimSetupDialog)
        self.watch_motion_groupbox.setCheckable(True)
        self.watch_motion_groupbox.setChecked(True)
        self.watch_motion_groupbox.setObjectName("watch_motion_groupbox")
        self.gridlayout1 = QtGui.QGridLayout(self.watch_motion_groupbox)
        self.gridlayout1.setMargin(4)
        self.gridlayout1.setSpacing(2)
        self.gridlayout1.setObjectName("gridlayout1")
        self.hboxlayout2 = QtGui.QHBoxLayout()
        self.hboxlayout2.setMargin(0)
        self.hboxlayout2.setSpacing(4)
        self.hboxlayout2.setObjectName("hboxlayout2")
        self.update_every_rbtn = QtGui.QRadioButton(self.watch_motion_groupbox)
        self.update_every_rbtn.setObjectName("update_every_rbtn")
        self.hboxlayout2.addWidget(self.update_every_rbtn)
        self.update_number_spinbox = QtGui.QSpinBox(self.watch_motion_groupbox)
        self.update_number_spinbox.setMaximum(9999)
        self.update_number_spinbox.setMinimum(1)
        self.update_number_spinbox.setProperty("value",QtCore.QVariant(1))
        self.update_number_spinbox.setObjectName("update_number_spinbox")
        self.hboxlayout2.addWidget(self.update_number_spinbox)
        self.update_units_combobox = QtGui.QComboBox(self.watch_motion_groupbox)
        self.update_units_combobox.setObjectName("update_units_combobox")
        self.hboxlayout2.addWidget(self.update_units_combobox)
        spacerItem3 = QtGui.QSpacerItem(71,16,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Minimum)
        self.hboxlayout2.addItem(spacerItem3)
        self.gridlayout1.addLayout(self.hboxlayout2,1,0,1,1)
        self.update_asap_rbtn = QtGui.QRadioButton(self.watch_motion_groupbox)
        self.update_asap_rbtn.setChecked(True)
        self.update_asap_rbtn.setObjectName("update_asap_rbtn")
        self.gridlayout1.addWidget(self.update_asap_rbtn,0,0,1,1)
        self.gridlayout.addWidget(self.watch_motion_groupbox,1,0,1,1)
        # --- "Molecular Dynamics Engine" group: engine combo + DNA option ---
        self.md_engine_groupbox = QtGui.QGroupBox(SimSetupDialog)
        self.md_engine_groupbox.setObjectName("md_engine_groupbox")
        self.vboxlayout3 = QtGui.QVBoxLayout(self.md_engine_groupbox)
        self.vboxlayout3.setMargin(4)
        self.vboxlayout3.setSpacing(4)
        self.vboxlayout3.setObjectName("vboxlayout3")
        self.hboxlayout3 = QtGui.QHBoxLayout()
        self.hboxlayout3.setMargin(0)
        self.hboxlayout3.setSpacing(4)
        self.hboxlayout3.setObjectName("hboxlayout3")
        self.simulation_engine_combobox = QtGui.QComboBox(self.md_engine_groupbox)
        self.simulation_engine_combobox.setObjectName("simulation_engine_combobox")
        self.hboxlayout3.addWidget(self.simulation_engine_combobox)
        spacerItem4 = QtGui.QSpacerItem(40,20,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Minimum)
        self.hboxlayout3.addItem(spacerItem4)
        self.vboxlayout3.addLayout(self.hboxlayout3)
        self.electrostaticsForDnaDuringDynamics_checkBox = QtGui.QCheckBox(self.md_engine_groupbox)
        self.electrostaticsForDnaDuringDynamics_checkBox.setChecked(True)
        self.electrostaticsForDnaDuringDynamics_checkBox.setObjectName("electrostaticsForDnaDuringDynamics_checkBox")
        self.vboxlayout3.addWidget(self.electrostaticsForDnaDuringDynamics_checkBox)
        self.gridlayout.addWidget(self.md_engine_groupbox,2,0,1,1)
        self.retranslateUi(SimSetupDialog)
        QtCore.QObject.connect(self.cancel_btn,QtCore.SIGNAL("clicked()"),SimSetupDialog.close)
        QtCore.QMetaObject.connectSlotsByName(SimSetupDialog)
    def retranslateUi(self, SimSetupDialog):
        """
        Set all user-visible (translatable) strings: window title, labels,
        tooltips, button texts and combobox items.
        """
        SimSetupDialog.setWindowTitle(QtGui.QApplication.translate("SimSetupDialog", "Run Dynamics", None, QtGui.QApplication.UnicodeUTF8))
        SimSetupDialog.setToolTip(QtGui.QApplication.translate("SimSetupDialog", "Run Dynamics Setup Dialog", None, QtGui.QApplication.UnicodeUTF8))
        self.cancel_btn.setText(QtGui.QApplication.translate("SimSetupDialog", "Cancel", None, QtGui.QApplication.UnicodeUTF8))
        self.run_sim_btn.setText(QtGui.QApplication.translate("SimSetupDialog", "Run Simulation", None, QtGui.QApplication.UnicodeUTF8))
        self.parms_grpbox.setTitle(QtGui.QApplication.translate("SimSetupDialog", "Simulation parameters", None, QtGui.QApplication.UnicodeUTF8))
        self.textLabel5.setText(QtGui.QApplication.translate("SimSetupDialog", "Total frames:", None, QtGui.QApplication.UnicodeUTF8))
        self.textLabel2.setText(QtGui.QApplication.translate("SimSetupDialog", "Steps per frame:", None, QtGui.QApplication.UnicodeUTF8))
        self.textLabel3.setText(QtGui.QApplication.translate("SimSetupDialog", "Temperature:", None, QtGui.QApplication.UnicodeUTF8))
        self.totalFramesSpinBox.setToolTip(QtGui.QApplication.translate("SimSetupDialog", "Total Frames value", None, QtGui.QApplication.UnicodeUTF8))
        self.totalFramesSpinBox.setSuffix(QtGui.QApplication.translate("SimSetupDialog", " frames", None, QtGui.QApplication.UnicodeUTF8))
        self.stepsPerFrameDoubleSpinBox.setSuffix(QtGui.QApplication.translate("SimSetupDialog", " femtoseconds", None, QtGui.QApplication.UnicodeUTF8))
        self.temperatureSpinBox.setToolTip(QtGui.QApplication.translate("SimSetupDialog", "Temperature", None, QtGui.QApplication.UnicodeUTF8))
        self.temperatureSpinBox.setSuffix(QtGui.QApplication.translate("SimSetupDialog", " K", None, QtGui.QApplication.UnicodeUTF8))
        self.potential_energy_checkbox.setText(QtGui.QApplication.translate("SimSetupDialog", "Plot energy in tracefile", None, QtGui.QApplication.UnicodeUTF8))
        self.watch_motion_groupbox.setTitle(QtGui.QApplication.translate("SimSetupDialog", "Watch motion in real time", None, QtGui.QApplication.UnicodeUTF8))
        self.update_every_rbtn.setToolTip(QtGui.QApplication.translate("SimSetupDialog", "Specify how often to update the screen during the simulation.", None, QtGui.QApplication.UnicodeUTF8))
        self.update_every_rbtn.setText(QtGui.QApplication.translate("SimSetupDialog", "Update every", None, QtGui.QApplication.UnicodeUTF8))
        self.update_number_spinbox.setToolTip(QtGui.QApplication.translate("SimSetupDialog", "Specify how often to update the screen during the simulation.", None, QtGui.QApplication.UnicodeUTF8))
        self.update_units_combobox.setToolTip(QtGui.QApplication.translate("SimSetupDialog", "Specify how often to update the screen during the simulation.", None, QtGui.QApplication.UnicodeUTF8))
        self.update_units_combobox.addItem(QtGui.QApplication.translate("SimSetupDialog", "frames", None, QtGui.QApplication.UnicodeUTF8))
        self.update_units_combobox.addItem(QtGui.QApplication.translate("SimSetupDialog", "seconds", None, QtGui.QApplication.UnicodeUTF8))
        self.update_units_combobox.addItem(QtGui.QApplication.translate("SimSetupDialog", "minutes", None, QtGui.QApplication.UnicodeUTF8))
        self.update_units_combobox.addItem(QtGui.QApplication.translate("SimSetupDialog", "hours", None, QtGui.QApplication.UnicodeUTF8))
        self.update_asap_rbtn.setToolTip(QtGui.QApplication.translate("SimSetupDialog", "Update every 2 seconds, or faster if it doesn\'t slow simulation by more than 20%", None, QtGui.QApplication.UnicodeUTF8))
        self.update_asap_rbtn.setText(QtGui.QApplication.translate("SimSetupDialog", "Update as fast as possible", None, QtGui.QApplication.UnicodeUTF8))
        self.md_engine_groupbox.setTitle(QtGui.QApplication.translate("SimSetupDialog", "Molecular Dynamics Engine", None, QtGui.QApplication.UnicodeUTF8))
        self.simulation_engine_combobox.setToolTip(QtGui.QApplication.translate("SimSetupDialog", "Choose the simulation engine with which to minimize energy.", None, QtGui.QApplication.UnicodeUTF8))
        self.simulation_engine_combobox.addItem(QtGui.QApplication.translate("SimSetupDialog", "NanoDynamics-1 (Default)", None, QtGui.QApplication.UnicodeUTF8))
        self.simulation_engine_combobox.addItem(QtGui.QApplication.translate("SimSetupDialog", "GROMACS", None, QtGui.QApplication.UnicodeUTF8))
        self.electrostaticsForDnaDuringDynamics_checkBox.setText(QtGui.QApplication.translate("SimSetupDialog", "Electrostatics for DNA reduced model", None, QtGui.QApplication.UnicodeUTF8))
| NanoCAD-master | cad/src/simulation/SimSetupDialog.py |
# Copyright 2008 Nanorex, Inc. See LICENSE file for details.
"""
GromacsLog.py
Parse the .log file output of the GROMACS mdrun program, specifically
during conjugate gradients minimization. Note the energy values for
various components as they change, and report their final values.
@author: Eric M
@version: $Id$
@copyright: 2008 Nanorex, Inc. See LICENSE file for details.
GROMACS is not very computer friendly about its output formats. In
this case, table headings and values appear on alternate lines, and
which ones are included varies from run to run.
Sample output:
Configuring nonbonded kernels...
Testing ia32 SSE2 support... present.
Step Time Lambda
0 0.00000 0.00000
Energies (kJ/mol)
Bond Harmonic Pot. LJ-14 Coulomb-14 LJ (SR)
1.67136e+01 3.91076e+01 6.14294e-01 0.00000e+00 4.51586e+01
Coulomb (SR) Potential Kinetic En. Total Energy Temperature
0.00000e+00 1.01594e+02 0.00000e+00 1.01594e+02 0.00000e+00
Pressure (bar)
0.00000e+00
F-max = 1.11581e+01 on atom 237
F-Norm = 7.33720e+01
Step Time Lambda
0 0.00000 0.00000
Energies (kJ/mol)
Bond Harmonic Pot. LJ-14 Coulomb-14 LJ (SR)
1.65284e+01 3.88862e+01 6.14476e-01 0.00000e+00 4.51403e+01
Coulomb (SR) Potential Kinetic En. Total Energy Temperature
0.00000e+00 1.01169e+02 0.00000e+00 1.01169e+02 0.00000e+00
Pressure (bar)
0.00000e+00
Step Time Lambda
0 0.00000 0.00000
Energies (kJ/mol)
Bond Harmonic Pot. LJ (SR) Coulomb (SR) Potential
1.96551e+00 1.16216e+00 0.00000e+00 0.00000e+00 3.12768e+00
Kinetic En. Total Energy Temperature Pressure (bar)
0.00000e+00 3.12768e+00 0.00000e+00 0.00000e+00
"""
import foundation.env as env
from utilities.Log import quote_html, orangemsg
AVOGADRO = 6.022045e23 # particles/mol

class GromacsLog(object):
    """
    Incremental parser for the .log output of the GROMACS mdrun program
    during conjugate gradients minimization.

    Feed the log one line at a time via addLine(). GROMACS prints energy
    tables whose headings and values appear on alternating lines, with a
    varying set of columns per run; this class collects the latest value for
    each heading, reports running totals to the status bar, and emits a
    final energy summary to the history when the run converges.
    """
    def __init__(self):
        # state-machine position; see addLine() for the meaning of each state
        self.state = 0
        self._resetColumns()

    def addLine(self, line):
        """
        Process one line of mdrun log output.

        States: 0 = scanning; 1 = just saw the 'Step Time Lambda' heading;
        2 = captured the step number; 3 = saw the blank line after it;
        4 = inside an 'Energies' table, expecting a heading line (a blank
        line here ends the table); 5 = expecting the value line matching
        the most recent headings.
        """
        columns = line.split()
        if (len(columns) == 3 and
            columns[0] == 'Step' and
            columns[1] == 'Time' and
            columns[2] == 'Lambda'):
            self._resetColumns()
            self.state = 1
            return
        if (self.state == 1 and len(columns) == 3):
            self.step = columns[0]
            self.state = 2
            return
        if (self.state == 2 and len(columns) == 0):
            self.state = 3
            return
        if (self.state == 3 and len(columns) == 2 and columns[0] == 'Energies'):
            self.state = 4
            return
        if (self.state == 4):
            if (len(columns) > 0):
                self.state = 5
                self.column_headers = self._extractColumns(line.rstrip())
                return
            else:
                # blank line ends the energy table; show the running totals
                self._emitColumns()
                self.state = 0
                return
        if (self.state == 5):
            if (len(columns) == len(self.column_headers)):
                self._addColumns(self.column_headers, columns)
                self.state = 4
                return
            else:
                self.state = 0 # this never happens
                return
        # Stepsize too small, or no change in energy.
        # Converged to machine precision,
        # but not to the requested precision Fmax < 0.006022
        #
        # Polak-Ribiere Conjugate Gradients did not converge to Fmax < 0.006022 in 100001 steps.
        if (line.find("converge") >= 0 and line.find("Fmax") >= 0):
            env.history.message("Energy (Bond, Strut, Nonbonded): (%f, %f, %f) zJ" %
                                (self.getBondEnergy(),
                                 self.getHarmonicEnergy(),
                                 self.getNonbondedEnergy()))
            env.history.message("Total Energy %f zJ" % self.getTotalEnergy())
            if (line.find("machine") >= 0 or line.find("did not") >= 0):
                env.history.message(orangemsg(quote_html(line.rstrip())))
            else:
                env.history.message(quote_html(line.rstrip()))
        return

    def _extractColumns(self, line):
        """
        Split a table heading line into its fixed-width 15-character
        columns (GROMACS right-justifies each heading in a 15-char field);
        return the stripped heading strings.
        """
        columns = []
        while (len(line) >= 15):
            col = line[:15]
            line = line[15:]
            columns.append(col.strip())
        return columns

    def _resetColumns(self):
        # forget all collected values; done at the start of each step block
        self._values = {}
        self.step = "-"

    def _addColumns(self, headers, values):
        """
        Record one value line: pair each heading with the string value
        beneath it (later tables in the same step overwrite earlier ones).
        """
        for i in range(len(headers)):
            key = headers[i]
            value = values[i]
            self._values[key] = value

    def _emitColumns(self):
        """
        Show the current step's (bond + strut + nonbonded) = total energy
        summary, in zJ, on the status bar.
        """
        result = "%s: (%f + %f + %f) = %f zJ" % (self.step,
                                                 self.getBondEnergy(),
                                                 self.getHarmonicEnergy(),
                                                 self.getNonbondedEnergy(),
                                                 self.getTotalEnergy())
        env.history.statusbar_msg(result)

    def kJ_per_mol_to_zJ(self, kJ_per_mol):
        """
        Convert a molar energy (kJ/mol, as GROMACS reports) to zeptojoules
        per particle.
        """
        joules = kJ_per_mol * 1000.0 / AVOGADRO
        return joules * 1e21

    def _getSingleEnergy(self, key):
        """
        Return the energy component named key in zJ, or 0.0 if that column
        was absent from the log or its value was malformed.
        """
        if (key in self._values): # was has_key(); 'in' is equivalent and not deprecated
            try:
                value = float(self._values[key])
            except (ValueError, TypeError):
                # narrowed from a bare except: only conversion failures
                # should be silently treated as zero
                print("_getSingleEnergy(): malformed value for %s: '%s'" % (key, self._values[key]))
                value = 0.0
            return self.kJ_per_mol_to_zJ(value)
        return 0.0

    def getBondEnergy(self):
        """Sum of bonded terms (bond stretch, Morse, angle) in zJ."""
        return self._getSingleEnergy("Bond") + self._getSingleEnergy("Morse") + self._getSingleEnergy("Angle")

    def getHarmonicEnergy(self):
        """Strut (harmonic restraint) energy in zJ."""
        return self._getSingleEnergy("Harmonic Pot.")

    def getNonbondedEnergy(self):
        """Sum of nonbonded terms (LJ-14, LJ short-range, Buckingham) in zJ."""
        return self._getSingleEnergy("LJ-14") + self._getSingleEnergy("LJ (SR)") + self._getSingleEnergy("Buck.ham (SR)")

    def getTotalEnergy(self):
        """GROMACS' own 'Total Energy' column in zJ."""
        return self._getSingleEnergy("Total Energy")
| NanoCAD-master | cad/src/simulation/GromacsLog.py |
# Copyright 2005-2009 Nanorex, Inc. See LICENSE file for details.
"""
movie.py -- class Movie, used for simulation parameters and open movie files
@author: Mark
@version: $Id$
@copyright: 2005-2009 Nanorex, Inc. See LICENSE file for details.
History:
Initially by Mark.
Some parts rewritten by Bruce circa 050427.
"""
import os, sys
from struct import unpack
from PyQt4.Qt import Qt, qApp, QApplication, QCursor, SIGNAL
from utilities.Log import redmsg, orangemsg, greenmsg
from geometry.VQT import A
from foundation.state_utils import IdentityCopyMixin
from operations.move_atoms_and_normalize_bondpoints import move_atoms_and_normalize_bondpoints
from utilities import debug_flags
from platform_dependent.PlatformDependent import fix_plurals
from utilities.debug import print_compact_stack, print_compact_traceback
from files.dpb_trajectory.moviefile import MovieFile #e might be renamed, creation API revised, etc
import foundation.env as env
ADD = True
SUBTRACT = False
FWD = 1
REV = -1
_DEBUG0 = 0
_DEBUG1 = 0 # DO NOT COMMIT WITH 1
_DEBUG_DUMP = 0 # DO NOT COMMIT WITH 1
playDirection = { FWD : "Forward", REV : "Reverse" }
# ==
class Movie(IdentityCopyMixin): #bruce 080321 bugfix: added IdentityCopyMixin
"""
Movie object.
Multiple purposes (which ought to be split into separate objects more than
they have been so far):
- Holds state of one playable or playing movie,
and provides methods for playing it,
and has moviefile name and metainfo;
- also (before its moviefile is made) holds parameters needed
for setting up a new simulator run
(even for Minimize, though it might never make a moviefile);
- those parameters might be used as defaults (by external code) for setting
up another sim run.
Warnings:
- methods related to playing are intimately tied to movieMode.py's
Property Manager;
- so far, only supports playing one movie at a time;
- so far, provisions/checks for changing Parts during movie playing are
limited.
Movie lifecycle
[bruce 050427 intention -- some details are obs or need review #####@@@@@]:
- If we make the movie in this session (or someday, if we read a movie
node from an mmp file), we give it an alist, and we should never change
that alist after that, but we'll need to check it sometimes, in case of
atoms changing Parts or being killed.
If we make the movie as a way of playing the trajectory in an existing
file, then when we do that (or when needed) we come up with an alist,
and likewise never change the alist after that.
(In the future, if there's a "play on selection" option, this only
affects which atoms move when we play the movie -- it doesn't alter the
alist, which is needed in its original form for interpreting frames in
the moviefile.)
Maybe the Movie is not meant to be ever played (e.g. it's just for
holding sim params, perhaps for Minimize), but if it is, then external
code optionally queries might_be_playable() to decide whether to try
playing it (e.g. when entering "Play Movie"), calls cueMovie() before
actually starting to play it (this verifies it has a valid alist (or that
one can be constructed) which is playable now, and perhaps changes the
current Part so the playing movie will be visible), then calls other
methods to control the playing, then calls _close when the playing is done
(which snuggles singlets and disables some movieMode PM controls).
But even between _close and the next cueMovie(), the alist is maintained
-- switching Parts is not enough to try reloading the movie for
playing on different atoms. If changing it to play on different
atoms is ever needed, we'll add specific support for that.
Not only is alist maintained, so is valuable info about the moviefile,
like cached frames. The file might be closed (to save on open files
for when we have multiple loaded movies, and to help us detect
whether the file gets overwritten with new data); if closed,
it's reopened on the next cueMovie(), and it's always rechecked on
cueMovie() for being overwritten. #####@@@@@ doit or testit
State variables involved in all this (incomplete list, there's also
currentFrame and a few others in the playing-state):
- isOpen() tells whether we're between cueMovie() and _close. (It has no
guaranteed relation to whether any file object is open, though in
practice it might coincide with that for the moviefile.)
- alist is None or a valid list of atoms (this might be replaced by
an object for holding that list)
- the first time cueMovie() is called, the movie file header is parsed,
and an alist is assigned if possible and not already known, and an
"alist_and_moviefile" object to hold both of them and keep them in
correspondence is created, and if this works the file is never again
fully reparsed, though it might be rechecked later to ensure it hasn't
been overwritten.
#####@@@@@ is it ok to do this for each existing call of cueMovie()?
- might_be_playable() returns True if this object *might* be playable,
provided cueMovie() has not yet been called and succeeded
(i.e. if we don't yet have an alist_and_moviefile object);
but after cueMovie() has once succeeded, it returns True iff the alist
is currently ok to try to play from the file (according to our
alist_and_moviefile).
(This might always be True, depending on our policy for atoms moved
to other parts or killed, but it might trigger history warnings in
some cases -- not yet decided #####@@@@@).
It won't actually recheck the file (to detect overwrites) until
cueMovie() is called. (The goal is for might_be_playable to be fast
enough to use in e.g. updating a node-icon in the MT, in the future.)
"""
#bruce 050324 comment: note that this class is misnamed --
# it's really a SimRunnerAndResultsUser... which might
# make and then use .xyz or .dpb results; if .dpb, it's able
# to play the movie; if .xyz, it just makes it and uses it once
# and presently doesn't even do it in methods, but in external
# code (nonetheless it's used). Probably should split it into subclasses
# and have one for .xyz and one for .dpb, and put that ext code
# into one of them as methods. ###@@@
#bruce 050329 comment: this file is mostly about the movie-playable DPB file;
# probably it should turn into a subclass of SimRun, so the objects for other
# kinds of sim runs (eg minimize) can be different. The superclass would
# have most of the "writemovie" function (write sim input and run sim)
# as a method, with subclasses customizing it.
# class-level defaults, so these attributes exist even before __init__ runs
# (and so __getattr__ is not triggered for them)
duration = 0.0 # seconds it took to create moviefile (if one was created and after that's finished) [bruce 060112]
ref_frame = None # None or (frame_number, sim_posn_array) for a reference frame for use in playing the movie
    # from purely differential data in old-format moviefiles (to finish fixing bug 1297; someday can help improve fix to 1273)
    # (see comments in get_sim_posns for relevant info and caveats) [bruce 060112]
ignore_slider_and_spinbox = False # (in case needed before init done)
minimize_flag = False # whether we're doing some form of Minimize [bruce 060112]
def __init__(self, assy, name=None):
    """
    Initialize a Movie object for the given assembly.

    Note that this Movie might be made to hold params for a sim run,
    and then be told its filename, or to read a previously saved file;
    pre-050326 code always stored filename from outside and didn't tell
    this object how it was becoming valid, etc.

    @param assy: the assembly this movie belongs to (we cache its main
                 window and glpane for later use).
    @param name: optional movie name for the model tree; coerced to ""
                 if None (some code assumes this is a string).
    """
    self.assy = assy
    self.win = self.assy.w
    self.glpane = self.assy.o ##e if in future there's more than one glpane, recompute this whenever starting to play the movie
    # for future use: name of the movie that appears in the modelTree.
    self.name = name or "" # assumed to be a string by some code
    # the name of the movie file
    self.filename = "" #bruce 050326 comment: so far this is only set by external code; i'll change that someday
    ## # movie "file object"
    ## self.fileobj = None
    # the total number of frames actually in our moviefile [might differ from number requested]
    self.totalFramesActual = 0
    # bruce 050324 split uses of self.totalFrames into totalFramesActual and totalFramesRequested
    # to help fix some bugs, especially when these numbers differ
    # currentFrame is the most recent frame number of this movie that was played (i.e. used to set model atom positions)
    # by either movieMode or real-time viewing of movie as it's being created
    # [bruce 060108 added realtime update of this attr, and revised some related code, re bug 1273]
    self.currentFrame = 0
    # the most recent frame number of this movie that was played during its creation by realtime dynamics, or 0 [bruce 060108]
    self.realtime_played_framenumber = 0
    # the starting (current) frame number when we last entered MOVIE mode ###k
    self.startFrame = 0
    # a flag that indicates whether this Movie has been cueMovie() since the last _close
    # [*not* whether moviefile is open or closed, like it indicated before bruce 050427]
    self.isOpen = False
    # a flag that indicates the current direction the movie is playing
    self.playDirection = FWD
    #e someday [mark, unknown date]: a flag that indicates if the movie and the part are synchronized
    ## self.isValid = False # [bruce 050427 comment: should be renamed to avoid confusion with QColor.isValid]
    # the number of atoms in each frame of the movie.
    self.natoms = 0
    # show each frame when _playToFrame is called
    self.showEachFrame = False
    # a flag that indicates the movie is paused
    self.isPaused = True
    # 'movie_is_playing' is a flag that indicates a movie is playing. It is
    # used by other code to speed up rendering times by optionally disabling
    # the (re)building of display lists for each frame of the movie.
    # [Mark 051209.]
    self.win.movie_is_playing = False
    # moveToEnd: a flag that indicates the movie is currently fast-forwarding to the end.
    # [bruce 050428 comment: in present code, self.moveToEnd might not be properly maintained
    #  (it's never set back to False except by _pause; I don't know if _pause is
    #  always called, but if it needs to be, this is not documented),
    #  and it's also not used. I suggest replacing it with a logical combination
    #  of other flags, if it's ever needed.]
    self.moveToEnd = False
    # a flag that indicates if the wait (hourglass) cursor is displayed.
    self.waitCursor = False
    # a flag to tell whether we should add energy information to the tracefile during dynamics runs
    self.print_energy = False
    # simulator parameters to be used when creating this movie,
    # or that were used when it was created;
    # these should be stored in the dpb file header so they
    # can be retrieved later. These will be the default values used by
    # the simsetup dialog, or were the values entered by the user.
    # If the sim parameters change, they might need to be updated in all places marked "SIMPARAMS" in the code.
    self.totalFramesRequested = 900
    # bruce 050325 added totalFramesRequested, changed some uses of totalFrames to this
    self.temp = 300
    self.stepsper = 10
    ## self.watch_motion = False # whether to show atom motion in realtime [changed by Mark, 060424]
    ## # (note: this default value affects Dynamics, but not Minimize, which uses its own user pref for this,
    ## # but never changes this value to match that [as of 060424; note added by Bruce])
    self._update_data = None
    self.update_cond = None # as of 060705 this is also used to derive self.watch_motion, in __getattr__
    self.timestep = 10
    # Note [bruce 050325]: varying the timestep is not yet supported,
    # and this attribute is not presently used in the cad code.
    # support for new options for Alpha7 [bruce 060108]
    self.create_movie_file = True # whether to store movie file
    # [nim (see NFR/bug 1286), treated as always T -- current code uses growing moviefile length to measure progress;
    # to implem this, use framebuffer callback instead, but only when this option is asked for.]
    # bruce 050324 added these:
    self.alist = None # list of atoms for which this movie was made, if this has yet been defined
    self.alist_and_moviefile = None #bruce 050427: hold checked correspondence between alist and moviefile, if we have one
    self.debug_dump("end of init")
    return
##bruce 050428 removing 1-hour-old why_not_playable feature,
## since ill-conceived, incomplete, and what's there doesn't work (for unknown reasons).
## why_not_playable = ""
## # reason why we're not playable (when we're not), if known (always a string; a phrase; never used if truly playable)
def might_be_playable(self): #bruce 050427
    """
    Report whether it is reasonable to try to play this movie.

    This does NOT check whether it's still valid for its atoms or the
    current part; if the caller then tries to play it, we'll check that
    and complain. BUT if it has been previously checked and found invalid,
    this should return False or perhaps redo the check.
    For more info see docstring of class Movie.
    """
    if self.file_trashed:
        #bruce 050428: some new sim in this process has trashed our file
        # (even if it didn't complete properly); this doesn't yet help if
        # some *other* process did the trashing.
        #e (it would be nicer if new sims stored into a temp file until they
        #  completed successfully; even then they need to notify us (via
        #  fyi_reusing_your_moviefile), and they might want to ask first,
        #  or use a new filename)
        return False #e history message??
    if self.alist_and_moviefile:
        # we once cued successfully; defer to the checked correspondence
        # between our alist and the moviefile.
        return self.alist_and_moviefile.might_be_playable()
    # no correspondence yet -- it's ok to try playing iff we have the name
    # of an existing .dpb file (though we don't yet know it will work).
    name = self.filename
    return name and name.endswith('.dpb') and os.path.exists(name)
file_trashed = False # set when a newer sim run overwrites our moviefile on disk

def fyi_reusing_your_moviefile(self, moviefile):
    """
    Notification that the file named *moviefile* is about to be reused
    (overwritten). If it happens to be our own moviefile, record that it's
    being trashed and that we'll never again be playable (unless we have
    not yet read any data from it, in which case we're no worse off than
    before, I suppose).
    """
    # I'm assuming it's overwritten, not only removed, since there is no
    # reason to remove it except to overwrite it.
    its_ours = self.filename and self.filename == moviefile
    if its_ours and self.alist_and_moviefile:
        # note, this wording is from the caller's point of view --
        # *we* are the previously open movie.
        self.warning( "overwriting moviefile (previously open movie will no longer be playable)." )
        self.file_trashed = True
    return
def __getattr__(self, attr): # in class Movie
    """
    Compute certain legacy/derived attributes on demand:
    'history' (deprecated alias for env.history) and 'watch_motion'
    (derived from self.update_cond [bruce 060705]).
    """
    if attr == 'history':
        #bruce 050913 revised this; I suspect it's not needed and could be removed
        print_compact_stack("deprecated code warning: something accessed Movie.history attribute: ") #bruce 060705 -> _stack
        return env.history
    if attr == 'watch_motion': #bruce 060705
        # true iff update_cond is set (and accessible)
        try:
            return not not self.update_cond
        except:
            return False
    raise AttributeError(attr)
def destroy(self): #bruce 050325
    """
    Break reference cycles so this Movie can be garbage collected.

    So far this is only meant to be called before the moviefile has been
    made (e.g. it doesn't destroy our big fancy subobjects); it should be
    revised to work either way, and to _close the file if necessary.
    For now, just break cycles.
    """
    for attrname in ('win', 'assy', 'part', 'alist', 'fileobj'):
        setattr(self, attrname, None)
    # these attribute names are obsolete -- remove them entirely
    del self.fileobj
    del self.part
    return
# == methods for letting this object (just after __init__) represent a previously saved movie file
def represent_this_moviefile(self, mfile, part = None): #bruce 050326
    """
    Try to start representing the given moviefile (which must end
    with '.dpb').

    @param mfile: pathname of the moviefile to represent.
    @param part: if supplied, also [NIM] make sure mfile is valid for the
                 current state of that part.
    @return: True iff this succeeds; if it fails, destroy self and return
             False WITHOUT emitting an error message.
    """
    #e should the checking be done in the caller (a helper function)?
    assert mfile.endswith(".dpb") # for now
    if not os.path.exists(mfile):
        #e env.history.message(redmsg(...)) -- is this a good idea? I think caller wants to do this... ###k
        self.destroy()
        return False
    self.filename = mfile
    ###e do more... but what is needed? set alist? only if we need to play it, and we might not... (PlotTool doesn't)
    assert not part # this is nim; should call the validity checker
    return True
# == methods for letting this object represent a movie (or xyz) file we're about to make, or just did make
def set_alist(self, alist): #bruce 050325
    """
    Verify this list of atoms is legal (as an alist to make a movie from),
    and record a private copy of it as this movie's alist (also updating
    self.natoms).

    This only makes sense before making a moviefile, or after reading one
    we didn't make in the same session (or whose alist we lost) and
    figuring out somehow what existing atoms it should apply to. But
    nothing is checked about whether this alist fits the movie file, if we
    have one, and/or the other params we have -- that's assumed done by
    the caller.
    """
    alist = list(alist) # our own copy (in case caller modifies theirs), normalized to a list
    first = alist[0] # also implicitly asserts the list is nonempty
    shared_part = first.molecule.part
    assert shared_part, "atm0.molecule.part %r should exist" % shared_part
    for atm in alist:
        assert atm.molecule.part == shared_part, \
            "atm %r.molecule.part %r should equal atm0.molecule.part %r" % \
            (atm, atm.molecule.part, shared_part)
    # now we know: there's at least one atom, and all atoms share the same
    # non-None Part.
    self.alist = alist
    self.natoms = len(alist) #bruce 050404; note, some old code might reset this (perhaps wrongly?) when reading a .dpb file
    return
def set_alist_from_entire_part(self, part):
    """
    Set self.alist to a list of all atoms of the given Part,
    in the order in which they would be written to a new mmp file.
    """
    # part.alist is not yet invalidated when it needs to be, so force a
    # recompute: clobber the attribute, delete it, then read it fresh.
    part.alist = None
    del part.alist
    fresh_alist = part.alist # this recomputes it
    self.set_alist(fresh_alist) #e could optimize (bypass checks in that method)
    return
# == methods for playing the movie file we know about (ie the movie we represent)
# [bruce 050427 comments/warnings:
# These methods need to be refactored, since they intimately know about movieMode's dashboard (they update it).
# They also use and maintain state-of-playing variables in self (which might be useful for any manner of playing the movie).
# One of the methods does recursive processing of QEvents and doesn't return until the movie is paused (I think).
# ]
def _cueMovieCheck(self): #bruce 050427
    """
    Check the movie file to determine that it's playable and that it's ok to
    start playing it; as a side effect, establish self.alist, self.ref_frame
    and self.alist_and_moviefile if not already set.

    If the checks fail, emit complaints on the history widget, undo any
    state variables we set, and return False. If they succeed, return True.
    """
    # remember what to unset if things don't work when we return
    we_set_alist = False
    we_set_alist_and_moviefile = False
    if not self.might_be_playable():
        env.history.message( redmsg( "Movie is not playable.")) # can't happen, I think... if it can, it should give more info.
        return False
    if not self.alist_and_moviefile:
        # we haven't yet set up this correspondence. Do it now. Note that the filename needs to be set up here,
        # but the alist might or might not have been (and if it was, it might not be all the atoms in the current Part).
        if self.alist is None:
            # alist should equal current Part; then verify moviefile works for that.
            #e In future, we might also permit alist to come from selection now,
            # if this has right natoms for moviefile and whole part doesn't.
            # *Or* we might let it come from main part if we're in some other part which doesn't fit.
            # For now, nothing so fancy. [bruce 050427]
            self.set_alist_from_entire_part( self.assy.part)
            we_set_alist = True
        if self.ref_frame is None: #bruce 060112
            # make a reference frame the best we can from current atom positions
            # [note: only needed if movie file format needs one, so once we support new DPB file format,
            # we should make this conditional on the format, or do it in a callback provided to alist_and_moviefile ###@@@]
            if self.currentFrame:
                # should never happen once bug 1297 fix is completed
                print "warning: making ref_frame from nonzero currentFrame %d" % self.currentFrame
            self.ref_frame = ( self.currentFrame, A(map(lambda a: a.sim_posn(), self.alist)) )
        else:
            # Client code supplied ref_frame, and that would be the fastest frame to start playing,
            # but we should not change currentFrame to match it since it should match actual atom posns,
            # not "desired frame to play next". Besides, I'm not sure this only runs when we cue the movie.
            # Some other code can play to frame 0 when cueing movie, if desired. For now that's nim;
            # it would require figuring out where movies are cued and when to autoplay to the desired frame
            # and which frame that was (presumably the reference frame). ###@@@ [bruce 060112]
            pass ## self.currentFrame = self.ref_frame[0]
        # sanity check: reference frame positions must match alist length
        if len(self.ref_frame[1]) != len(self.alist):
            print "apparent bug: len(self.ref_frame[1]) != len(self.alist)" # should never happen, add %d if it does
        if self.currentFrame != 0:
            # As of 060111, this can happen (due to fixing bug 1273) if we moved atoms in realtime while creating movie.
            # Even after the complete fix of bug 1297 (just done, 060112) it will still happen unless we decide to
            # change the fix of bug 1273 to start playing from ref_frame (see comment above about that).
            # Maybe it could also happen if we leave moviemode on a nonzero frame, then reenter it?
            # Should test, but right now my guess is that we're prohibited from leaving it in that case!
            env.history.message( greenmsg( "(Starting movie from frame %d.)" % self.currentFrame ))
            # [note: self.currentFrame is maintained independently of a similar variable
            # inside a lower-level moviefile-related object.]
        self.alist_and_moviefile = alist_and_moviefile( self.assy, self.alist, self.filename, ref_frame = self.ref_frame )
            ## curframe_in_alist = self.currentFrame)
        # if this detected an error in the file matching the alist, it stored this fact but didn't report it yet or fail #####@@@@@ doit
        # maybe it won't even check yet, until asked...
        we_set_alist_and_moviefile = True
        ok = self.alist_and_moviefile.valid()
    else:
        # we have an alist_and_moviefile but we need to recheck whether the alist still matches the file
        # (in case the file changed on disk).
        ok = self.alist_and_moviefile.recheck_valid() # in theory this might come up with a new larger totalFramesActual value (NIM)
    if not ok:
        # it emitted error messages already #####@@@@@ doit
        # reset what we set, in case user will try again later with altered file or different assy.part
        if we_set_alist_and_moviefile:
            self.alist_and_moviefile.destroy()
            self.alist_and_moviefile = None
        if we_set_alist:
            self.alist = None
        return False
    # change current part and/or arrange to warn if user does that? No, this is done later when we _play.
    return True
def cueMovie(self, propMgr = None, hflag = True): #bruce 060112 revised retval documentation and specific values
"""
Setup this movie for playing.
@param propMgr: The movie property manager.
@type propMgr: MoviePropertyManager
@param hflag: The history message flag. If True, print a history
message.
@type hflag: boolean
@return: False if this worked, True if it failed
(warning: reverse of common boolean retvals).
@rtype: boolean
[#doc whether it always prints error msg to history if it failed.]
"""
# bruce 050427 comment:
# what it did before today:
# - figure out part to use for movie file (this is wrong and needs changing).
# - check movie file for validity re that part (on error, print message and return true error code)
# - freeze atoms (making some other operations on them illegal, I think, in the present code)
# - possibly save frame 0 positions -- only if self.currentFrame is 0
# - open movie file, read header
# - update dashboard frame number info (SB, SL, label)
# - history info: if hflag: self._info()
# - self.startFrame = self.currentFrame
if _DEBUG1:
print "movie.cueMovie() called. filename = [" + self.filename + "]"
self.propMgr = propMgr
if self.isOpen and debug_flags.atom_debug:
env.history.message( redmsg( "atom_debug: redundant cueMovie()? bug if it means atoms are still frozen"))
kluge_ensure_natoms_correct( self.assy.part) # matters for some warn_if_other_part messages, probably not for anything else
ok = self._cueMovieCheck()
if not ok:
# bruce 050427 doing the following disable under more circumstances than before
# (since old code's errcodes 'r' 1 or 2 are no longer distinguished here, they're just both False) -- is that ok?
self.isOpen = False #bruce 050427 added this
return True
#bruce 050427 extensively rewrote the following (and moved some of what was here into OldFormatMovieFile_startup)
## no longer needed: self.alist_and_moviefile.own_atoms() # older related terms: self.movsetup(), freeze the atoms
self.isOpen = True
self.totalFramesActual = self.alist_and_moviefile.get_totalFramesActual() # needed for dashboard controls
self.natoms = len(self.alist) # needed for _info
if hflag:
self._info() # prints info to history
# startFrame and currentFrame are compared in _close to determine if the assy has changed due to playing this movie. ###k
self.startFrame = self.currentFrame
return False
## # Debugging Code [to enable, uncomment and remove prior 'return' statement]
## if _DEBUG1:
## msg = "Movie Ready: Number of Frames: " + str(self.totalFramesActual) + \
## ", Current Frame:" + str(self.currentFrame) +\
## ", Number of Atoms:" + str(self.natoms)
## env.history.message(msg)
##
## ## filepos = self.fileobj.tell() # Current file position
## msg = "Current frame:" + str(self.currentFrame) ## + ", filepos =" + str(filepos)
## env.history.message(msg)
## return False
# ==
def warn_if_other_part(self, part): #bruce 050427; to call when play is pressed, more or less...
    """
    Warn the user if playing this movie won't move any (or all) atoms in
    the given part (or about other weird conditions too).
    """
    if self.alist is None:
        return
    # classify our atoms relative to the given part
    # (like .touches_part() in a subobject, but can be called sooner...)
    moved_here = 0 # counts the atoms we'll move in the given part
    in_other_part = False
    any_killed = False
    for atm in self.alist:
        atom_part = atm.molecule.part
        if atom_part == part:
            moved_here += 1
        elif atom_part:
            in_other_part = True
        else:
            any_killed = True
    if not moved_here and not in_other_part:
        if any_killed:
            self.warning( "all of this movie's atoms have been deleted. Playing it anyway (no visible effect).")
        else:
            # should never happen, I think
            self.warning( "this movie has no atoms. Playing it anyway (no visible effect).")
    else:
        if not moved_here:
            self.warning( "to see this movie playing, you must display a different Part.") #e which one, or ones?
        else:
            if any_killed:
                self.warning( "some of this movie's atoms have been deleted. (Playing it still moves the remaining atoms.)")
            if in_other_part:
                self.warning( "some of this movie's atoms have been moved to another Part (maybe one on the clipboard). " \
                    "Playing it moves its atoms in whichever Parts they reside in." )
        if moved_here < part.natoms:
            # (this assumes part.natoms has been properly updated by the caller; cueMovie() does this.)
            self.warning( "some displayed atoms are not in this movie, and stay fixed while it plays.")
    return
def warning(self, text):
    """
    Emit an orange warning message, prefixed with "Warning: ", to the
    history widget.
    """
    msg = "Warning: " + text
    env.history.message( orangemsg( msg))
    return
def _close(self):
"""
Close movie file and adjust atom positions.
"""
#bruce 050427 comment: what this did in old code:
# - if already closed, noop.
# - pause (sets internal play-state variables, updates dashboard ###k)
# - close file.
# - unfreeze atoms.
# - if frame moved while movie open this time, self.assy.changed()
# - wanted to delete saved frame 0 but doesn't (due to crash during devel)
if _DEBUG1:
print_compact_stack( "movie._close() called. self.isOpen = %r" % self.isOpen)
if not self.isOpen:
return
self._pause(0)
## self.fileobj.close() # Close the movie file.
self.alist_and_moviefile.snuggle_singlets() #bruce 050427
self.alist_and_moviefile.close_file() #bruce 050427
self.isOpen = False #bruce 050425 guess: see if this fixes some bugs
if _DEBUG1:
print "self.isOpen = False #bruce 050425 guess: see if this fixes some bugs" ###@@@
## self.movend() # Unfreeze atoms.
if self.startFrame != self.currentFrame:
self.assy.changed()
#bruce 050427 comment: this [i.e. having this condition rather than 'if 1' [060107]]
# only helps if nothing else in playing a movie does this...
# I'm not sure if that's still true (or if it was in the older code, either).
return
def _play(self, direction = FWD):
"""
Start playing movie from the current frame.
"""
#bruce 050427 comment: not changing this much
if _DEBUG0:
print "movie._play() called. Direction = ", playDirection[ direction ]
if not self.isOpen: #bruce 050428 not sure if this is the best condition to use here ###@@@
if (not self.might_be_playable()) and 0: ## self.why_not_playable:
msg = "Movie file is not presently playable: %s." ## % (self.why_not_playable,)
else:
msg = "Movie file is not presently playable." ###e needs more detail, especially when error happened long before.
env.history.message( redmsg( msg )) #bruce 050425 mitigates bug 519 [since then, it got fixed -- bruce 050428]
return
if direction == FWD and self.currentFrame == self.totalFramesActual:
return
if direction == REV and self.currentFrame == 0:
return
self.playDirection = direction
if self.currentFrame in [0, self.realtime_played_framenumber]:
#bruce 060108 added realtime_played_framenumber; probably more correct would be only it, or a flag to emit this once
env.history.message("Playing movie file [" + self.filename + "]")
self._continue(hflag = False) #bruce 060108 revised call, should be equivalent
else:
self._continue()
# josh 050815.
# Changed name from _writeas to _write_povray_series. mark 050908.
# I plan to write a special Movie Maker dialog that would call this with arguments.
# Mark 050908
def _write_povray_series(self, name):
    """
    Write the movie out as a series of POV-Ray files, starting with the
    current frame until the last frame, skipping frames using the
    "Skip" value from the dashboard.

    If your trajectory file was foobar.dpb, this will write, e.g.,
    foobar.000000.pov thru foobar.000999.pov (assuming your movie has
    1000 frames).

    If you have bash, you may then run:
        for FN in foobar.000*.pov; { povray +W800 +H600 +A -D $FN; } &> /dev/null &
    to generate the .png files.
    This is not to be done under NE1 because it typically takes several
    hours and will be best done on a renderfarm with commands appropriate
    to the renderfarm.

    You may then make a movie of it with:
        mencoder "mf://*.png" -mf fps=25 -o output.avi -ovc lavc -lavcopts vcodec=mpeg4

    @param name: basename (including any path) for the .pov files; the
                 frame number and ".pov" extension are appended to it.
    """
    from graphics.rendering.povray.writepovfile import writepovfile
    if not self.isOpen: #bruce 050428 not sure if this is the best condition to use here ###@@@
        msg = "Movie file is not presently playable." ###e needs more detail, especially when error happened long before.
        env.history.message( redmsg( msg )) #bruce 050425 mitigates bug 519 [since then, it got fixed -- bruce 050428]
        return
    self.playDirection = 1
    # Write the POV-Ray series starting at the current frame until the last frame,
    # skipping frames if "Skip" (on the dashboard) is != 0. Mark 050908
    nfiles = 0
    # Initialize framecounter before the loop so the summary message below
    # can't hit an unset attribute if the loop body runs zero times.
    self.framecounter = self.currentFrame
    for i in range(self.currentFrame,
                   self.totalFramesActual + 1,
                   self.propMgr.frameSkipSpinBox.value()):
        self.alist_and_moviefile.play_frame(i)
        filename = "%s.%06d.pov" % (name, i)
        # For 100s of files, printing a history message for each file is undesired.
        # Instead, a summary message is printed below. Fixes bug 953. Mark 051119.
        writepovfile(self.assy.part, self.assy.o, filename) #bruce 050927 revised arglist
        nfiles += 1
        self.framecounter = i # the last frame number written; used in the summary message below. ninad060809
    # Return to currentFrame. Fixes bug 1025. Mark 051119
    self.alist_and_moviefile.play_frame(self.currentFrame)
    # Summary msgs tell user number of files saved and where they are located.
    msg = fix_plurals("%d file(s) written." % nfiles)
    env.history.message(msg)
    filenames = "%s.%06d.pov - %06d.pov" % (name, self.currentFrame, self.framecounter) #ninad060809 fixed bugs 2147 and 2148
    msg = "Files are named %s." % filenames
    env.history.message(msg)
    return
def _continue(self, hflag = True): # [bruce 050427 comment: only called from self._play]
"""
Continue playing movie from current position.
@param hflag: if True, print history message
@type hflag: boolean
"""
if _DEBUG0:
print "movie._continue() called. Direction = ", playDirection[ self.playDirection ]
# In case the movie is already playing (usually the other direction).
self._pause(0)
if hflag:
env.history.message("Movie continued: " + playDirection[ self.playDirection ])
self.warn_if_other_part(self.assy.part) #bruce 050427
self.showEachFrame = True #bruce 050428 comment: this is the only set of this var to True.
# Continue playing movie.
if self.playDirection == FWD:
self._playToFrame(self.totalFramesActual)
# If the pause button was pressed by the user, then this condition is True.
if self.currentFrame != self.totalFramesActual:
return
else:
self._playToFrame(0)
# If the pause button was pressed by the user, then this condition is True.
if self.currentFrame != 0:
return
# If "Loop" is checked, continue playing until user hits pause. Mark 051101.
while self.propMgr.movieLoop_checkbox.isChecked():
if self.playDirection == FWD:
self._reset() # Resets currentFrame to 0
self.showEachFrame = True # _pause(), called by _playToFrame(), reset this to False.
self._playToFrame(self.totalFramesActual)
# If the pause button was pressed by the user, then this condition is True.
if self.currentFrame != self.totalFramesActual:
break
else: # playDirection == REV
self._moveToEnd() # Resets currentFrame to totalFramesActual
self.showEachFrame = True # _pause(), called by _playToFrame(), reset this to False.
self._playToFrame(0)
# If the pause button was pressed by the user, then this condition is True.
if self.currentFrame != 0:
break
def _pause(self, hflag = True):
"""
Pause movie.
hflag - if True, print history message
"""
#bruce 050428 comment: I suspect it's required to call this in almost every event,
# since it's the only place certain state variables gets reinitialized to default
# values (e.g. showEachFrame to False). This should be analyzed and documented.
if _DEBUG0:
print "movie._pause() called"
self.debug_dump("_pause called, not done")
# bruce 050427 comment: no isOpen check, hope that's ok (this has several calls)
self.isPaused = True
self.win.movie_is_playing = False
self.showEachFrame = False
self.moveToEnd = False
self.propMgr.moviePlayActiveAction.setVisible(0)
self.propMgr.moviePlayAction.setVisible(1)
self.propMgr.moviePlayRevActiveAction.setVisible(0)
self.propMgr.moviePlayRevAction.setVisible(1)
if hflag: env.history.message("Movie paused.")
self.debug_dump("_pause call done")
def debug_dump(self, heading = "debug_dump", **kws):
    """
    Print the playing-state variables (plus any extra keyword args) and a
    compact stack trace, for debugging; a noop unless _DEBUG_DUMP is set.

    @param heading: optional label printed before the state dump (pass a
                    false value to suppress it).
    @param kws: arbitrary extra values to include in the dump.
    """
    if not _DEBUG_DUMP:
        return # disable when not needed -- but it's useful and nicelooking output, so keep it around as an example
    if heading:
        print "\n %s:" % heading
    print " movie_is_playing = %r, isPaused = %r, showEachFrame = %r, moveToEnd = %r, totalFramesActual = %r, currentFrame = %r, playDirection = %r" \
        % (self.win.movie_is_playing, self.isPaused, self.showEachFrame, self.moveToEnd, self.totalFramesActual, self.currentFrame, self.playDirection )
    if kws:
        print " other args: %r" % kws
    print_compact_stack(" stack at that time: ", skip_innermost_n = 1) # skips this lineno and all internal ones
def _playToFrame(self, fnum, from_slider = False):
#bruce 050428 renamed this from _playFrame, since it plays all frames from current to fnum.
"""
Main method for movie playing.
When called due to the user sliding the movie dashboard frame-number slider, from_slider should be True.
If "self.showEachFrame = True", it will play each frame of the movie between "fnum" and "currentFrame"
(except for skipped frames due to the skip control on the dashboard).
If "self.showEachFrame = False", it will advance to "fnum" from "currentFrame".
fnum - frame number to play to in the movie.
"""
#bruce 050427 revised docstring, added from_slider arg, merged most of _playSlider into this method.
# I faithfully added conditions on from_slider to imitate the old code, though some of them might not
# be needed or might even be bugs (if so, then presumably the non-from_slider cases are more correct).
# If the differences are correct, it's probably better to separate the methods again;
# the reason I merged them for now is to facilitate comparison so I (or Mark) can eventually review
# whether the diffs are correct.
if _DEBUG0:
print "movie._playToFrame() called: from fnum = ", fnum, ", to currentFrame =", self.currentFrame
#bruce 050427 comment: added an isOpen check, in case of bugs in callers (this has lots of calls)
if not self.isOpen:
return
if not from_slider: #bruce 050427 comment: I'm suspicious of this condition.
self.isPaused = False
self.win.movie_is_playing = True # In case Bruce's suspicion is true. Mark 051209.
self.debug_dump()
# Return immediately if already at desired frame.
if fnum == self.currentFrame:
if not from_slider: #bruce 050427 comment: I'm suspicious of this condition.
self.isPaused = True # May not be needed. Doing it anyway.
self.win.movie_is_playing = False # May not be needed. Doing it anyway. Mark 051209.
self.debug_dump("fnum == self.currentFrame so paused", fnum = fnum)
return
# Don't let movie run out of bounds.
if fnum < 0 or fnum > self.totalFramesActual:
print "Warning: Slider or other fnum out of bounds. fnum value =",fnum,", Number of frames =", self.totalFramesActual
self.isPaused = True # May not be needed. Doing it anyway.
self.win.movie_is_playing = False # May not be needed. Doing it anyway. Mark 051209.
self.debug_dump("fnum out of range so paused", fnum = fnum)
return
# Reset movie to beginning (frame 0). Executed when user types 0 in spinbox.
#bruce 050427 comment: this might no longer be needed (it might be handled at a lower level). We'll see. ###@@@
if not self.showEachFrame and fnum == 0 and not from_slider:
self._reset()
self.win.movie_is_playing = False # May not be needed. Doing it anyway. Mark 051209.
return
if False:
# There are implications with the Undo system that I don't understand here. Bruce better look
# this over before we switch it on.
if self.currentFrame == 0:
if not hasattr(self, 'origfile'):
errorcode, partdir = self.assy.find_or_make_part_files_directory()
if errorcode:
raise Exception("filename does not exist")
self.origfile = os.path.normpath(os.path.join(partdir,
self.assy.filename[:-4] + '.orig' +
self.assy.filename[-4:]))
try:
env.mainwindow().fileOpen(self.origfile)
except:
print 'cannot open original file'
raise
# "inc" is the frame increment (FWD = 1, REV = -1) .
if fnum > self.currentFrame:
inc = FWD
if not from_slider:
self.propMgr.moviePlayActiveAction.setVisible(1)
self.propMgr.moviePlayAction.setVisible(0)
else:
inc = REV
if not from_slider:
self.propMgr.moviePlayRevActiveAction.setVisible(1)
self.propMgr.moviePlayRevAction.setVisible(0)
# This addresses the situation when the movie file is large (> 1000 frames)
# and the user drags the slider quickly, creating a large delta between
# fnum and currentFrame. Issue: playing this long of a range of the movie
# may take some time. We need to give feedback to the user as to what is happening:
# 1). turn the cursor into WaitCursor (hourglass).
# 2). print a history message letting the user know we are advancing the movie, but
# also let them know they can pause the movie at any time.
#bruce 050427 comments:
# - The above comment dates from the old code when this method didn't handle the slider,
# so my guess is, its reference to the slider is even older and is out of date.
# - I'm now merging in _playSlider and adding "if from_slider" as needed for no change in behavior;
# no idea if the differences are good or bad.
# - This might not be needed when we change to the new .dpb file format (NIM)
# or if we have cached enough frames in lower-level code (NIM). ###e
waitCursor = False
if not from_slider:
if not self.showEachFrame:
delta = abs(fnum - self.currentFrame)
if delta != 1:
if delta > 1000:
waitCursor = True
env.history.message(playDirection[ inc ] + "ing to frame " + str(fnum) + ". You may select Pause at any time.")
else:
env.history.message(playDirection[ inc ] + "ing to frame " + str(fnum))
else:
if abs(fnum - self.currentFrame) > 1000:
env.history.message("Advancing to frame " + str(fnum) + ". Please wait...")
env.history.h_update() #bruce 060707
waitCursor = True
if waitCursor:
self.waitCursor = True
QApplication.setOverrideCursor( QCursor(Qt.WaitCursor) )
if _DEBUG0:
print "BEGIN LOOP: fnum = ", fnum, ", currentFrame =", self.currentFrame, ", inc =",inc
# This is the main loop to compute atom positions from the current frame to "fnum".
# After this loop completes, we paint the model -- but also during it.
# We also recursively process QEvents during it. [bruce 050427 revised this comment]
# [bruce question 050516: do those events ever include movie dashboard slider
# or button events which call this method recursively?? ####@@@@]
self.debug_dump("before playToFrame loop", fnum = fnum, inc = inc)
if from_slider:
# [bruce 050427: this case got a lot simpler.]
self.currentFrame = fnum
self.alist_and_moviefile.play_frame( self.currentFrame)
self.debug_dump("just after play_frame for slider", fnum = fnum, inc = inc)
# else...
self.win.movie_is_playing = True # Starting the Movie...
# Top of Main loop...
while self.currentFrame != fnum:
self.debug_dump("top of while loop body", fnum = fnum, inc = inc)
assert not from_slider
if self.isPaused:
self.win.movie_is_playing = False # Probably not needed. Doing it anyway. mark 051209.
break
## self.currentFrame += inc -- doing this below [bruce 050427]
if _DEBUG0:
print "IN LOOP1: fnum = ", fnum, ", currentFrame =", self.currentFrame, ", inc =",inc
#bruce 050427 totally revised the following in implem (not in behavior).
# Note that we needn't worry about valid range of frames, since both currentFrame and fnum should be within range.
# (Unless the surrounding code fails to check currentFrame well enough... I'm not sure! ###k)
# We only need to worry about whether we reach fnum or not.
skip_n = self.propMgr.frameSkipSpinBox.value() - 1 # Mark 060927
if not self.showEachFrame:
#bruce 050428 adding this to see if it speeds up "forward to end"
###e potential optim: increase skip, as long as time passed will not be too bad
skip_n = max(19,skip_n) # seems ok, though value is surely too big for huge models and too small for tiny ones ###e
delta_n = 1 + skip_n
for ii in range(delta_n): # slowness should not be an issue compared to time it takes to scan file... until new file format??
self.currentFrame += inc
if self.currentFrame == fnum:
break
# now self.currentFrame needs to be shown
if 1: ## self.showEachFrame: ####@@@@ old code said if 1 for this... what's best? maybe update them every 0.1 sec?
self.propMgr.updateCurrentFrame()
if 1:
self.alist_and_moviefile.play_frame( self.currentFrame) # doing this every time makes it a lot slower, vs doing nothing!
###e [bruce 050428 comments:]
# potential optim: do we need to do this now, even if not drawing on glpane?
# Warning: if we don't do it, the atom posns and self.currentFrame are out of sync;
# at least this requires fixing the posns at the end (doing play_frame then),
# but it might cause worse trouble, so really it's better to increase the "skip" above
# (perhaps adaptively using time-measurements). The other updates could safely be
# conditioned on how much time had passed, if they're always done at the end if needed.
if self.showEachFrame:
self.glpane.gl_update()
# Process queued events [bruce comment 050516: note that this will do a paintGL from our earlier gl_update above ####@@@@]
env.call_qApp_processEvents() #bruce 050908 replaced qApp.processEvents()
#e bruce 050427 comment: should we check to see if the user changed the controls,
# and (if so) change the fnum we're heading for?? ###@@@
# End of loop
self.debug_dump("after playToFrame loop", fnum = fnum, inc = inc)
# Update cursor, slider and show frame.
if self.waitCursor:
QApplication.restoreOverrideCursor() # Restore the cursor
self.waitCursor = False
self.propMgr.updateCurrentFrame( )
# [bruce 050428 comment: old code only updated slider here, but it did both SL and SB in loop above;
# now the update method decides which ones to update]
# Set movie_is_playing to False right before it draws the last frame (fnum).
self.win.movie_is_playing = False
# This is the last frame (fnum).
self.glpane.gl_update() #e bruce 050427 comment: we should optimize and only do this if we didn't just do it in the loop
if 1: ## if not from_slider:
#bruce 050428 always do this, since Mark agrees it'd be good for moving the slider to pause the movie
if _DEBUG0:
print "movie._playToFrame(): Calling _pause"
self._pause(0) # Force pause. Takes care of variable and dashboard maintenance.
if _DEBUG0:
print "movie._playToFrame(): BYE!"
return # from _playToFrame
def _playSlider(self, fnum):
"""
Slot for movie slider control.
It will advance the movie to "fnum" from "currentFrame".
fnum - frame number to advance to.
"""
if _DEBUG0:
print "movie._playSlider() called: fnum = ", fnum, ", currentFrame =", self.currentFrame
self.debug_dump("_playSlider", fnum = fnum)
self._playToFrame(fnum, from_slider = True) #bruce 050427 merged _playSlider into _playToFrame method, using from_slider arg
def _reset(self):
"""
Resets the movie to the beginning (frame 0).
"""
if _DEBUG0:
print "movie._reset() called"
if self.currentFrame == 0:
return
#bruce 050427 comment: added an isOpen check, in case of bugs in callers
if not self.isOpen:
return
self.currentFrame = 0
# Restore atom positions.
self.alist_and_moviefile.play_frame( self.currentFrame)
self.propMgr.updateCurrentFrame()
self._pause(0)
self.glpane.gl_update()
def _moveToEnd(self):
"""
"""
if _DEBUG0:
print "movie._moveToEnd() called"
if self.currentFrame == self.totalFramesActual:
return
#bruce 050427 comment: added an isOpen check, in case of bugs in callers
if not self.isOpen:
return
self._pause(0)
self.moveToEnd = True
self._playToFrame(self.totalFramesActual)
# ==
def _info(self):
"""
Print info about movie to the history widget.
"""
if _DEBUG0:
print "movie._info() called."
if not self.filename:
env.history.message("No movie file loaded.")
return
env.history.message("Filename: [" + self.filename + "]")
msg = "Number of Frames: " + str(self.totalFramesActual) + \
". Number of Atoms: " + str(self.natoms)
env.history.message(msg)
# env.history.message("Temperature:" + str(self.temp) + "K")
# env.history.message("Steps per Frame:" + str(self.stepsper))
# env.history.message("Time Step:" + str(self.stepsper))
def getMovieInfo(self):
    """
    Return the information about this movie, all as strings.

    @return: a 3-tuple (fileName, numOfFrames, numOfAtoms).
    """
    return (str(self.filename),
            str(self.totalFramesActual),
            str(self.natoms))
def getCurrentFrame(self):
    """
    Return the movie's current frame number.
    """
    return self.currentFrame
def getTotalFrames(self):
    """
    Return the total number of frames in the movie (totalFramesActual).
    """
    return self.totalFramesActual
def get_trace_filename(self):
    """
    Return the trace filename for the current movie.

    For .xyz movies a distinct suffix is used, so the trace filename
    differs whenever the movie filename does.
    """
    base, ext = os.path.splitext(self.filename)
    if ext == '.xyz':
        #bruce 050407 new feature: ensure tracefilename differs when filename does
        # (see comment next to our caller in runSim.py for why this matters)
        return base + "-xyztrace.txt"
    return base + "-trace.txt"
def get_GNUplot_filename(self):
    """
    Return the GNUplot filename for the current movie
    (movie filename with its extension replaced by "-plot.txt").
    """
    base = os.path.splitext(self.filename)[0]
    return base + "-plot.txt"
def moveAtoms(self, newPositions): # used when reading xyz files
    """
    Move our atoms (self.alist) to the given absolute positions.
    After all atoms are moved [and singlet positions updated] and bonds
    updated, update the display once.

    @param newPositions: a list of absolute atom positions, in the same
                         order as self.alist
    @type newPositions: list

    @raise ValueError: if the number of positions does not match the
                       number of atoms in self.alist.
    """
    if len(newPositions) != len(self.alist):
        # Normally readxyz checks this first, so no history message here;
        # but it can happen for realtime minimize if the model is edited
        # (bruce 060108), so the check stays in for safety.
        msg = "moveAtoms: The number of atoms from XYZ file (%d) is not matching with that of the current model (%d)" % \
            (len(newPositions), len(self.alist))
        print(msg)
        raise ValueError(msg)
        #bruce 060108 reviewed/revised all 2 calls, added this exception to preexisting noop/errorprint (untested)
    #bruce 051221 fixed bug 1239 in this function, then split it out
    move_atoms_and_normalize_bondpoints(self.alist, newPositions)
    self.glpane.gl_update()
    return
pass # end of class Movie
# ==
class MovableAtomList: #bruce 050426 splitting this out of class Movie... except it's entirely new code, as it turns out.
    """
    A list of atoms within an assy (perhaps in more than one Part or even
    including killed atoms), with methods for quickly and safely changing
    all their positions at once, updating their display, for "owning"
    those atoms or their chunks as needed to make it safe to reset their
    positions, and for tracking external changes to their structure relevant
    to safety and validity of resetting their positions. [For Alpha5 we're
    mainly worrying about safety from tracebacks rather than validity.]

    [Not yet handled here: ability to be told to move an H to one position,
    but to actually move a singlet into a different position computed
    from that (re bug 254). Caller might help by ordering singlets after
    their base atoms, or even by doing this work itself (none of that is
    decided yet). #e]
    """
    #e the plan is to later optimize this greatly
    # by making it totally own the involved atoms' posns and do its own fast redisplay.
    def __init__(self, assy, alist):
        """
        @param assy: the assembly our atoms belong to.
        @param alist: sequence of atoms; we keep a private copy, so the
                      caller is free to modify its own list afterwards.
        """
        self.assy = assy
        self.glpane = assy.o
        self.alist = list(alist) # private copy; use A()?
            # is alist a public attribute? (if so, no need for methods to prune its atoms by part or killedness, etc)
        self.natoms = len(self.alist)
    def get_sim_posns(self): #bruce 060111 renamed and revised this from get_posns, for use in approximate fix of bug 1297
        # note: this method is no longer called as of bruce 060112, but its comments are relevant and are referred to
        # from several files using the name of this method. It's also still correctly implemented, so we can leave it in for now.
        """
        Return an Array (mutable and owned by caller) of current
        positions-for-simulator of our atoms (like positions, except
        singlets pretend they're H's and correct their posns accordingly).
        (This must work even if some of our atoms have been killed,
        or moved into different Parts, since we were made, though the
        positions returned for killed atoms probably don't matter
        (#k not sure).)
        """
        # Problem: to fully fix bug 1297, we need to return the H position actually used in the sim, not the equilibrium H position
        # like a.sim_posn returns. For frame 0 it's the same; for other frames the only source for that info is the frame
        # returned from the sim (to the frame_callback), but it's only safe to use it if neither involved atom has been moved
        # since the sim was started. I might decide it's not worth fixing this except by moving to new DPB format which defines
        # absolute atom positions. Or I might have the realtime-motion code in runSim save frame 0, or any other frame
        # of a known number, and pass it to the movie object along with the "current frame number", for use instead of the array
        # returned by this method. For now, I won't bother with that, hoping this fix is good enough until we move to the new
        # DPB file format. ####@@@@
        # [idiom fix: replaced map + lambda with an equivalent list comprehension]
        res = [a.sim_posn() for a in self.alist]
        return A(res)
    def set_posns(self, newposns):
        """
        Set our atoms' positions (even killed ones) to those in the given
        array (but correct singlet positions); do all required invals but
        no redisplay.

        @note: someday we might have a version which only does this for the
               atoms now in a given Part.
        """
        #e later we'll optimize this by owning atoms and speeding up or
        # eliminating invals
        #bruce 060109 replaced prior code with this recently split out routine,
        # so that singlet correction is done on every frame; could be optimized,
        # e.g. by precomputing singlet list and optimizing setposn on lists of
        # atoms
        #bruce 060111 comment: should probably be renamed set_sim_posns
        # since it corrects singlet posns
        move_atoms_and_normalize_bondpoints(self.alist, newposns)
    set_posns_no_inval = set_posns #e for now... later this can be faster, and require own/release around it
    def snuggle_singlets(self): #bruce 050427 made this from movend; 090107 renamed
        # terrible hack for singlets in simulator, which treats them as H
        for a in self.alist:
            if a.is_singlet() and a.bonds: # could check a.molecule.part instead, but a.bonds is more to the point and faster
                #bruce 050428 exclude killed atoms (a.killed() is too slow [not anymore, bruce 050702, but this check is better anyway])
                a.snuggle() # same code as in moveAtoms() except for killed-atom check
                #bruce 051221 comment: note that to avoid an analog of bug 1239, it's critical that atoms are first all moved,
                # and only then are singlets snuggled. This was already the case here, but not in moveAtoms() from which this
                # code was copied.
        #e could optimize this (enough to do it continuously) by using Numeric to do them all at once
        self.glpane.gl_update() # needed because of the snuggle above
        return
    def update_displays(self):
        ###@@@ should use same glpane as in self.glpane.gl_update code above (one or the other is wrong) [bruce 050516 guess/comment]
        # (note: __init__ sets self.glpane = assy.o, so these agree as long as neither attribute is reassigned later)
        self.assy.o.gl_update() #stub? someday might need to update the MT as well if it's showing animated icons for involved Parts...
    def destroy(self):
        # drop our reference to the atoms; other attributes are left alone
        self.alist = None
    pass # end of class MovableAtomList
# ==
class alist_and_moviefile:
    """
    Set up and maintain a corresponding MovableAtomList and a MovieFile,
    and be able to move the atoms using the moviefile
    and know the state of their relationship at all times.
    (But let the two subobjects we create do most of the work.)

    Assume that we know the current valid frame better than the atoms do...
    even if something else moves them (unless it's another copy of the same
    movie, which we assume won't happen)... but this will become wrong once
    there's an Undo feature!
    So then, we'd want to advise the atom-state of this value (keyed to
    this object's moviefile-contents), so it'd be a part of the undone state.
    I'm not sure if I'll do that, or ignore it for now. ###k
    Or I might do *both*, by designating this object as the way the atom's
    real owner (their assy) remembers that state! In other words, this
    "playable movie" is sitting in the atoms as a "slidable handle"
    (metaphorically at least) to let anything adjust their posns using it,
    including (example 1) the moviemode dashboard controls
    (once it decides which movie object it wants to display and adjust)
    or (example 2) various cmenu ops (or even MT-embedded sliders?) on
    movie nodes in the MT.

    This class might be small enough to use as a Jig for actually being in
    the MT..., or it might still be better to let that be a separate object
    which represents one of these. #k
    """
    _valid = False # class default; set True in __init__ only if the moviefile matched alist
    def __init__(self, assy, alist, filename, ref_frame = None): #bruce 060112 removed curframe_in_alist, added ref_frame
        """
        Caller promises that filename exists. If it matches alist well enough
        to use with it, we set self.valid() true and fully init ourselves,
        i.e. set up the file/alist relationship and get ready to play
        specific frames (i.e. copy posns from file into alist's atoms)
        on request.
        If file doesn't match alist, we set self.valid() false and return early
        (but we might still be usable later if the file changes and some
        recheck method (NIM) is called (#e)).

        If provided, ref_frame is (frame_number, sim_posn_array) for some
        frame of the movie, which we should use as a reference for
        interpreting a purely-differential moviefile.
        Such moviefiles require that this argument be provided.
        [I'm not sure when this is checked -- leaving it out might cause
        later exception or (unlikely) wrong positions.]
        If the moviefile has its own abs positions, we can ignore this argument
        (#e but in future we might decide instead to check it, or to
        use it in some other way...).
        """
        self.alist = alist # needed for rechecking the match
        ## self.history = env.history # not yet used, but probably will be used for error messages [bruce 050913 removed this]
        self.moviefile = MovieFile( filename)
        self.movable_atoms = None
        if not self.moviefile:
            # early return leaves self._valid False (the class default)
            pass ## MovieFile will have emitted a history message (I hope)
            return
        self._valid = self.moviefile.matches_alist(alist) # this never emits a history message (for now)
        if not self._valid:
            # for now, we happen to know exactly why they're not valid... [bruce 050428]
            env.history.message( redmsg( "Movie file contents not valid for this Part (wrong number of atoms)."))
            self.moviefile.destroy()
            self.moviefile = None
            return # caller should check self.valid()
        self.movable_atoms = MovableAtomList( assy, alist)
        ## if curframe_in_alist is not None:
        ##     n = curframe_in_alist
        ##     frame_n = self.movable_atoms.get_sim_posns()
        ##         #bruce 060111 replaced get_posns with new get_sim_posns to approximately fix bug 1297;
        ##         # see comment in definition of get_sim_posns for more info.
        ##     self.moviefile.donate_immutable_cached_frame( n, frame_n)
        if ref_frame:
            n, frame_n = ref_frame
            self.moviefile.donate_immutable_cached_frame( n, frame_n)
            #bruce 060108 commenting out all sets of self.current_frame to avoid confusion, since nothing uses it;
            # but I suggest leaving the commented-out code around until the next major rewrite.
            ## self.current_frame = n
            ##     # warning [bruce 060108]: related or client code keeps its own version of this,
            ##     # called currentFrame, not necessarily in sync with this.
        ## else:
        ##     self.current_frame = None # since it's unknown (#k ok for all callers?)
        return
    def destroy(self):
        # NOTE: deliberately broad except -- destroy must never raise;
        # failures are only reported when atom_debug is set.
        try:
            if self.moviefile:
                self.moviefile.destroy()
            if self.movable_atoms:
                self.movable_atoms.destroy()
            self.alist = None
        except:
            if debug_flags.atom_debug:
                print_compact_traceback("atom_debug: exception in alist_and_moviefile.destroy() ignored: ")
        return
    ## why_not_playable = "" #e need to set this to actual reasons when possible
    def might_be_playable(self):
        # whether we looked playable the last time validity was checked
        return self.valid() # from the last time this was checked -- it's not re-checked now
    def valid(self):
        # result of the last match check between moviefile and alist
        return self._valid
    def recheck_valid(self):
        # re-run the moviefile/alist match check and cache the result
        self._valid = self.moviefile.recheck_matches_alist( self.alist)
        #e also check whether atoms are all in same part and not killed? No! We'll play even if these conditions are false.
        return self._valid
    def snuggle_singlets(self):
        # delegate singlet-position correction (and its gl_update) to our atom list
        self.movable_atoms.snuggle_singlets()
    def play_frame(self, n):
        """
        Try to set atoms to positions in frame n.
        Return true if this works, false if n went beyond either end of moviefile.
        (For other errors, print some sort of error message and return false,
        or perhaps just raise an exception. #k)
        """
        mf = self.moviefile
        ma = self.movable_atoms
        if mf.frame_index_in_range(n):
            frame_n = mf.ref_to_transient_frame_n(n)
            ma.set_posns(frame_n) # now we no longer need frame_n
                # (note: set_posns did invals but not updates.)
            ## self.current_frame = n #k might not be needed -- our caller keeps its own version of this (named currentFrame)
            return True ###k does caller, or this method, need to update dashboards and glpanes that care?
        else:
            ## self.pause() ###k guess -- since we presumably hit the end... maybe return errcode instead, let caller decide??
            return False
        pass
    def get_totalFramesActual(self):
        # delegate to the moviefile, which knows the real frame count
        return self.moviefile.get_totalFramesActual()
    def close_file(self):
        # close the underlying movie file (object remains, can't play until reopened)
        self.moviefile.close_file()
    pass # end of class alist_and_moviefile
# == helper functions
def find_saved_movie( assy, mfile):
    """
    Look for a .dpb file of the given name; if found, return a Movie
    object for it, otherwise None.
    """
    movie = Movie(assy)
    if movie.represent_this_moviefile( mfile):
        return movie # succeeded
    # Failed, but did NOT already emit error messages about that
    # (should it? in future, only it knows why it failed).
    return None
def _checkMovieFile(part, filename): #bruce 050913 removed history arg since all callers pass env.history
"""
Returns 0 if filename is (or might be) a valid movie file for the specified part.
Returns 1 if filename does not exist.
Returns 2 if the movie file does not match the part.
Prints error messages to env.history
(whenever return value is not zero).
"""
#bruce 050427 comment: This should be merged with related code in moviefile.py,
# but it looks correct, so I won't do this now. It's now only called from fileOpenMovie.
#bruce 050324 made this a separate function, since it's not about a specific
# Movie instance, just about a Part and a filename. Both args are now required,
# and a new optional arg "history" is both where and whether to print errors
# (both existing calls have been changed to pass it). [bruce 050913 hardcoded that arg to env.history.]
# This function only checks number of atoms, and assumes all atoms of the Part
# must be involved in the movie (in an order known to the Part, not checked,
# though the order can easily be wrong).
# It is not yet updated to handle the "new dpb format" (ie it doesn't get help
# from either file keys or movie ids or atom positions) or movies made from
# a possible future "simulate selection" operation.
print_errors = True
if _DEBUG1:
print "movie._checkMovieFile() function called. filename = ", filename
assert filename #bruce 050324
if not os.path.exists(filename):
if print_errors:
msg = redmsg("Cannot play movie file [" + filename + "]. It does not exist.")
env.history.message(msg)
return 1
#bruce 050411: protect against no part (though better if caller does this); see bug 497.
# Might be better to let part be unspecified and figure out from the moviefile
# which available Part to use, but this is not
# currently possible -- if parts have same contents, it's not even possible in principle
# until we have new DPB format, and not clear how to do it even then (if we only have
# persistent names for files rather than parts).
if part is None:
if debug_flags.atom_debug:
print_compact_stack( "atom_debug: possible bug: part is false (%r) in _checkMovieFile for %s" % (part,filename))
## can't do this, no movie arg!!! self.debug_print_movie_info()
if print_errors:
msg = redmsg("Movie file [" + filename + "] can't be played for current part.") # vaguer & different wording, since bug
env.history.message(msg)
return 2
# start of code that should be moved into moviefile.py and merged with similar code there
filesize = os.path.getsize(filename) - 4
fp = open(filename,'rb')
# Read header (4 bytes) from file containing the number of frames in the movie.
nframes = unpack('i',fp.read(4))[0]
fp.close()
natoms = int(filesize/(nframes*3))
# end of code that should be moved into moviefile.py
kluge_ensure_natoms_correct( part)
if natoms == part.natoms: ## bruce 050324 changed this from natoms == len(self.assy.alist)
return 0
else:
if debug_flags.atom_debug:
print "atom_debug: not natoms == part.natoms, %d %d" % (natoms, part.natoms)
if print_errors:
msg = redmsg("Movie file [" + filename + "] not valid for the current part.")
env.history.message(msg)
msg = redmsg("Movie is for %d frames, size is %d, natoms %d" % (nframes, filesize, natoms))
env.history.message(msg)
msg = redmsg("Current part has %d atoms" % (part.natoms))
env.history.message(msg)
return 2
pass
def kluge_ensure_natoms_correct(part):
    """
    Invalidate part.natoms so it is recomputed when next needed.
    ###@@@ kluge to work around bug in part.natoms not being invalidated enough
    """
    # Assign first (so the del below can't raise if the instance attribute
    # was already absent), then delete to force a recompute on next access.
    part.natoms = None
    del part.natoms
    return
# end
| NanoCAD-master | cad/src/simulation/movie.py |
# Copyright 2005-2008 Nanorex, Inc. See LICENSE file for details.
"""
sim_aspect.py -- represent a "simulatable aspect" (portion) of one Part,
and help with its simulation
@author: Bruce
@version: $Id$
@copyright: 2005-2008 Nanorex, Inc. See LICENSE file for details.
History:
Bruce, probably sometime in 2005, wrote this within runSim.py
Bruce 080321 split this into its own file
"""
from model.chem import AtomDict
from model.elements import Singlet
from utilities.constants import BONDPOINT_LEFT_OUT
from utilities.GlobalPreferences import bondpoint_policy
# has some non-toplevel imports too
# ==
debug_sim_aspect = 0 # DO NOT COMMIT with 1
# renamed from debug_sim when this file split out of runSim.py
# ==
#obs comment:
###@@@ this will be a subclass of SimRun, like Movie will be... no, that's wrong.
# Movie will be subclass of SimResults, or maybe not since those need not be a class
# it's more like an UnderstoodFile and also an UndoableContionuousOperation...
# and it needn't mix with simruns not related to movies.
# So current_movie maybe split from last_simrun? might fix some bugs from aborted simruns...
# for prefs we want last_started_simrun, for movies we want last_opened_movie (only if valid? not sure)...
def atom_is_anchored(atom):
    """
    Is the given atom anchored in space, when simulated?
    """
    ###e refile as atom method?
    #e permit filtering set of specific jigs (instances) that can affect it?
    #e really a Part method??
    # Deliberately query *every* jig rather than short-circuiting, so the
    # (new) anchors_atom method gets exercised for all jigs, as the
    # original code intended. As of 050321, true only for Anchor jigs.
    answers = [jig.anchors_atom(atom) for jig in atom.jigs]
    return True in answers
class sim_aspect:
"""
Class for a "simulatable aspect" (portion, more or less) of a Part.
For now, there's only one kind (a subset of atoms, some fixed in position),
so we won't split out an abstract class for now.
Someday there might be other kinds, e.g. with some chunks treated
as rigid bodies or jigs, with the sim not told about all their atoms.
"""
# Note: as of 051115 this is used for Adjust Selection and/or Adjust All
# and/or possibly Minimize, but not for Run Dynamics [using modern names
# for these features, 080321, but not reverifying current usage];
# verified by debug_sim_aspect output.
# WARNING: this class also assumes internally that those are its only uses,
# by setting mapping.min = True.
def __init__(self, part, atoms,
cmdname_for_messages = "Minimize",
anchor_all_nonmoving_atoms = False
):
#bruce 051129 passing cmdname_for_messages
#bruce 080513 passing anchor_all_nonmoving_atoms
"""
atoms is a list of atoms within the part (e.g. the selected ones,
for Minimize Selection); we copy it in case caller modifies it later.
[Note that this class has no selection object and does not look at
(or change) the "currently selected" state of any atoms,
though some of its comments are worded as if it did.]
We become a simulatable aspect for simulating motion of those atoms
(and of any singlets bonded to them, since user has no way to select
those explicitly),
starting from their current positions, with a "boundary layer" of other
directly bonded atoms (if any) held fixed during the simulation.
[As of 050408 this boundary will be changed from thickness 1 to thickness 2
and its own singlets, if any, will also be anchored rather than moving.
This is because we're approximating letting the entire rest of the Part
be anchored, and the 2nd layer of atoms will constrain bond angles on the
first layer, so leaving it out would be too different from what we're
approximating.]
(If any given atoms have Anchor jigs, those atoms are also treated as
boundary atoms and their own bonds are only explored to an additional depth
of 1 (in terms of bonds) to extend the boundary.
So if the user explicitly selects a complete boundary of Anchored atoms,
only their own directly bonded real atoms will be additionally anchored.)
All atoms not in our list or its 2-thick boundary are ignored --
so much that our atoms might move and overlap them in space.
We look at jigs which attach to our atoms,
but only if we know how to sim them -- we might not, if they also
touch other atoms. For now, we only look at Anchor jigs (as mentioned
above) since this initial implem is only for Minimize. When we have
Simulate Selection, this will need revisiting. [Update: we also look at
other jigs, now that we have Enable In Minimize for motors.]
If we ever need to emit history messages
(e.g. warnings) we'll do it using a global history variable (NIM)
or via part.assy. For now [050406] none are emitted.
"""
if debug_sim_aspect: #bruce 051115 added this
print "making sim_aspect for %d atoms (maybe this only counts real atoms??)" % len(atoms) ###@@@ only counts real atoms??
self.part = part
self.cmdname_for_messages = cmdname_for_messages
# (note: the following atomdicts are only used in this method
# so they don't really need to be attributes of self)
self._moving_atoms = AtomDict()
self._boundary1_atoms = AtomDict()
self._boundary2_atoms = AtomDict()
self._boundary3_atoms = AtomDict()
assert atoms, "no atoms in sim_aspect"
for atom in atoms:
assert atom.molecule.part == part
assert atom.element is not Singlet # when singlets are selectable, this whole thing needs rethinking
if atom_is_anchored(atom):
self._boundary1_atoms[atom.key] = atom
else:
self._moving_atoms[atom.key] = atom
# pretend that all singlets of selected atoms were also selected
# (but were not anchored, even if atom was)
for sing in atom.singNeighbors():
self._moving_atoms[sing.key] = sing
### REVIEW: also include all atoms in the same PAM basepair as atom??
del atoms
if anchor_all_nonmoving_atoms:
#bruce 080513 new feature:
# add all remaining atoms or singlets in part to _boundary1_atoms
# (the other boundary dicts are left empty;
# they are used in later code but this causes no harm)
for mol in part.molecules:
for atom in mol.atoms.itervalues():
if atom.key not in self._moving_atoms:
# no need to check whether it's already in _boundary1_atoms
self._boundary1_atoms[atom.key] = atom
pass
else:
# now find the boundary1 of the _moving_atoms
for moving_atom in self._moving_atoms.values():
for atom2 in moving_atom.realNeighbors():
# (not covering singlets is just an optim, since they're already in _moving_atoms)
# (in fact, it's probably slower than excluding them here! I'll leave it in, for clarity.)
if atom2.key not in self._moving_atoms:
self._boundary1_atoms[atom2.key] = atom2 # might already be there, that's ok
# now find the boundary2 of the _boundary1_atoms;
# treat singlets of boundary1 as ordinary boundary2 atoms (unlike when we found boundary1);
# no need to re-explore moving atoms since we already covered their real and singlet neighbors
for b1atom in self._boundary1_atoms.values():
for atom2 in b1atom.neighbors():
if (atom2.key not in self._moving_atoms) and \
(atom2.key not in self._boundary1_atoms):
self._boundary2_atoms[atom2.key] = atom2 # might be added more than once, that's ok
# now find the boundary3 of the boundary2 atoms
# (not just PAM atoms, since even regular atoms might need this due to torsion terms)
# [finding boundary3 is a bugfix, bruce 080507]
for b2atom in self._boundary2_atoms.values():
for atom3 in b2atom.neighbors():
if (atom3.key not in self._moving_atoms) and \
(atom3.key not in self._boundary1_atoms) and \
(atom3.key not in self._boundary2_atoms):
self._boundary3_atoms[atom3.key] = atom3 # might be added more than once, that's ok
pass
# remove singlets which we don't want to simulate
# [bruce 080507 new feature, not fully implemented or tested]
def fix_dict_for_singlets( dict1):
"""
Remove atoms from dict1 which are bondpoints we want to leave out
of this minimization or simulation.
"""
for atom in dict1.values():
if atom.element is Singlet:
policy = bondpoint_policy(atom, True)
if policy == BONDPOINT_LEFT_OUT:
### todo: keep a count of these, or even a list
del dict1[atom.key]
# BUG: the necessary consequences of doing this
# (e.g. not expecting its coordinates to be present
# in sim results frames or files, for some ways of
# reading them, but updating their positions when
# reading those files anyway, using reposition_bondpoints)
# are NIM as of 080603.
pass
# todo: also record lists of bondpoints to handle later
# in various ways, or a list of atoms whose bondpoints need repositioning
pass
continue
return
fix_dict_for_singlets(self._moving_atoms)
fix_dict_for_singlets(self._boundary1_atoms)
fix_dict_for_singlets(self._boundary2_atoms)
fix_dict_for_singlets(self._boundary3_atoms)
# Finally, come up with a global atom order, and enough info to check our validity later if the Part changes.
# We include all atoms (real and singlet, moving and boundary) in one list, sorted by atom key,
# so later singlet<->H conversion by user wouldn't affect the order.
items = self._moving_atoms.items() + \
self._boundary1_atoms.items() + \
self._boundary2_atoms.items() + \
self._boundary3_atoms.items()
items.sort()
self._atoms_list = [atom for key, atom in items]
# make that a public attribute? nah, use an access method
for i in range(1, len(self._atoms_list)):
assert self._atoms_list[i-1] is not self._atoms_list[i]
# since it's sorted, that proves no atom or singlet appears twice
# anchored_atoms alone (for making boundary jigs each time we write them out)
items = self._boundary1_atoms.items() + self._boundary2_atoms.items() + self._boundary3_atoms.items()
items.sort()
self.anchored_atoms_list = [atom for key, atom in items]
#e validity checking info is NIM, except for the atom lists themselves
return
def atomslist(self):
return list(self._atoms_list)
def natoms_moving(self):
return len(self._atoms_list) - len(self.anchored_atoms_list)
def natoms_fixed(self):
return len(self.anchored_atoms_list)
def nsinglets_H(self):
"""
return number of singlets to be written as H for the sim
"""
singlets = filter( lambda atom: atom.is_singlet(), self._atoms_list )
return len(singlets)
def nsinglets_leftout(self):
"""
return number of singlets to be entirely left out of the sim input file
"""
### @@@ this is currently WRONG for some bondpoint_policy values;
# REVIEW ALL USES [bruce 080507/080603 comment]
# review: should this just be the number that were in _moving_atoms
# (guess yes), or in other dicts too? [bruce 080507 Q]
return 0 # for now
    def writemmpfile(self, filename, **mapping_options):
        #bruce 050404 (for most details).
        # Imitates some of Part.writemmpfile aka files_mmp_writing.writemmpfile_part.
        #e refile into files_mmp so the mmp format code is in the same place? maybe just some of it.
        # in fact the mmp writing code for atoms and jigs is not in files_mmp anyway! tho the reading code is.
        """
        Write our data into an mmp file; only include just enough info to run the sim.

        @param filename: pathname of the mmp file to create (overwritten if present).
        @param mapping_options: extra options passed through to writemmp_mapping;
                                'min' is forced to True here (which implies sim).

        [###e Should we make this work even if the atoms have moved but not restructured since we were made? I think yes.
        That means the validity hash is really made up now, not when we're made.]
        """
        ## do we need to do a part.assy.update_parts() as a precaution?? if so, have to do it earlier, not now.
        from files.mmp.files_mmp_writing import writemmp_mapping
        assy = self.part.assy
        fp = open(filename, "w")
        mapping_options['min'] = True # pass min = True
        mapping = writemmp_mapping(assy, **mapping_options)
        assert mapping.min
        assert mapping.sim
        #e rename min option? (for minimize; implies sim as well;
        # affects mapping attrnames in chem.py atom.writemmp)
        #bruce 051031 comment: it seems wrong that this class assumes min = True
        # (rather than being told this in __init__). ###@@@
        mapping.set_fp(fp)
        # note that this mmp file doesn't need any grouping or chunking info at all.
        try:
            mapping.write_header() ###e header should differ in this case
            ## node.writemmp(mapping)
            self.write_atoms(mapping)
            self.write_anchors(mapping)
            self.write_minimize_enabled_jigs(mapping)
            mapping.write("end mmp file for %s (%s)\n" % (self.cmdname_for_messages, assy.name) ) #bruce 051129 revised this
            # sim & cad both ignore text after 'end'
            #bruce 051115: fixed this file comment, since this code is also used for Minimize All.
        except:
            # close with error = True so mapping can clean up the partial file
            mapping.close(error = True)
            raise
        else:
            mapping.close()
        return
def write_atoms(self, mapping):
for atom in self._atoms_list: # includes both real atoms and singlets, both moving and anchored, all sorted by key
atom.writemmp( mapping) # mapping.sim means don't include any info not relevant to the sim
# Note: this method knows whether & how to write a Singlet as an H (repositioned)!
# Note: this writes bonds, but only after their 2nd atom gets written.
# therefore it will end up only writing bonds for which both atoms get written.
# That should be ok (within Adjust Selection) since atoms with two few bonds
# will be anchored. [bruce 080321 comment]
def write_anchors(self, mapping):
from model.jigs import fake_Anchor_mmp_record
atoms = self.anchored_atoms_list
nfixed = len(atoms)
max_per_jig = 20
for i in range(0, nfixed, max_per_jig): # starting indices of jigs for fixed atoms
indices = range( i, min( i + max_per_jig, nfixed ) )
if debug_sim_aspect:
print "debug_sim_aspect: writing Anchor for these %d indices: %r" % (len(indices), indices)
# now write a fake Anchor which has just the specified atoms
these_atoms = [atoms[i] for i in indices]
line = fake_Anchor_mmp_record( these_atoms, mapping) # includes \n at end
mapping.write(line)
if debug_sim_aspect:
print "debug_sim_aspect: wrote %r" % (line,)
return
def write_minimize_enabled_jigs(self, mapping): # Mark 051006
"""
Writes any jig to the mmp file which has the attr "enable_minimize" = True
"""
assert mapping.min #bruce 051031; detected by writemmp call, below; this scheme is a slight kluge
from model.jigs import Jig
def func_write_jigs(nn):
if isinstance(nn, Jig) and nn.enable_minimize:
#bruce 051031 comment: should we exclude the ones written by write_anchors?? doesn't matter for now. ####@@@@
if debug_sim_aspect:
print "The jig [", nn.name, "] was written to minimize MMP file. It is enabled for minimize."
nn.writemmp(mapping)
return # from func_write_jigs only
self.part.topnode.apply2all( func_write_jigs)
return
pass # end of class sim_aspect
# end
| NanoCAD-master | cad/src/simulation/sim_aspect.py |
# Copyright 2005-2007 Nanorex, Inc. See LICENSE file for details.
"""
SimJob.py - The base class for a simulation job.
(Used only for GAMESS, but unclear whether the code is specific to GAMESS.)
@author: Mark
@version: $Id$
@copyright: 2005-2007 Nanorex, Inc. See LICENSE file for details.
"""
import time
import sys
from PyQt4.Qt import QObject
class SimJob(QObject):
    """
    The base class for a simulation job.

    Stores the job's name and a dictionary of job parameters; each
    parameter is also exposed as an instance attribute of the same name.
    Subclasses (e.g. GamessJob) implement the actual launching.
    """
    def __init__(self, name, parms):
        """
        @param name: the name of this job.
        @param parms: dictionary of job parameters, in key:value pairs.

        For the Gamess Jig, the parms are defined in jig_Gamess.py.

        The parms keys are:
        engine: Engine (MD Simulator or GAMESS)
        calculation: Calculation
        description: General job description
        status: The status of the job (Queued, Running, Completed, Suspended or Failed)
        job_id: Job Id, provided by JobManager.get_job_manager_job_id_and_dir()
        start_time: Job start time
        end_time: Job end time
        """
        QObject.__init__(self)
        self.name = name
        # Snapshot the key list: under Python 3, dict.keys() is a live
        # view, and write_parms iterates self.parms later, so take a
        # stable copy now (this also matches the old Python 2 behavior,
        # where .keys() returned a list).
        self.parms = list(parms.keys())
        #self.parms.sort() # Sort parms.
        self.edit_cntl = None
        # WARNING: Bugs will be caused if any of SimJob's own methods or
        # instance variables had the same name as any of the parameter
        # keys.
        self.__dict__.update(parms)
        return
    def start_job(self):
        """
        Starts the job if it is queued.
        """
        # NOTE(review): no status check is done here (unlike the other
        # job-control methods); this only records the wall-clock start
        # time -- presumably subclasses do the real launching.
        self.starttime = time.time()
    def stop_job(self):
        """
        Stops the job if it is running.
        """
        # stub: only the status guard is implemented
        if self.status != 'Running':
            return
    def suspend_job(self):
        """
        Suspends the job if it is running.
        """
        # stub: only the status guard is implemented
        if self.status != 'Running':
            return
    def resume_job(self):
        """
        Resumes the job if it is suspended.
        """
        # stub: only the status guard is implemented
        if self.status != 'Suspended':
            return
    def edit_job(self):
        """
        Stub; subclasses may override to open an edit dialog.
        """
        pass
    def get_comment_character(self):
        """
        Return the comment prefix for a batch/script file on the
        current platform.
        """
        if sys.platform == 'win32':
            return 'REM ' # Batch file comment for Windows
        else:
            return '# ' # Script file comment for Linux and Mac
    def write_parms(self, f):
        """
        Write job parms to file f.

        @param f: an open, writable file(-like) object.
        """
        rem = self.get_comment_character()
        f.write (rem + '\n' + rem + 'Job Parameters\n' + rem + '\n')
        f.write(rem + "Name: " + self.name + "\n")
        for k in self.parms:
            phrase = rem + k + ': ' + str(self.__dict__[k])
            f.write (phrase + '\n')
        f.write (rem+'\n')
        return
    # TODO: probably we also want to define launch, for subclass to implement,
    # and maybe pass it something... see comments in our existing subclass,
    # GamessJob.
    #
    # def launch(self):
    #     pass
    #
    # [bruce 071216 comment]
    pass # end of class SimJob
# end
| NanoCAD-master | cad/src/simulation/SimJob.py |
# Copyright 2005-2008 Nanorex, Inc. See LICENSE file for details.
"""
sim_commandruns.py -- user-visible commands for running the simulator,
for simulate or minimize (aka Run Dynamics, Minimize, Adjust --
but I'm not sure it's used for all of those)
@author: Bruce
@version: $Id$
@copyright: 2005-2008 Nanorex, Inc. See LICENSE file for details.
History:
Bruce 050324 (and earlier) wrote this (in part by heavily rewriting
existing code) within runSim.py
Bruce 080321 split this into its own file
"""
from utilities.debug import print_compact_traceback
from platform_dependent.PlatformDependent import fix_plurals
from platform_dependent.PlatformDependent import hhmmss_str
from simulation.SimSetup import SimSetup
from simulation.movie import Movie
from utilities.Log import redmsg, greenmsg, orangemsg
import foundation.env as env
from utilities.prefs_constants import Adjust_minimizationEngine_prefs_key
from utilities.prefs_constants import MINIMIZE_ENGINE_UNSPECIFIED
from utilities.prefs_constants import MINIMIZE_ENGINE_GROMACS_FOREGROUND
from utilities.prefs_constants import MINIMIZE_ENGINE_GROMACS_BACKGROUND
# possibly some non-toplevel imports too (of which a few must remain non-toplevel)
from simulation.runSim import FAILURE_ALREADY_DOCUMENTED
from simulation.runSim import writemovie
# these next two are only used in this file; should be split into their own file(s)
from simulation.runSim import readxyz
from simulation.runSim import readGromacsCoordinates
from simulation.sim_aspect import sim_aspect
# ==
class CommandRun: # bruce 050324; mainly a stub for future use when we have a CLI; only used in this file as of 080321
    """
    A single run of a command.

    Commands themselves (as opposed to single runs of them) don't yet
    have first-class objects to represent them, but they can be coded
    and invoked as subclasses of CommandRun.
    """
    def __init__(self, win, *args, **kws):
        self.win = win
        # extra positional args sometimes select the command flavor
        # (e.g. 'Sel' vs 'All' for Minimize)
        self.args = args
        # keyword options; as of 060705 this contains 'type' for
        # Minimize_CommandRun, for the basic command name in the UI
        self.kws = kws
        assy = win.assy
        self.assy = assy
        # current Part (when the command is invoked), on which most
        # commands will operate
        self.part = assy.part
        self.glpane = assy.o #e or let it be accessed via part??
        return
    # end of class CommandRun
class simSetup_CommandRun(CommandRun):
    """
    Class for single runs of the simulator setup command; create it
    when the command is invoked, to prep to run the command once;
    then call self.run() to actually run it.
    """
    cmdname = 'Simulator' #bruce 060106 temporary hack, should be set by subclass ###@@@
    def run(self):
        """
        Put up the SimSetup dialog and, unless it is cancelled, run the
        simulator to produce a movie file, reporting results to history.
        Side effects: may set self.assy.current_movie and enable some
        main-window actions.
        """
        #bruce 050324 made this method from the body of MWsemantics.simSetup
        # and cleaned it up a bit in terms of how it finds the movie to use.
        if not self.part.molecules: # Nothing in the part to simulate.
            msg = redmsg("Nothing to simulate.")
            env.history.message(self.cmdname + ": " + msg)
            self.win.simSetupAction.setChecked(0) # toggle the Simulator icon ninad061113
            return
        env.history.message(self.cmdname + ": " + "Enter simulation parameters and select <b>Run Simulation.</b>")
        ###@@@ we could permit this in movie player mode if we'd now tell that mode to stop any movie it's now playing
        # iff it's the current mode.
        previous_movie = self.assy.current_movie
            # might be None; will be used only to restore self.assy.current_movie if we don't make a valid new one
        self.movie = None
        r = self.makeSimMovie( ) # will store self.movie as the one it made, or leave it as None if cancelled
        movie = self.movie
        self.assy.current_movie = movie or previous_movie
            # (this restores assy.current_movie if there was an error in making new movie, though perhaps nothing changed it anyway)
        if not r: # Movie file saved successfully; movie is a newly made Movie object just for the new file
            assert movie
            # if duration took at least 10 seconds, print msg.
            ## self.progressbar = self.win.progressbar ###k needed???
            ## duration = self.progressbar.duration [bruce 060103 zapped this kluge]
            try:
                duration = movie.duration #bruce 060103
            except:
                # this might happen if earlier exceptions prevented us storing one, so nevermind it for now
                duration = 0.0
            if duration >= 10.0:
                spf = "%.2f" % (duration / movie.totalFramesRequested)
                    ###e bug in this if too few frames were written; should read and use totalFramesActual
                estr = hhmmss_str(duration)
                msg = "Total time to create movie file: " + estr + ", Seconds/frame = " + spf
                env.history.message(self.cmdname + ": " + msg)
            msg = "Movie written to [" + movie.filename + "]." \
                "<br>To play the movie, select <b>Simulation > Play Movie</b>"
            env.history.message(self.cmdname + ": " + msg)
            self.win.simSetupAction.setChecked(0)
            self.win.simMoviePlayerAction.setEnabled(1) # Enable "Movie Player"
            self.win.simPlotToolAction.setEnabled(1) # Enable "Plot Tool"
            #bruce 050324 question: why are these enabled here and not in the subr or even if it's cancelled? bug? ####@@@@
        else:
            assert not movie
            # Don't allow uninformative messages to obscure informative ones - wware 060314
            if r == FAILURE_ALREADY_DOCUMENTED:
                env.history.message(self.cmdname + ": " + "Cancelled.")
                # (happens for any error; more specific message (if any) printed earlier)
        return
    def makeSimMovie(self): ####@@@@ some of this should be a Movie method since it uses attrs of Movie...
        #bruce 050324 made this from the Part method makeSimMovie.
        # It's called only from self.run() above; not clear it should be a separate method,
        # or if it is, that it's split from the caller at the right boundary.
        """
        Run the SimSetup dialog and, unless it was cancelled, write the
        movie file; return 0 on success (storing the new Movie object as
        self.movie) or a nonzero error code (-1 for cancel/kluge cases).
        """
        suffix = self.part.movie_suffix()
        if suffix is None: #bruce 050316 temporary kluge
            msg = redmsg( "Simulator is not yet implemented for clipboard items.")
            env.history.message(self.cmdname + ": " + msg)
            return -1
        ###@@@ else use suffix below!
        self.simcntl = SimSetup(self.win, self.part, suffix = suffix)
            # this now has its own sticky params, doesn't need previous_movie [bruce 060601, fixing bug 1840]
            # Open SimSetup dialog [and run it until user dismisses it]
        movie = self.simcntl.movie # always a Movie object, even if user cancelled the dialog
        if movie.cancelled:
            # user hit Cancel button in SimSetup Dialog. No history msg went out; caller will do that.
            movie.destroy()
            return -1
        r = writemovie(self.part, movie, print_sim_warnings = True, cmdname = self.cmdname)
            # not passing mtype means "run dynamic sim (not minimize), make movie"
            ###@@@ bruce 050324 comment: maybe should do following in that function too
        if not r:
            # Movie file created. Initialize. ###@@@ bruce 050325 comment: following mods private attrs, needs cleanup.
            movie.IsValid = True # Movie is valid.###@@@ bruce 050325 Q: what exactly does this (or should this) mean?
                ###@@@ bruce 050404: need to make sure this is a new obj-- if not always and this is not init False, will cause bugs
            self.movie = movie # bruce 050324 added this
            # it's up to caller to store self.movie in self.assy.current_movie if it wants to.
        return r
    pass # end of class simSetup_CommandRun
def _capitalize_first_word(words): #bruce 060705
res = words[0].upper() + words[1:]
if res == words:
if env.debug():
print "debug warning: %r did not change in _capitalize_first_word" % (words,)
return res
_MIN_ALL, _LOCAL_MIN, _MIN_SEL = range(3) # internal codes for minimize command subtypes (bruce 051129)
# this is a kluge compared to using command-specific subclasses, but better than testing something else like cmdname
class Minimize_CommandRun(CommandRun):
"""
Class for single runs of the commands Minimize Selection, Minimize All,
Adjust Selection, or Adjust All (which one is determined by one or more
__init__ args, stored in self.args by superclass);
client code should create an instance when the command is invoked, to
prepare to run the command once; then call self.run() to actually run it.
[#e A future code cleanup might split this into a Minimize superclass
and separate subclasses for 'All' vs 'Sel' -- or it might not.]
"""
    def run(self):
        """
        Minimize (or Adjust) the Selection or the current Part.

        Which command this is (All / Sel / Atoms; Minimize vs Adjust) is
        determined by self.args[0] and self.kws['type']; the chosen
        engine comes from self.kws['engine'] or the user pref.
        """
        #bruce 050324 made this method from the body of MWsemantics.modifyMinimize
        # and cleaned it up a bit in terms of how it finds the movie to use.
        #bruce 050412 added 'Sel' vs 'All' now that we have two different Minimize buttons.
        # In future the following code might become subclass-specific (and cleaner):
        ## fyi: this old code was incorrect, I guess since 'in' works by 'is' rather than '==' [not verified]:
        ## assert self.args in [['All'], ['Sel']], "%r" % (self.args,)
        #bruce 051129 revising this to clarify it, though command-specific subclasses would be better
        assert len(self.args) >= 1
        cmd_subclass_code = self.args[0]
        cmd_type = self.kws.get('type', 'Minimize')
            # one of 'Minimize' or 'Adjust' or 'Adjust Atoms'; determines conv criteria, name [bruce 060705]
        self.cmd_type = cmd_type # kluge, see comment where used
        # choose the minimization engine (ND-1, or GROMACS in fore/background)
        # from the caller's option, falling back to the user preference
        engine = self.kws.get('engine', MINIMIZE_ENGINE_UNSPECIFIED)
        if (engine == MINIMIZE_ENGINE_UNSPECIFIED):
            engine = env.prefs[Adjust_minimizationEngine_prefs_key]
        if (engine == MINIMIZE_ENGINE_GROMACS_FOREGROUND):
            self.useGromacs = True
            self.background = False
        elif (engine == MINIMIZE_ENGINE_GROMACS_BACKGROUND):
            self.useGromacs = True
            self.background = True
        else:
            self.useGromacs = False
            self.background = False
        assert cmd_subclass_code in ['All', 'Sel', 'Atoms'] #e and len(args) matches that?
        # These words and phrases are used in history messages and other UI text;
        # they should be changed by specific commands as needed.
        # See also some computed words and phrases, e.g. self.word_Minimize,
        # below the per-command if statements. [bruce 060705]
        # Also set flags for other behavior which differs between these commands.
        if cmd_type.startswith('Adjust'):
            self.word_minimize = "adjust"
            self.word_minimization = "adjustment"
            self.word_minimizing = "adjusting"
            anchor_all_nonmoving_atoms = False
            pass
        else:
            assert cmd_type.startswith('Minimize')
            self.word_minimize = "minimize"
            self.word_minimization = "minimization"
            self.word_minimizing = "minimizing"
            anchor_all_nonmoving_atoms = True
                #bruce 080513 revision to implement nfr bug 2848 item 2
                # (note: we might decide to add a checkbox for this into the UI,
                # and just change its default value for Minimize vs Adjust)
            pass
        self.word_Minimize = _capitalize_first_word( self.word_minimize)
        self.word_Minimizing = _capitalize_first_word( self.word_minimizing)
        if cmd_subclass_code == 'All':
            cmdtype = _MIN_ALL
            cmdname = "%s All" % self.word_Minimize
        elif cmd_subclass_code == 'Sel':
            cmdtype = _MIN_SEL
            cmdname = "%s Selection" % self.word_Minimize
        elif cmd_subclass_code == 'Atoms':
            #bruce 051129 added this case for Local Minimize (extending a kluge -- needs rewrite to use command-specific subclass)
            cmdtype = _LOCAL_MIN
            cmdname = "%s Atoms" % self.word_Minimize #bruce 060705; some code may assume this is always Adjust Atoms, as it is
            # self.args is parsed later
        else:
            assert 0, "unknown cmd_subclass_code %r" % (cmd_subclass_code,)
        self.cmdname = cmdname #e in principle this should come from a subclass for the specific command [bruce 051129 comment]
        startmsg = cmdname + ": ..."
        del cmd_subclass_code
        # remove model objects inserted only for feedback from prior runs
        # (both because it's a good feature, and to avoid letting them
        # mess up this command) [bruce 080520]
        from simulation.runSim import part_contains_pam_atoms
            # kluge to use this function for this purpose
            # (it's called later for other reasons)
        hasPAM_junk = part_contains_pam_atoms( self.part,
                                               kill_leftover_sim_feedback_atoms = True )
        self.part.assy.update_parts() ###k is this always safe or good?
        # Make sure some chunks are in the part.
        # (Valid for all cmdtypes -- Minimize only moves atoms, even if affected by jigs.)
        if not self.part.molecules: # Nothing in the part to minimize.
            env.history.message(greenmsg(cmdname + ": ") + redmsg("Nothing to %s." % self.word_minimize))
            return
        if cmdtype == _MIN_SEL:
            selection = self.part.selection_from_glpane() # compact rep of the currently selected subset of the Part's stuff
            if not selection.nonempty():
                msg = greenmsg(cmdname + ": ") + redmsg("Nothing selected.") + \
                    " (Use %s All to %s the entire Part.)" % (self.word_Minimize, self.word_minimize)
                    #e might need further changes for Minimize Energy, if it's confusing that Sel/All is a dialog setting then
                env.history.message( msg)
                return
        elif cmdtype == _LOCAL_MIN:
            from operations.ops_select import selection_from_atomlist
            junk, atomlist, ntimes_expand = self.args
            selection = selection_from_atomlist( self.part, atomlist) #e in cleaned up code, selection object might come from outside
            selection.expand_atomset(ntimes = ntimes_expand) # ok if ntimes == 0
            # Rationale for adding monovalent atoms to the selection before
            # instantiating the sim_aspect
            #
            # (Refer to comments for sim_aspect.__init__.) Why is it safe to add
            # monovalent atoms to a selection? Let's look at what happens during a
            # local minimization.
            #
            # While minimizing, we want to simulate as if the entire rest of the
            # part is grounded, and only our selection of atoms is free to move. The
            # most obvious approach would be to minimize all the atoms in the part
            # while applying anchors to the atoms that aren't in the selection. But
            # minimizing all the atoms, especially if the selection is small, is very
            # wasteful. Applying the simulator to atoms is expensive and we want to
            # minimize as few atoms as possible.
            #
            # [revision, bruce 080513: this discussion applies for Adjust,
            # but the policy for Minimize is being changed to always include
            # all atoms, even if most of them are anchored,
            # re nfr bug 2848 item 2.]
            #
            # A more economical approach is to anchor the atoms for two layers going
            # out from the selection. The reason for going out two layers, and not just
            # one layer, is that we need bond angle terms to simulate accurately. When
            # we get torsion angles we will probably want to bump this up to three
            # layers. [Now we're doing three layers -- bruce 080507]
            #
            # Imagine labeling all the atoms in the selection with zero. Then take the
            # set of unlabeled atoms that are bonded to a zero-labeled atom, and label
            # all the atoms in that set with one. Next, take the set of yet-unlabeled
            # atoms that are bonded to a one-labeled atom, and label the atoms in that
            # set with two. The atoms labeled one and two become our first and second
            # layers, and we anchor them during the minimization.
            #
            # In sim_aspect.__init__, the labels for zero, one and two correspond
            # respectively to membership in the dictionaries self._moving_atoms,
            # self._boundary1_atoms, and self._boundary2_atoms.
            #
            # If an atom in the selection is anchored, we don't need to go two layers
            # out from that atom, only one layer. So we can label it with one, even
            # though it's a member of the selection and would normally be labeled with
            # zero. The purpose in doing this is to give the simulator a few less atoms
            # to worry about.
            #
            # If a jig includes one of the selected atoms, but additionally includes
            # atoms outside the selection, then it may not be obvious how to simulate
            # that jig. For the present, the only jig that counts in a local
            # minimization is an anchor, because all the other jigs are too complicated
            # to simulate.
            #
            # The proposed fix here has the effect that monovalent atoms bonded to
            # zero-labeled atoms are also labeled zero, rather than being labeled one,
            # so they are allowed to move. Why is this OK to do?
            #
            # (1) Have we violated the assumption that the rest of the part is locked
            # down? Yes, as it applies to those monovalent atoms, but they are
            # presumably acceptable violations, since bug 1240 is regarded as a bug.
            #
            # (2) Have we unlocked any bond lengths or bond angles that should remain
            # locked? Again, only those which involve (and necessarily end at) the
            # monovalent atoms in question. The same will be true when we introduce
            # torsion terms.
            #
            # (3) Have we lost any ground on the jig front? If a jig includes one or
            # more of the monovalent atoms, possibly - but the only jigs we are
            # simulating in this case is anchors, and those will be handled correctly.
            # Remember that anchored atoms are only extended one layer, not two, but
            # with a monovalent atom bonded to a selected atom, no extension is
            # possible at all.
            #
            # One can debate about whether bug 1240 should be regarded as a bug. But
            # having accepted it as a bug, one cannot object to adding these monovalents
            # to the original selection.
            #
            # wware 060410 bug 1240
            atoms = selection.selatoms
            for atom in atoms.values():
                # enumerate the monovalents bonded to atom
                for atom2 in filter(lambda atom: not atom.is_singlet(), atom.baggageNeighbors()):
                    atoms[atom2.key] = atom2
        else:
            assert cmdtype == _MIN_ALL
            selection = self.part.selection_for_all()
                # like .selection_from_glpane() but for all atoms presently in the part [bruce 050419]
            # no need to check emptiness, this was done above
        self.selection = selection #e might become a feature of all CommandRuns, at some point
        # At this point, the conditions are met to try to do the command.
        env.history.message(greenmsg( startmsg)) #bruce 050412 doing this earlier
        # Disable some QActions (menu items/toolbar buttons) during minimize.
        self.win.disable_QActions_for_sim(True)
        try:
            simaspect = sim_aspect( self.part,
                                    selection.atomslist(),
                                    cmdname_for_messages = cmdname,
                                    anchor_all_nonmoving_atoms = anchor_all_nonmoving_atoms
                                   )
                #bruce 051129 passing cmdname
                # note: atomslist gets atoms from selected chunks, not only selected atoms
                # (i.e. it gets atoms whether you're in Select Atoms or Select Chunks mode)
            # history message about singlets written as H (if any);
            #bruce 051115 updated comment: this is used for both Minimize All and Minimize Selection as of long before 051115;
            # for Run Sim this code is not used (so this history message doesn't go out for it, though it ought to)
            # but the bug254 X->H fix is done (though different code sets the mapping flag that makes it happen).
            nsinglets_H = simaspect.nsinglets_H()
            if nsinglets_H: #bruce 051209 this message code is approximately duplicated elsewhere in this file
                info = fix_plurals( "(Treating %d bondpoint(s) as Hydrogens, during %s)" % (nsinglets_H, self.word_minimization) )
                env.history.message( info)
            nsinglets_leftout = simaspect.nsinglets_leftout()
            assert nsinglets_leftout == 0 # for now
            # history message about how much we're working on; these atomcounts include singlets since they're written as H
            nmoving = simaspect.natoms_moving()
            nfixed = simaspect.natoms_fixed()
            info = fix_plurals( "(%s %d atom(s)" % (self.word_Minimizing, nmoving))
            if nfixed:
                them_or_it = (nmoving == 1) and "it" or "them"
                if anchor_all_nonmoving_atoms:
                    msg2 = "holding remaining %d atom(s) fixed" % nfixed
                else:
                    msg2 = "holding %d atom(s) fixed around %s" % (nfixed, them_or_it)
                info += ", " + fix_plurals(msg2 )
            info += ")"
            env.history.message( info)
            self.doMinimize(mtype = 1, simaspect = simaspect)
                # mtype = 1 means single-frame XYZ file.
                # [this also sticks results back into the part]
            #self.doMinimize(mtype = 2) # 2 = multi-frame DPB file.
        finally:
            self.win.disable_QActions_for_sim(False)
        simrun = self._movie._simrun #bruce 050415 klugetower
        if not simrun.said_we_are_done:
            env.history.message("Done.")
        return
def doMinimize(self, mtype = 1, simaspect = None):
#bruce 051115 renamed method from makeMinMovie
#bruce 051115 revised docstring to fit current code #e should clean it up more
"""
Minimize self.part (if simaspect is None -- no longer used)
or its given simaspect (simulatable aspect) (used for both Minimize Selection and Minimize All),
generating and showing a movie (no longer asked for) or generating and applying to part an xyz file.
The mtype flag means:
1 = tell writemovie() to create a single-frame XYZ file.
2 = tell writemovie() to create a multi-frame DPB moviefile.
[###@@@ not presently used, might not work anymore]
"""
assert mtype == 1 #bruce 051115
assert simaspect is not None #bruce 051115
#bruce 050324 made this from the Part method makeMinMovie.
suffix = self.part.movie_suffix()
if suffix is None: #bruce 050316 temporary kluge; as of circa 050326 this is not used anymore
msg = "%s is not yet implemented for clipboard items." % self.word_Minimize
env.history.message( redmsg( msg))
return
#e use suffix below? maybe no need since it's ok if the same filename is reused for this.
# bruce 050325 change: don't use or modify self.assy.current_movie,
# since we're not making a movie and don't want to prevent replaying
# the one already stored from some sim run.
# [this is for mtype == 1 (always true now) and might affect writemovie ###@@@ #k.]
# NOTE: the movie object is used to hold params and results from minimize,
# even if it makes an xyz file rather than a movie file.
# And at the moment it never makes a movie file when called from this code.
# [bruce 051115 comment about months-old situation]
movie = Movie(self.assy)
# do this in writemovie? no, the other call of it needs it passed in
# from the dialog... #k
# note that Movie class is misnamed since it's really a
# SimRunnerAndResultsUser... which might use .xyz or .dpb results...
# maybe rename it SimRun? ###e also, it needs subclasses for the
# different kinds of sim runs and their results... or maybe it needs
# a subobject which has such subclasses -- not yet sure. [bruce 050329]
self._movie = movie
#bruce 050415 kluge; note that class SimRun does the same thing.
# Probably it means that this class, SimRun, and this way of using
# class Movie should all be the same, or at least have more links
# than they do now. ###@@@
# Set update_cond for controlling realtime update settings for watching
# this "movie" (an ongoing sim). There are three possible ways
# (soon after A8 only the first one will be used) [bruce 060705]:
# - caller specified it.
# - if it didn't, use new common code to get it from General Prefs page.
# - if that fails, use older code for that.
#
# WARNING: it turns out this happens whether or not the checkbox pref
# says it should -- that is checked separately elsewhere! That's a bug,
# since we need to use a different checkbox depending on the command.
# let's see if we can consolidate the "enabling flag" into
# update_cond itself? so it is None or False if we won't update.
# this is now attempted...
if env.debug():
print "debug fyi: runSim/sim_commandruns watch_motion update_cond computed here " \
"(even if not watching motion)" #bruce 060705
try:
# Only the client code knows where to find the correct realtime
# update settings widgets (or someday, knows whether these values
# come from widgets at all, vs from a script).
# It should figure out the update_cond
# (False if we should not watch motion),
# and tell us in self.kws['update_cond'].
update_cond = self.kws['update_cond']
assert update_cond or (update_cond is False) # a callable or False [remove when works]
# WARNING: as of 080321, this apparently fails routinely
# for Adjust All, and then the first fallback in the
# except clause also fails (userPrefs.watch_motion_buttongroup
# attributeerror), and then its fallback finally works.
# Cleanup is severely needed. [bruce 080321 comment]
except:
## print_compact_traceback("bug ...: ")
if env.debug():
print "debug: fyi: sim_commandruns grabbing userPrefs data"
# For A8, this is normal, since only (at most) Minimize Energy sets self.kws['update_cond'] itself.
# This will be used routinely in A8 by Adjust All and Adjust Selection, and maybe Adjust Atoms (not sure).
#
# Just get the values from the "Adjust" prefs page.
# But at least try to do that using new common code.
try:
from widgets.widget_controllers import realtime_update_controller
userPrefs = env.mainwindow().userPrefs
from utilities.prefs_constants import Adjust_watchRealtimeMinimization_prefs_key
###@@@ should depend on command, or be in movie...
ruc = realtime_update_controller(
( userPrefs.watch_motion_buttongroup,
# Note: watch_motion_buttongroup exists in MinimizeEnergyProp.py
# and in SimSetup.py and now its back in Preferences.py,
# so this is no longer a bug (for "Adjust All"). [mark 2008-06-04]
userPrefs.update_number_spinbox,
userPrefs.update_units_combobox ),
None, # checkbox ###@@@ maybe not needed, since UserPrefs sets up the connection #k
Adjust_watchRealtimeMinimization_prefs_key )
update_cond = ruc.get_update_cond_from_widgets()
# note, if those widgets are connected to env.prefs, that's not handled here or in ruc;
# I'm not sure if they are. Ideally we'd tell ruc the prefs_keys and have it handle that too,
# perhaps making it a long-lived object (though that might not be necessary).
assert update_cond or (update_cond is False) # a callable or False
except:
# even that didn't work. Complain, then fall back to otherwise-obsolete old code.
msg = "bug using realtime_update_controller in sim_commandruns, will use older code instead: "
print_compact_traceback(msg)
# This code works (except for always using the widgets from the General Prefs page,
# even for Minimize Energy), but I'll try to replace it with calls to common code.
# [bruce 060705]
# This code for setting update_cond is duplicated (inexactly)
# in SimSetup.createMoviePressed() in SimSetup.py.
userPrefs = env.mainwindow().userPrefs
update_units = userPrefs.update_units_combobox.currentText()
update_number = userPrefs.update_number_spinbox.value()
if userPrefs.update_asap_rbtn.isChecked():
update_cond = ( lambda simtime, pytime, nframes:
simtime >= max(0.05, min(pytime * 4, 2.0)) )
elif update_units == 'frames':
update_cond = ( lambda simtime, pytime, nframes, _nframes = update_number: nframes >= _nframes )
elif update_units == 'seconds':
update_cond = ( lambda simtime, pytime, nframes, _timelimit = update_number: simtime + pytime >= _timelimit )
elif update_units == 'minutes':
update_cond = ( lambda simtime, pytime, nframes, _timelimit = update_number * 60: simtime + pytime >= _timelimit )
elif update_units == 'hours':
update_cond = ( lambda simtime, pytime, nframes, _timelimit = update_number * 3600: simtime + pytime >= _timelimit )
else:
print "don't know how to set update_cond from (%r, %r)" % (update_number, update_units)
update_cond = None
# new as of 060705, in this old code
if not env.prefs[Adjust_watchRealtimeMinimization_prefs_key]:
update_cond = False
pass
# now do this with update_cond, however it was computed
movie.update_cond = update_cond
# semi-obs comment, might still be useful [as of 050406]:
# Minimize Selection [bruce 050330] (ought to be a distinct
# command subclass...) this will use the spawning code in writemovie
# but has its own way of writing the mmp file.
# To make this clean, we need to turn writemovie into more than one
# method of a class with more than one subclass, so we can override
# one of them (writing mmp file) and another one (finding atom list).
# But to get it working I might just kluge it
# by passing it some specialized options... ###@@@ not sure
movie._cmdname = self.cmdname
#bruce 050415 kluge so writemovie knows proper progress bar caption to use
# (not really wrong -- appropriate for only one of several
# classes Movie should be split into, i.e. one for the way we're using it here,
# to know how to run the sim, which is perhaps really self (a SimRunner),
# once the code is fully cleaned up.
# [review: is that the same SimRunner which is by 080321
# a real class in runSim?]
# write input for sim, and run sim
# this also sets movie.alist from simaspect
r = writemovie(self.part,
movie,
mtype,
simaspect = simaspect,
print_sim_warnings = True,
cmdname = self.cmdname,
cmd_type = self.cmd_type,
useGromacs = self.useGromacs,
background = self.background)
if r:
# We had a problem writing the minimize file.
# Simply return (error message already emitted by writemovie). ###k
return
if mtype == 1: # Load single-frame XYZ file.
if (self.useGromacs):
if (self.background):
return
tracefileProcessor = movie._simrun.tracefileProcessor
newPositions = readGromacsCoordinates(movie.filename + "-out.gro", movie.alist, tracefileProcessor)
else:
newPositions = readxyz( movie.filename, movie.alist )
# movie.alist is now created in writemovie [bruce 050325]
# retval is either a list of atom posns or an error message string.
assert type(newPositions) in [type([]),type("")]
if type(newPositions) == type([]):
#bruce 060102 note: following code is approximately duplicated somewhere else in this file.
movie.moveAtoms(newPositions)
# bruce 050311 hand-merged mark's 1-line bugfix in assembly.py (rev 1.135):
self.part.changed() # Mark - bugfix 386
self.part.gl_update()
else:
#bruce 050404: print error message to history
env.history.message(redmsg( newPositions))
else: # Play multi-frame DPB movie file.
###@@@ bruce 050324 comment: can this still happen? [no] is it correct [probably not]
# (what about changing mode to movieMode, does it ever do that?) [don't know]
# I have not reviewed this and it's obviously not cleaned up (since it modifies private movie attrs).
# But I will have this change the current movie, which would be correct in theory, i think, and might be needed
# before trying to play it (or might be a side effect of playing it, this is not reviewed either).
###e bruce 050428 comment: if self.assy.current_movie exists, should do something like close or destroy it... need to review
self.assy.current_movie = movie
# If cueMovie() returns a non-zero value, something went wrong loading the movie.
if movie.cueMovie():
return
movie._play()
movie._close()
return
pass # end of class Minimize_CommandRun
# ==
class CheckAtomTypes_CommandRun(CommandRun):
    """
    A CommandRun that runs one single-frame simulator pass over the
    whole part with AMBER atom-type feedback enabled, after marking
    every atom with a "?" text overlay.
    """
    def run(self):
        """
        Overlay "?" on every atom, build a sim aspect covering all
        atoms of the part, invoke writemovie() once in
        atom-type-checking mode, then refresh the glpane.
        """
        part = self.part
        if not part.molecules:
            # empty part -- nothing to check
            return
        for chunk in part.molecules:
            if not chunk.atoms:
                continue
            for atom in chunk.atoms.itervalues():
                atom.setOverlayText("?")
            chunk.showOverlayText = True
        whole_selection = part.selection_for_all()
        aspect = sim_aspect( part,
                             whole_selection.atomslist(),
                             cmdname_for_messages = "CheckAtomTypes",
                             anchor_all_nonmoving_atoms = False
                             )
        movie = Movie(self.assy)
        writemovie(part,
                   movie,
                   1,
                   simaspect = aspect,
                   print_sim_warnings = True,
                   cmdname = "Simulator",
                   cmd_type = "Check AtomTypes",
                   useGromacs = False,
                   background = False,
                   useAMBER = True,
                   typeFeedback = True)
        part.gl_update()
        return
def LocalMinimize_function( atomlist, nlayers ): #bruce 051207
    """
    Run an "Adjust Atoms" (local minimize) command on atomlist,
    expanding it by nlayers layers of neighbor atoms.

    @param atomlist: nonempty list of atoms to adjust
    @param nlayers: number of neighbor layers to include
    """
    # kluge: reach the main window through the first atom's part
    win = atomlist[0].molecule.part.assy.w
    #e should probably add in monovalent real atom neighbors -- but before finding neighbors layers, or after?
    # (note that local min will always include singlets... we're just telling it to also treat attached H the same way.
    #  that would suggest doing it after, as an option to Minimize. Hmm, should even Min Sel do it? Discuss.)
    Minimize_CommandRun( win, 'Atoms', atomlist, nlayers,
                         type = 'Adjust Atoms').run()
    return
# == helper code for Minimize Selection [by bruce, circa 050406] [also used for Minimize All, probably as of 050419, as guessed 051115]
def adjustSinglet(singlet, minimize = False): # Mark 2007-10-21.
    """
    Adjusts I{singlet} using one of two methods based on I{minimize}:

    1. Hydrogenate the singlet, then transmute it back to a singlet
       (default). Singlet positions are much better after this, but
       they are not in their optimal location.

    2. Hydrogenate the singlet, then call the simulator via
       L{LocalMinimize_function} to adjust (minimize) the hydrogen atom,
       then transmute the hydrogen back to a singlet. Singlet positions
       are best after using this method, but it has one major drawback --
       it redraws while minimizing. This is a minor problem when breaking
       strands, but is intolerable in the DNA duplex generator (which
       adjusts open bond singlets in its postProcess method).

    @param singlet: A singlet.
    @type  singlet: L{Atom}

    @param minimize: If True, use the minimizer to adjust the singlet
                     (i.e. method #2).
    @type  minimize: bool

    @note: Real atoms are not adjusted; this function returns
           immediately unless I{singlet} really is a singlet.

    @see: L{Hydrogenate} for details about how we are using it to
          reposition singlets (via method 1 mentioned above).
    """
    if not singlet.is_singlet():
        return
    singlet.Hydrogenate()
    if minimize:
        msg = "ATTENTION: Using minimizer to adjust open bond singlets."
        env.history.message( orangemsg(msg) )
        # Singlet is repositioned properly using minimize.
        # The problem is that this redraws while running. Don't want that!
        # Talk to Bruce and Eric M. about it. Mark 2007-10-21.
        LocalMinimize_function( [singlet], nlayers = 0 )
    # Transmute() will not transmute singlets. Since <singlet> is a Hydrogen,
    # and not a singlet, this will work. -mark 2007-10-31 (Boo!)
    from model.elements import Singlet
    singlet.Transmute(Singlet)
    return
# end
| NanoCAD-master | cad/src/simulation/sim_commandruns.py |
# Copyright 2007-2008 Nanorex, Inc. See LICENSE file for details.
"""
YukawaPotential.py
Create an .xvg file suitable for passing as the argument to -table for
mdrun.
@author: Eric M
@version: $Id$
@copyright: 2007-2008 Nanorex, Inc. See LICENSE file for details.
The table consists of 7 columns:
r f f'' g g'' h h''
In other words, three functions evaluated at evenly spaced r values,
and their second derivatives. For the standard Coulomb and
Lennard-Jones potentials, the functions are:
f(r) = 1/r
g(r) = -1/r^6
h(r) = 1/r^12
This table is read by the GROMACS mdrun process when user specified
potential functions are used. We use this for the non-bonded
interactions between PAM5 model DNA helices.
"""
# DELTA_R should be 0.002 for a gromacs compiled to use single
# precision floats, and 0.0005 for doubles.
# Step size (nm) between successive r values in the emitted table.
DELTA_R = 0.0005

# Physical constants; values from Physics, Halliday and Resnick,
# Third Edition, 1978.
ELECTRON_CHARGE = 1.6021892e-19 # Coulombs (A s)
VACUUM_PERMITTIVITY = 8.854187818e-12 # Farads/meter (A^2 s^4 kg^-1 m^-3)
BOLTZMANN = 1.380662e-23 # Joules/Kelvin (kg m^2 s^-2 K^-1)
AVOGADRO = 6.022045e23 # particles/mol

# Effective diameter (nm) of a PAM5 phosphate pseudo-atom.
PHOSPHATE_DIAMETER = 0.4 # nm
import math
class YukawaPotential(object):
    """
    Compute the screened-Coulomb (Yukawa) non-bonded potential used for
    PAM5 DNA phosphate-phosphate interactions, and write it out as a
    GROMACS user-specified potential table.

    The table columns are: r f f'' g g'' h h''.  Column f is 1/r (the
    Coulomb slot), column g is the (optionally switched and shifted)
    Yukawa function, and the h columns are written as zero.
    """
    def __init__(self, simulatorParameters):
        """
        Extract the Yukawa parameters from I{simulatorParameters} and
        precompute the constants used by the potential and by the
        switching function.

        @param simulatorParameters: user-settable simulation parameters
            (see SimulatorParameters; values originate in sim-params.txt).
        """
        self.rCutoff = simulatorParameters.getYukawaRCutoff()
        self.rSwitch = simulatorParameters.getYukawaRSwitch()
        self.shift = simulatorParameters.getYukawaShift()
        self.charge = simulatorParameters.getYukawaCounterionCharge()
        self.molarity = simulatorParameters.getYukawaCounterionMolarity()
        self.temperature = simulatorParameters.getYukawaTemperatureKelvin()
        self.dielectric = simulatorParameters.getYukawaDielectric()
        self.fudge = simulatorParameters.getYukawaConstantMultiple()

        # Bjerrum length (nm)
        L_B = 1e9 * ELECTRON_CHARGE * ELECTRON_CHARGE / (4.0 * math.pi *
                                                         VACUUM_PERMITTIVITY *
                                                         self.dielectric *
                                                         BOLTZMANN *
                                                         self.temperature)
        # (A^2 s^2) (A^-2 s^-4 kg m^3) (kg^-1 m^-2 s^2 K) K^-1 = m
        # about 0.7nm for default conditions

        # Debye length (nm)
        Lambda_D = 1.0 / math.sqrt(4.0 * math.pi * L_B * self.molarity *
                                   self.charge * self.charge)
        # 1/sqrt(nm / litre) --> nm
        # about 1.2nm for default conditions

        mol_K_per_kJ = 1000 / (AVOGADRO * BOLTZMANN)
        t1 = 1.0 + PHOSPHATE_DIAMETER / (2.0 * Lambda_D)
        # phosphate-phosphate contact energy (kJ/mol)
        E_p_p = self.temperature * L_B / \
              (mol_K_per_kJ * PHOSPHATE_DIAMETER * t1 * t1)

        # Yukawa form used below: (YA / r) * exp(-(r - YB) / YC) - YShift
        self.YA = E_p_p * PHOSPHATE_DIAMETER * self.fudge
        self.YB = PHOSPHATE_DIAMETER
        self.YC = Lambda_D

        #print "L_B = %e, Lambda_D = %e" % (L_B, Lambda_D)
        #print "t1 = %e, E_p_p = %e" % (t1, E_p_p)
        #print "mol_K_per_kJ = %e" % mol_K_per_kJ
        #print "YA = %e, YB = %e. YC = %e" % (self.YA, self.YB, self.YC)

        self.YShift = 0.0
        if (self.shift):
            # shift the whole curve so it is exactly zero at the cutoff
            self.YShift = self.yukawa(self.rCutoff)

        # Switching function to smoothly reduce a function to zero.
        # Transition begins when r == start, below which the value is 1. The
        # value smoothly changes to 0 by the time r == end, after which it
        # remains 0.
        #
        # Potential functions are multiplied by the switching function S:
        #
        #        (r/start - 1)^4
        # S = (1 - ----------------- ) ^4
        #        (end/start - 1)^4
        #
        # in the range start < r < end.
        self.switch_len = self.rCutoff - self.rSwitch
        self.switch_len_4 = self.switch_len * self.switch_len * self.switch_len * self.switch_len
        self.switch_d_1 = ((self.rCutoff / self.rSwitch) - 1)
        self.switch_d_2 = -16.0 / (self.switch_d_1 * self.switch_d_1 * self.switch_d_1 * self.switch_d_1 * self.rSwitch)
        self.switch_d2_1 = (self.rCutoff / self.rSwitch) - 1
        self.switch_d2_1_4 = self.switch_d2_1 * self.switch_d2_1 * self.switch_d2_1 * self.switch_d2_1
        self.switch_d2_1_8 = self.switch_d2_1_4 * self.switch_d2_1_4
        self.switch_d2_1_8_start_2 = self.switch_d2_1_8 * self.rSwitch * self.rSwitch
        if (self.switch_len <= 0.0):
            # no switching region configured: use the raw Yukawa function
            self.func = self.yukawa
            self.d2_func = self.d2_yukawa
        else:
            self.func = self.switch_yukawa
            self.d2_func = self.d2_switch_yukawa

    def writeToFile(self, pathName):
        """
        Write the 7-column table (r, f, f'', g, g'', h, h'') to
        I{pathName}, at DELTA_R spacing, suitable for mdrun -table.
        """
        tableFile = open(pathName, 'w')
        r = 0.0
        # We go to 2 * self.rCutoff because GROMACS reuses the mdp
        # option table-extension (how far past self.rCutoff we need to
        # extend the table) as the length of the 1-4 interaction
        # table. Since we want 1-4 interactions to go to self.rCutoff
        # as well, we need table-extension to be self.rCutoff, which
        # results in the normal table being 2 * self.rCutoff. Silly,
        # really.
        while (r < 2 * self.rCutoff + (DELTA_R / 2.0)):
            # file.write() is equivalent to the old "print >>" here and
            # keeps the module importable under both Python 2 and 3.
            tableFile.write(
                "%8.4f %13.6e %13.6e %13.6e %13.6e %13.6e %13.6e\n" % (r,
                                                                       self.r_1(r),
                                                                       self.d2_r_1(r),
                                                                       self.func(r),
                                                                       self.d2_func(r),
                                                                       0,
                                                                       0))
            r += DELTA_R
        tableFile.close()

    # Each r_* function clamps to 0.0 below r = 0.04 nm to avoid the
    # singularity at r = 0 (GROMACS never samples that close anyway).

    def r_1(self, r):
        """1/r (Coulomb column f)."""
        if (r < 0.04):
            return 0.0
        return 1.0 / r

    def d2_r_1(self, r):
        """Second derivative of 1/r."""
        if (r < 0.04):
            return 0.0
        return 2.0 / (r * r * r)

    def r_6(self, r):
        """-1/r^6 (standard dispersion column g; unused for Yukawa tables)."""
        if (r < 0.04):
            return 0.0
        return -1.0 / (r * r * r * r * r * r)

    def d2_r_6(self, r):
        """Second derivative of -1/r^6."""
        if (r < 0.04):
            return 0.0
        return -42.0 / (r * r * r * r * r * r * r * r)

    def r_12(self, r):
        """1/r^12 (standard repulsion column h; unused for Yukawa tables)."""
        if (r < 0.04):
            return 0.0
        return 1.0 / (r * r * r * r * r * r * r * r * r * r * r * r)

    def d2_r_12(self, r):
        """Second derivative of 1/r^12."""
        if (r < 0.04):
            return 0.0
        return 156.0 / (r * r * r * r * r * r * r * r * r * r * r * r * r * r)

    def yukawa(self, r):
        """
        Screened-Coulomb potential (kJ/mol), shifted by YShift.
        Below 0.04 nm, continue with a gentle linear ramp instead of
        following the divergent 1/r behavior.
        """
        if (r < 0.04):
            return self.yukawa(0.04) + 0.04 - r
        return (self.YA / r) * math.exp(-(r - self.YB) / self.YC) - self.YShift

    def d_yukawa(self, r):
        """First derivative of the (unshifted) Yukawa potential."""
        if (r < 0.04):
            return 0.0
        return self.YA * math.exp(-(r - self.YB) / self.YC) * (-1.0/(r * r) - 1.0/(self.YC * r))

    def d2_yukawa(self, r):
        """Second derivative of the (unshifted) Yukawa potential."""
        if (r < 0.04):
            return 0.0
        return self.YA * math.exp(-(r - self.YB) / self.YC) * (2.0/(self.YC * r * r) + 1.0/(self.YC * self.YC * r) + 2.0/(r * r * r))

    def switch(self, r):
        """
        Switching function S(r): 1 below rSwitch, 0 above rCutoff, and a
        smooth quartic-of-quartic transition in between.
        """
        if (r <= self.rSwitch):
            return 1.0
        if (r >= self.rCutoff):
            return 0.0
        rDiff = r - self.rSwitch
        S1 = ((rDiff * rDiff * rDiff * rDiff) / self.switch_len_4) - 1
        return S1 * S1 * S1 * S1

    def d_switch(self, r):
        """First derivative of the switching function."""
        if (r <= self.rSwitch):
            return 0.0
        if (r >= self.rCutoff):
            return 0.0
        t1 = r - self.rSwitch
        t1_4 = t1 * t1 * t1 * t1
        t2 = 1 - t1_4 / self.switch_len_4
        t3 = (r / self.rSwitch) - 1
        t4 = t2 * t2 * t2 * t3 * t3 * t3
        return self.switch_d_2 * t4

    def d2_switch(self, r):
        """Second derivative of the switching function."""
        if (r <= self.rSwitch):
            return 0.0
        if (r >= self.rCutoff):
            return 0.0
        t1 = r - self.rSwitch
        t1_4 = t1 * t1 * t1 * t1
        t2 = t1_4 / self.switch_len_4
        t3 = (r / self.rSwitch) - 1
        t3_2 = t3 * t3
        t3_4 = t3_2 * t3_2
        return (48 * (-(1 - t2) * self.switch_d2_1_4 + 4 * t3_4) * (t2 - 1) * (t2 - 1) * t3_2) / self.switch_d2_1_8_start_2

    def switch_yukawa(self, r):
        """Yukawa potential multiplied by the switching function."""
        return self.switch(r) * self.yukawa(r)

    def d2_switch_yukawa(self, r):
        """Second derivative of switch(r) * yukawa(r) (product rule)."""
        return self.d2_switch(r) * self.yukawa(r) + 2.0 * self.d_switch(r) * self.d_yukawa(r) + self.switch(r) * self.d2_yukawa(r)
| NanoCAD-master | cad/src/simulation/YukawaPotential.py |
NanoCAD-master | cad/src/simulation/ROSETTA/__init__.py |
|
"""
RosettaSimulationPopUpDialog.py
Qt Dialog for setting the arguments for a rosetta simulation
@author: Urmi
@version: $Id$
@copyright:2008 Nanorex, Inc. See LICENSE file for details.
"""
from PyQt4.Qt import SIGNAL, SLOT
from PyQt4.Qt import QSizePolicy
from PyQt4.QtGui import QDialog, QLineEdit, QPushButton, QLabel, QCheckBox
from PyQt4.QtGui import QHBoxLayout, QVBoxLayout, QApplication, QTextEdit
from PyQt4.QtGui import QSpinBox, QSpacerItem
import string
from utilities.icon_utilities import geticon, getpixmap
class RosettaSimulationPopUpDialog(QDialog):
    """
    Dialog for collecting the parameters of a Rosetta simulation run:
    the number of simulations, a set of common option checkboxes, and a
    free-form command line options text edit.
    """
    def __init__(self, parent = None):
        """
        Constructor for Rosetta simulation parameters dialog.

        @param parent: widget that receives the chosen parameters via
                       its setRosettaParameters() method
        """
        self.parentWidget = parent
        super(RosettaSimulationPopUpDialog, self).__init__(parent)
        self.setWindowIcon(geticon('ui/border/Rosetta.png'))
        self.setWindowTitle("Rosetta Simulation Parameters")
        self._loadWidgets()
        self.connectSignals()
        self.show()
        return

    def _loadLogoWidget(self):
        """
        Load the (horizontally centered) Rosetta logo widget.
        """
        logoLayout = QHBoxLayout()
        self.imageLabel = QLabel()
        self.imageLabel.setPixmap(
            getpixmap("ui/images/Rosetta.png"))
        # Horizontal spacer
        hSpacer = QSpacerItem(1, 1,
                              QSizePolicy.Expanding,
                              QSizePolicy.Minimum)
        logoLayout.addItem(hSpacer)
        logoLayout.addWidget(self.imageLabel)
        logoLayout.addItem(hSpacer)
        return logoLayout

    def _loadNumSimWidget(self):
        """
        Load the number-of-simulations spinbox widget (range 1-999).
        """
        idLayout = QHBoxLayout()
        self.label = QLabel("Enter number of simulations:")
        self.numSimSpinBox = QSpinBox()
        self.numSimSpinBox.setMinimum(1)
        self.numSimSpinBox.setMaximum(999)
        idLayout.addWidget(self.label)
        idLayout.addWidget(self.numSimSpinBox)
        return idLayout

    def _loadParameterCheckBoxWidget(self):
        """
        Load the Rosetta simulation parameter checkboxes.
        """
        idLayout1 = QVBoxLayout()
        self.ex1Checkbox = QCheckBox("Expand rotamer library for chi1 angle")
        self.ex1aroCheckbox = QCheckBox("Use large chi1 library for aromatic residues")
        self.ex2Checkbox = QCheckBox("Expand rotamer library for chi2 angle")
        self.ex2aroOnlyCheckbox = QCheckBox("Use large chi2 library only for aromatic residues")
        self.ex3Checkbox = QCheckBox("Expand rotamer library for chi3 angle")
        self.ex4Checkbox = QCheckBox("Expand rotamer library for chi4 angle")
        self.rotOptCheckbox = QCheckBox("Optimize one-body energy")
        self.tryBothHisTautomersCheckbox = QCheckBox("Try both histidine tautomers")
        self.softRepDesignCheckbox = QCheckBox("Use softer Lennard-Jones repulsive term")
        self.useElecRepCheckbox = QCheckBox("Use electrostatic repulsion")
        self.norepackDisulfCheckbox = QCheckBox("Don't re-pack disulphide bonds")
        idLayout1.addWidget(self.ex1Checkbox)
        idLayout1.addWidget(self.ex1aroCheckbox)
        idLayout1.addWidget(self.ex2Checkbox)
        idLayout1.addWidget(self.ex2aroOnlyCheckbox)
        idLayout1.addWidget(self.ex3Checkbox)
        idLayout1.addWidget(self.ex4Checkbox)
        idLayout1.addWidget(self.rotOptCheckbox)
        idLayout1.addWidget(self.tryBothHisTautomersCheckbox)
        idLayout1.addWidget(self.softRepDesignCheckbox)
        idLayout1.addWidget(self.useElecRepCheckbox)
        idLayout1.addWidget(self.norepackDisulfCheckbox)
        return idLayout1

    def _loadCommandLineOptionWidget(self):
        """
        Load the free-form command line options text edit.
        """
        self.otherOptionsLabel = QLabel("Command line options:")
        self.otherCommandLineOptions = QTextEdit()
        self.otherCommandLineOptions.setFixedHeight(80)
        idLayout3 = QVBoxLayout()
        idLayout3.addWidget(self.otherOptionsLabel)
        idLayout3.addWidget(self.otherCommandLineOptions)
        return idLayout3

    def _loadButtonLayoutWidget(self):
        """
        Load the OK/Cancel buttons.
        """
        self.okButton = QPushButton("&OK")
        self.cancelButton = QPushButton("Cancel")
        buttonLayout = QHBoxLayout()
        buttonLayout.addStretch()
        buttonLayout.addWidget(self.okButton)
        buttonLayout.addWidget(self.cancelButton)
        return buttonLayout

    def _loadWidgets(self):
        """
        Assemble all the widgets for this dialog into its layout.
        """
        layout = QVBoxLayout()
        logoLayout = self._loadLogoWidget()
        idLayout = self._loadNumSimWidget()
        idLayout1 = self._loadParameterCheckBoxWidget()
        idLayout3 = self._loadCommandLineOptionWidget()
        buttonLayout = self._loadButtonLayoutWidget()
        layout.addLayout(logoLayout)
        layout.addLayout(idLayout)
        layout.addLayout(idLayout1)
        layout.addLayout(idLayout3)
        layout.addLayout(buttonLayout)
        self.setLayout(layout)
        return

    def connectSignals(self):
        """
        Signal slot connections for the Rosetta simulation parameters
        dialog.
        """
        #signal slot connections for various parameter checkboxes
        self.connect(self.ex1Checkbox, SIGNAL("stateChanged(int)"), self.update_ex1)
        self.connect(self.ex1aroCheckbox, SIGNAL("stateChanged(int)"), self.update_ex1aro)
        self.connect(self.ex2Checkbox, SIGNAL("stateChanged(int)"), self.update_ex2)
        self.connect(self.ex2aroOnlyCheckbox, SIGNAL("stateChanged(int)"), self.update_ex2aro_only)
        self.connect(self.ex3Checkbox, SIGNAL("stateChanged(int)"), self.update_ex3)
        self.connect(self.ex4Checkbox, SIGNAL("stateChanged(int)"), self.update_ex4)
        self.connect(self.rotOptCheckbox, SIGNAL("stateChanged(int)"), self.update_rot_opt)
        self.connect(self.tryBothHisTautomersCheckbox, SIGNAL("stateChanged(int)"), self.update_try_both_his_tautomers)
        self.connect(self.softRepDesignCheckbox, SIGNAL("stateChanged(int)"), self.update_soft_rep_design)
        self.connect(self.useElecRepCheckbox, SIGNAL("stateChanged(int)"), self.update_use_elec_rep)
        self.connect(self.norepackDisulfCheckbox, SIGNAL("stateChanged(int)"), self.update_norepack_disulf)
        #signal slot connections for the push buttons
        self.connect(self.okButton, SIGNAL("clicked()"), self.getRosettaParameters)
        self.connect(self.cancelButton, SIGNAL("clicked()"), self, SLOT("reject()"))
        return

    def _updateCommandLineOptions(self, checkbox, flag):
        """
        Shared implementation behind all of the update_* slots: append
        I{flag} to the command line options text edit if I{checkbox} is
        checked, otherwise remove it.

        @param checkbox: the checkbox controlling this flag
        @type  checkbox: QCheckBox

        @param flag: a Rosetta command line flag, padded with a single
                     space on each side (e.g. ' -ex1 ') so removal by
                     string replacement finds exactly what was appended
        @type  flag: str
        """
        otherOptionsText = str(self.otherCommandLineOptions.toPlainText())
        if checkbox.isChecked() == True:
            otherOptionsText = otherOptionsText + flag
        else:
            otherOptionsText = otherOptionsText.replace(flag, '')
        self.otherCommandLineOptions.setText(otherOptionsText)
        return

    def update_ex1(self, state):
        """
        Add/remove -ex1 when its checkbox changes state.
        @param state: state of the checkbox (unused; the checkbox is queried)
        @type  state: int
        """
        self._updateCommandLineOptions(self.ex1Checkbox, ' -ex1 ')
        return

    def update_ex1aro(self, state):
        """
        Add/remove -ex1aro when its checkbox changes state.
        @param state: state of the checkbox (unused; the checkbox is queried)
        @type  state: int
        """
        self._updateCommandLineOptions(self.ex1aroCheckbox, ' -ex1aro ')
        return

    def update_ex2(self, state):
        """
        Add/remove -ex2 when its checkbox changes state.
        @param state: state of the checkbox (unused; the checkbox is queried)
        @type  state: int
        """
        self._updateCommandLineOptions(self.ex2Checkbox, ' -ex2 ')
        return

    def update_ex2aro_only(self, state):
        """
        Add/remove -ex2aro_only when its checkbox changes state.
        @param state: state of the checkbox (unused; the checkbox is queried)
        @type  state: int
        """
        self._updateCommandLineOptions(self.ex2aroOnlyCheckbox, ' -ex2aro_only ')
        return

    def update_ex3(self, state):
        """
        Add/remove -ex3 when its checkbox changes state.
        @param state: state of the checkbox (unused; the checkbox is queried)
        @type  state: int
        """
        self._updateCommandLineOptions(self.ex3Checkbox, ' -ex3 ')
        return

    def update_ex4(self, state):
        """
        Add/remove -ex4 when its checkbox changes state.
        @param state: state of the checkbox (unused; the checkbox is queried)
        @type  state: int
        """
        self._updateCommandLineOptions(self.ex4Checkbox, ' -ex4 ')
        return

    def update_rot_opt(self, state):
        """
        Add/remove -rot_opt when its checkbox changes state.
        @param state: state of the checkbox (unused; the checkbox is queried)
        @type  state: int
        """
        self._updateCommandLineOptions(self.rotOptCheckbox, ' -rot_opt ')
        return

    def update_try_both_his_tautomers(self, state):
        """
        Add/remove -try_both_his_tautomers when its checkbox changes state.
        @param state: state of the checkbox (unused; the checkbox is queried)
        @type  state: int
        """
        self._updateCommandLineOptions(self.tryBothHisTautomersCheckbox, ' -try_both_his_tautomers ')
        return

    def update_soft_rep_design(self, state):
        """
        Add/remove -soft_rep_design when its checkbox changes state.
        @param state: state of the checkbox (unused; the checkbox is queried)
        @type  state: int
        """
        self._updateCommandLineOptions(self.softRepDesignCheckbox, ' -soft_rep_design ')
        return

    def update_use_elec_rep(self, state):
        """
        Add/remove -use_electrostatic_repulsion when its checkbox changes state.
        @param state: state of the checkbox (unused; the checkbox is queried)
        @type  state: int
        """
        self._updateCommandLineOptions(self.useElecRepCheckbox, ' -use_electrostatic_repulsion ')
        return

    def update_norepack_disulf(self, state):
        """
        Add/remove -norepack_disulf when its checkbox changes state.
        @param state: state of the checkbox (unused; the checkbox is queried)
        @type  state: int
        """
        self._updateCommandLineOptions(self.norepackDisulfCheckbox, ' -norepack_disulf ')
        return

    def getRosettaParameters(self):
        """
        Deliver all the parameters from the Rosetta pop up dialog to the
        parent widget, then close the dialog.
        """
        otherOptionsText = str(self.otherCommandLineOptions.toPlainText())
        numSim = self.numSimSpinBox.value()
        self.parentWidget.setRosettaParameters(numSim, otherOptionsText)
        self.close()
        self.emit(SIGNAL("editingFinished()"))
        return
"""
runRosetta.py -- setting up and running rosetta simulations
@version: $Id$
@copyright: 2008 Nanorex, Inc. See LICENSE file for details.
History:
Urmi copied this file from runSim.py and then modified it.
This file is very different from runSim.py, partly because rosetta simulation
is quite different from gromacs simulation
"""
from files.pdb.files_pdb import writepdb
from files.pdb.files_pdb import insertpdb
from model.chunk import Chunk
from utilities.debug import print_compact_traceback
from platform_dependent.PlatformDependent import find_or_make_Nanorex_subdir
import os, sys, time, string
from widgets.StatusBar import AbortHandler
from datetime import datetime
from PyQt4.Qt import QApplication, QCursor, Qt, QStringList
from PyQt4.Qt import QProcess, QFileInfo
from utilities.Log import redmsg, greenmsg, orangemsg, quote_html, _graymsg
import foundation.env as env
from geometry.VQT import A, vlen
import re
from utilities.constants import filesplit
from processes.Process import Process
from processes.Plugins import checkPluginPreferences
from utilities.prefs_constants import rosetta_enabled_prefs_key, rosetta_path_prefs_key
from utilities.prefs_constants import rosetta_database_enabled_prefs_key, rosetta_dbdir_prefs_key
from protein.model.Protein import write_rosetta_resfile
from foundation.wiki_help import WikiHelpBrowser
#global counter so that repeat runs of rosetta can produce uniquely named
#output files (incremented whenever a name collision is resolved)
count = 1
#same purpose, but for backrub-motion runs (see createUniquePDBOutput())
count_backrub = 1
def showRosettaScore(tmp_file_prefix, scorefile, win):
    """
    Show the rosetta score of the current protein sequence in a
    WikiHelpBrowser window.

    @param tmp_file_prefix: file prefix from which the directory of the
                            score file is extracted
    @type tmp_file_prefix: str

    @param scorefile: name of the rosetta score file (without the .sc
                      extension, which is appended here)
    @type scorefile: str

    @param win: NE-1 window
    @type win: L{gl_pane}
    """
    dir1 = os.path.dirname(tmp_file_prefix)
    scoreFilePath = os.path.join(dir1, scorefile + '.sc')
    # open() raises IOError on failure, so the old "if fileObject1:"
    # truth-test of the file object was dead code; the handle is now
    # also closed (it used to leak).
    fileObject1 = open(scoreFilePath, 'r')
    try:
        doc = fileObject1.readlines()
    finally:
        fileObject1.close()
    # Turn each line into a comma separated list: replace the first
    # space of every run of whitespace with a comma.  Note: the inner
    # loop iterates over the ORIGINAL line object while rebinding the
    # local name 'line'; this works because each rewrite replaces one
    # character, keeping the length unchanged.
    copied_lines = []
    for line in doc:
        i = 0
        firstSpace = True
        for c in line:
            if i > 0 and c == ' ' and firstSpace == True:
                line = line[0:i] + ',' + line[i+1:]
                firstSpace = False
            if c != ' ' and firstSpace == False:
                firstSpace = True
            i = i + 1
            if i == len(line):
                # end of the (same-length) rewritten line reached
                copied_lines.append(line)
    # first line holds column names, second line their values
    # (assumes both rows have the same number of columns -- TODO confirm)
    array_Name = copied_lines[0].split(',')
    array_Score = copied_lines[1].split(',')
    for i in range(len(array_Name)):
        array_Name[i] = array_Name[i].strip()
        array_Score[i] = array_Score[i].strip()
    # format NAME = score pairs as simple HTML
    html = ""
    for i in range(len(array_Name)):
        html = html + "<p><b>" + array_Name[i].upper() + "</b> = "
        html = html + "<font color = red> " + array_Score[i] + "</font></p>"
    w = WikiHelpBrowser(html, parent = win, caption = "Rosetta Scoring Results", size = 1)
    w.show()
    return
def createUniquePDBOutput(tmp_file_prefix, proteinName, win):
    """
    Create a uniquely named output file for rosetta backrub motion simulation
    by copying backrub_low.pdb to <proteinName>_backrub_low.pdb (renamed with
    the global count_backrub counter if a protein chunk of that name already
    exists in the part).

    @param tmp_file_prefix: file prefix from which directory of the pdb file to
                            be saved could be extracted
    @type tmp_file_prefix: str
    @param proteinName: name of the input protein
    @type proteinName: str
    @param win: NE-1 window
    @type win: L{gl_pane}
    @return: output protein name and output pdb file path
    """
    pdbFile = 'backrub_low.pdb'
    dir1 = os.path.dirname(tmp_file_prefix)
    pdbFilePath = os.path.join(dir1, pdbFile)
    fileObject1 = open(pdbFilePath, 'r')
    outFile = proteinName + '_' + pdbFile
    #make sure that this outfile does not already exists,
    #if it exists, then we should assign the out protein a unique name such that
    # its easy to browse through the set of available proteins in the model tree
    for mol in win.assy.molecules:
        #if an output protein chunk with the same name exists, we need to
        #rename the output protein
        # chunk names carry a trailing space (added by insert pdb), hence the
        # normalization here
        tempPdb = outFile[0:len(outFile)-4].lower() + ' '
        if mol.isProteinChunk() and tempPdb == mol.name:
            global count_backrub
            # NOTE(review): the renamed file keeps tempPdb's trailing space
            # (e.g. "name _1.pdb") and is lowercased -- looks suspicious, but
            # downstream name matching relies on it; confirm before changing.
            outFile = tempPdb + '_' + str(count_backrub) + '.pdb'
            count_backrub = count_backrub + 1
            print "using global count backrub", count_backrub
    outputPdbFilePath = os.path.join(dir1, outFile)
    if fileObject1:
        fileObject2 = open(outputPdbFilePath, 'w+')
    else:
        return None
    # copy the backrub_low.pdb contents verbatim into the uniquely named file
    doc = fileObject1.readlines()
    fileObject2.writelines(doc)
    fileObject1.close()
    fileObject2.close()
    outProteinName = outFile[0:len(outFile)-4]
    return outProteinName, outputPdbFilePath
def getScoreFromBackrubOutFile(outputPdbFilePath):
    """
    Get the score from backrub_low.pdb for the current protein sequence design
    with backrub motion.

    @param outputPdbFilePath: path location of the output pdb file on disk
    @type outputPdbFilePath: str
    @return: the score as a string, or None if the file cannot be read or
             contains no SCORE record
    """
    #a separate function for this is needed since we have only one pdb file
    #with backrub that is backrub_low and hence the score is much more easily
    #obtainable from the header
    try:
        fileObject1 = open(outputPdbFilePath, 'r')
    except IOError:
        # bugfix: an unreadable file used to propagate IOError; return None
        # as documented instead
        return None
    try:
        for line in fileObject1:
            # first instance of SCORE; the value starts at column 16
            if line.find("SCORE") != -1:
                score = line[16:].strip()
                pdbFile = os.path.basename(outputPdbFilePath)
                print("For output pdb file " + pdbFile + ", score =  " + str(score))
                return score
    finally:
        # bugfix: the original leaked the file handle when no SCORE record
        # was present
        fileObject1.close()
    return None
def getProteinNameAndSeq(inProtein, outProtein, win):
    """
    Get the protein names for the inProtein and outProtein chunks and the
    corresponding sequences, to be displayed in the popup result dialog.

    @param inProtein: input protein chunk name
    @type inProtein: str
    @param outProtein: output protein chunk name
    @type outProtein: str
    @param win: NE-1 window
    @type win: L{gl_pane}
    @return: [(inProtein name, sequence), (outProtein name, sequence)], or []
             if either chunk cannot be found or the input sequence is empty
    """
    #no idea what insert pdb does to put a space at the end of the chunk name!
    #(chunk names created by inserting a pdb are lowercased and carry a
    # trailing space, so normalize the lookup key the same way)
    outProtein = outProtein.lower() + ' '
    inEntry = None
    outEntry = None
    for mol in win.assy.molecules:
        if mol.isProteinChunk() and inProtein == mol.name:
            inEntry = (inProtein, mol.protein.get_sequence_string())
        if mol.isProteinChunk() and outProtein == mol.name:
            outEntry = (outProtein, mol.protein.get_sequence_string())
    # bugfix: the original built the result list unconditionally, raising
    # NameError when either chunk was missing, and tested the sequence with
    # `is ""` (an identity comparison that only worked via CPython string
    # interning); return [] explicitly in both failure cases instead.
    if inEntry is None or outEntry is None or inEntry[1] == "":
        return []
    return [inEntry, outEntry]
def getScoreFromOutputFile(tmp_file_prefix, outfile, numSim):
    """
    Extract the best (lowest) score over all rosetta simulation output files.

    @param tmp_file_prefix: directory path prefix for the pdb files
    @type tmp_file_prefix: str
    @param outfile: base name of the output files (pdb files)
    @type outfile: str
    @param numSim: number of simulations
    @type numSim: int
    @return: (best score as a string, name of the pdb file with the best
             score), or (None, None) if an output file cannot be read
    """
    outputDir = os.path.dirname(tmp_file_prefix)
    scoredFiles = []
    for i in range(numSim):
        # rosetta numbers its outputs _0001, _0002, ... (unpadded past 9999).
        # zfill replicates that and fixes the original's 2-digit branch,
        # which tested len(str(index)) instead of len(str(index + 1)) and so
        # produced "_10.pdb" instead of "_0010.pdb".
        pdbFile = outfile + '_' + str(i + 1).zfill(4) + '.pdb'
        pdbFilePath = os.path.join(outputDir, pdbFile)
        try:
            f = open(pdbFilePath, 'r')
        except IOError:
            print("Output Pdb file cannot be read to obtain score")
            return None, None
        try:
            for line in f:
                # first instance of "score"; the value starts at column 15
                if line.find("score") != -1:
                    score = line[15:].strip()
                    print("For output pdb file " + pdbFile + ", score =  " + str(score))
                    # keep (score, file) pairs so the best score stays paired
                    # with its own file even if some file lacked a score line
                    # (the original recovered the file name from the list
                    # index, which went wrong in that case)
                    scoredFiles.append((float(score), pdbFile))
                    break
        finally:
            f.close()
    if not scoredFiles:
        return None, None
    # ties resolve to the lowest-numbered file, as before
    minScore, bestPdbFile = min(scoredFiles)
    return str(minScore), bestPdbFile
def processFastaFile(fastaFilePath, bestSimOutFileName, inputProtein):
    """
    Process a fasta file to extract the output protein sequence.

    Walks the fasta file record by record (a '>' header line followed by one
    or more sequence lines, which are concatenated), keeping only the records
    whose name matches the best-scoring output protein or the input protein.

    @param fastaFilePath: path of the fasta file containing all the protein pdb
                          ids and their corresponding sequences
    @type fastaFilePath: str
    @param bestSimOutFileName: pdb file name with the lowest score
    @type bestSimOutFileName: str
    @param inputProtein: pdb id of the protein, input to the Rosetta simulation
    @type inputProtein: str
    @return: a list of (protein name, protein sequence) tuples
    """
    proteinSeqTupleList = []
    f = open(fastaFilePath, 'r')
    # strip the ".pdb" suffix to get the protein name used in fasta headers
    desiredOutProtein = bestSimOutFileName[0:len(bestSimOutFileName)-4]
    if f:
        doc = f.readlines()
        line1 = doc[0]
        i = 0
        while i < len(doc):
            # header lines look like "> name\n"; [2:-1] drops "> " and "\n"
            proteinName = line1[2:len(line1)-1]
            if proteinName.find(".pdb")!= -1:
                proteinName = proteinName[0:len(proteinName)-4]
            #this line is bound to be the sequence
            i = i + 1
            line2 = doc[i]
            proteinSeq = line2[0:len(line2)-1]
            # in case of long sequences, these lines may have part of sequences
            #fasta files do that for better readability
            i = i + 1
            #but you can reach EOF while doing increments within a loop
            #hence you need to write the last protein (name, sequence) tuple
            #before you exit the loop
            if i >= len(doc):
                if proteinName == desiredOutProtein or proteinName == inputProtein:
                    tupleEntry = (proteinName, proteinSeq)
                    proteinSeqTupleList.append(tupleEntry)
                break
            line3 = doc[i]
            # accumulate continuation lines until the next '>' header or EOF
            while 1:
                if line3.find(">")!= -1:
                    #indicates begining of new protein sequence
                    line1 = line3
                    if proteinName == desiredOutProtein or proteinName == inputProtein:
                        tupleEntry = (proteinName, proteinSeq)
                        proteinSeqTupleList.append(tupleEntry)
                    break
                #part of the old sequence, since the sequence spans over multiple lines
                proteinSeq = proteinSeq + line3[0:len(line3)-1]
                i = i + 1
                #writing the last sequence, see comment for similar situation above
                if i >= len(doc):
                    if proteinName == desiredOutProtein or proteinName == inputProtein:
                        tupleEntry = (proteinName, proteinSeq)
                        proteinSeqTupleList.append(tupleEntry)
                    break
                line3 = doc[i]
    else:
        print "File cannot be read"
    f.close()
    return proteinSeqTupleList
def highlightDifferencesInSequence(proteinSeqList):
    """
    Highlight the differences between the input rosetta protein sequence and
    the output rosetta protein sequence(s), relative to the first list entry.

    @param proteinSeqList: (pdb id, sequence) tuples; the first entry is the
                           reference (input) protein
    @type proteinSeqList: list
    @return: a list whose first element is the unmarked reference sequence and
             whose remaining elements are the other sequences with differing
             residues wrapped in <font color=red> tags, together with the
             percentage sequence similarity as a string (e.g. "75.0%")
    """
    referenceSeq = proteinSeqList[0][1]
    markedUp = [referenceSeq]
    matches = 0
    for entry in proteinSeqList[1:]:
        otherSeq = entry[1]
        pieces = []
        for j in range(len(referenceSeq)):
            if referenceSeq[j] == otherSeq[j]:
                pieces.append(referenceSeq[j])
                matches = matches + 1
            else:
                # differing residue: render it in red
                pieces.append("<font color=red>" + otherSeq[j] + "</font>")
        markedUp.append("".join(pieces))
    # similarity of the reference against all compared sequences, in percent
    similarity = str(float(matches * 100 / len(referenceSeq))) + "%"
    return markedUp, similarity
class RosettaRunner:
    """
    Class for running the rosetta simulator.
    [subclasses can run it in special ways, maybe]
    """
    # Flag set when NE-1 is preparing to shut down.  # NOTE(review): not
    # referenced in this portion of the file -- confirm it is still used.
    PREPARE_TO_CLOSE = False
    # Atoms involved in the current simulation aspect, or None.  # presumably;
    # verify against the rest of the class
    used_atoms = None
    def __init__(self, part, mflag,
                 simaspect = None,
                 cmdname = "Rosetta Design",
                 cmd_type = 'Fixed_Backbone_Sequence_Design',
                 useRosetta = False,
                 background = False,
                 ):
        """
        Constructor for Rosetta Runner.
        Set up external relations from the part we'll operate on.

        @param part: NE-1 part
        @type part: L{Part}
        @param mflag: Movie flag
        @type mflag: int
        @note: mflag is not used at all since we are running only one type of
               simulation for now
        @param simaspect: simulation aspect
        @type simaspect:
        @param cmdname: name of the command
        @type cmdname: str
        @param cmd_type: name of type of command
        @type cmd_type: str
        @param useRosetta: whether we should use rosetta or not
        @type useRosetta: bool
        @note: Since we are using only Rosetta to run protein simlations, this
               is unnecessary for now. May be we will use it some day when we
               are using multiple simulators
        @param background: dictates whether a rosetta simulation should run in
                           the background or not
        @type background: bool
        @note: Rosetta is running in the foreground only for now.
        """
        self.assy = assy = part.assy #
        self.win = assy.w
        self.part = part
        self.mflag = mflag
        self.simaspect = simaspect
        self.errcode = 0 # public attr used after we're done;
            # 0 or None = success (so far), >0 = error (msg emitted)
        self.said_we_are_done = False
        self.useRosetta = useRosetta
        self.background = background
        self.rosettaLog = None
        self.tracefileProcessor = None
        self.cmdname = cmdname
        self.cmd_type = cmd_type #060705
        return
    def sim_input_filename(self, args):
        """
        Write the pdb for the part that is in the NE-1 window now and return
        the filename of that pdb (which becomes the simulation input).

        @param args: name of the protein for which simulation should be run,
                     or "" to use the first available protein chunk
        @type args: str
        @return: name of the pdb file which is going to be the starting
                 structure for the current rosetta simulation, or None if no
                 protein chunk is available
        """
        # if we run rosetta from within build protein mode, then we can run
        # rosetta for the current protein which is args
        #if we are outside this mode, we can run rosetta for a selected protein
        #chunk, if there's one
        if args != "":
            pdbId = args
            for mol in self.win.assy.molecules:
                if mol.name == args:
                    chunk = mol
                    break
        else:
            #run it for the first available protein in chunklist
            pdbId, chunk = self.getPDBIDFromChunk()
        if pdbId is None:
            return None
        #input filename
        fileName = pdbId + '.pdb'
        dir = os.path.dirname(self.tmp_file_prefix)
        fileLocation = os.path.join(dir, fileName)
        #since the starting structure could be in arbitrary location in users
        #hard disk, we write a pdb file for the imported/inserted/fetched protein
        #chunk in RosettaDesignFiles directory under Nanorex
        writepdb(self.part, str(fileLocation), singleChunk = chunk)
        return fileName
def getPDBIDFromChunk(self):
"""
Get the first available protein chunk from NE-1 part
@return: pdb id of the first protein chunk and the chunk as well
"""
for chunk in self.win.assy.molecules:
if chunk.isProteinChunk():
return chunk.name, chunk
return None, None
    def removeOldOutputPDBFiles(self):
        """
        Remove all the old output files for rosetta simulations run on the same
        starting structure before running a new rosetta simulation.

        @note: bug in rosetta: a new simulation refuses to run if there's
               pdbid_0001.pdb or any other parameters you have have provided with
               -pdbout in rosetta simulation. We think that pdbid_0001.pdb is
               created first as the main output file at the end of the simulation
               and then its copied to parameter with -pdbout. Hence we need to
               remove all output files related to starting structure pdbid.pdb
               before running a new simulation.
        """
        dir = os.path.dirname(self.tmp_file_prefix)
        infile = self.sim_input_file
        #remove all output files previously created for this pdb
        #In this regular expression match, the first * is for any pdbout name,
        #we generate based on the input name and the second * is for
        #all the numbers of output pdb files that are generated based on the
        #number of simulations
        # (this is an fnmatch glob pattern, not a regular expression)
        outpath = infile[0:len(infile) - 4] + '*' + '_' + '*' + '.pdb'
        from fnmatch import fnmatch
        for file in os.listdir(dir):
            fullname = os.path.join( dir, file)
            if os.path.isfile(fullname):
                if fnmatch( file, outpath):
                    os.remove(fullname)
        return
def setupArgsFromPopUpDialog(self, args):
"""
Besides the default set of arguments there are many command line options
that the user can specify. This parses the user input and generates a list
of those options
@param args: a string of various command line options
for running rosetta separated by space(s)
@type args: str
"""
argStringListFromPopUpDialog = []
#argument 0 is for number of simulations, already handled
#Index of each argument known ahead of time
if args != "":
#break the string into individual words and make a list and extend
# the argument list
tempString = args.replace('\n', ' ')
extraArgs = tempString.split(" ")
#strip extra space around each of these options
extraArgs1 = []
for i in range(len(extraArgs)):
word = extraArgs[i].strip()
if word != '':
extraArgs1.append(word)
argStringListFromPopUpDialog.extend(extraArgs1)
return argStringListFromPopUpDialog
    def setup_sim_args(self, argsFromPopUpDialog, backrubArgs = []):
        """
        Set up arguments for the simulator, by constructing a command line for
        the standalone executable simulator.

        @param argsFromPopUpDialog: a string of various command line options
                                    for running rosetta separated by space(s)
        @type argsFromPopUpDialog: str
        @param backrubArgs: extra arguments for a backrub run
        @type backrubArgs: list
        @note: backrubArgs has a mutable default value; it is never mutated
               here, so this is currently harmless.
        """
        argListFromPopUpDialog = self.setupArgsFromPopUpDialog(argsFromPopUpDialog)
        use_command_line = True
        movie = self._movie # old-code compat kluge
        self.totalFramesRequested = movie.totalFramesRequested
        self.update_cond = movie.update_cond
        program = self.program
        path = self.path
        infile = self.sim_input_file
        # derived output file base names (rosetta appends run numbers itself)
        self.outfile = infile[0:len(infile) - 4] + '_out'
        self.scorefile = infile[0:len(infile) - 4] + '_score'
        #if any of the protein chunks in NE-1 part matches the outfile name,
        #rename the outfile
        #this is necessary, otherwise two chunks with the same name will be
        #created in the model tree and its not easy to figure out in the build
        #protein mode which rosetta run generated it
        # regex matching "<pdbid><chain>_out_<4 digits><chain>" chunk names
        tempPdb = infile[0:len(infile) - 5] + '([A-Z]|[a-z])' + '_out' + '_' + '[0-9][0-9][0-9][0-9]' + '([A-Z]|[a-z])'
        for mol in self.win.assy.molecules:
            #if an output protein chunk with the same name exists, we need to
            #rename the output protein
            if mol.isProteinChunk() and re.match(tempPdb, mol.name) is not None:
                global count
                self.outfile = infile[0:len(infile) - 4] + '_' + str(count) + '_out'
                count = count + 1
        #bug in rosetta: simulation does not work in pdbID_0001.pdb exists in
        #this directory, hence always remove it
        self.removeOldOutputPDBFiles()
        args = []
        if use_command_line:
            #Urmi 20080709 Support for fixed backbone sequence design for now
            if self.cmd_type == "ROSETTA_FIXED_BACKBONE_SEQUENCE_DESIGN":
                args = [
                    '-paths', str(self.path),
                    '-design',
                    '-fixbb',
                    '-profile',
                    '-ndruns', str(self.numSim),
                    '-resfile', str(self.resFile),
                    '-pdbout', str(self.outfile),
                    '-s', infile]
                args.extend(argListFromPopUpDialog)
            elif self.cmd_type == "BACKRUB_PROTEIN_SEQUENCE_DESIGN":
                args = [
                    '-paths', str(self.path),
                    '-ntrials', str(self.numSim),
                    '-pose1',
                    '-backrub_mc',
                    '-resfile', str(self.resFile),
                    '-s', infile]
                args.extend(argListFromPopUpDialog)
                args.extend(backrubArgs)
            elif self.cmd_type == "ROSETTA_SCORE":
                args =[
                    '-paths', str(self.path),
                    '-scorefile', str(self.scorefile),
                    '-score',
                    '-s', infile]
            else:
                args = []
        self._arguments = args
        return # from setup_sim_args
    def set_options_errQ(self, args):
        """
        Figure out and set filenames, including sim executable path.
        All inputs and outputs are self attrs or globals or other obj attrs...
        except, return error code if sim executable missing
        or on other errors detected by subrs.

        @param args: name of the protein for which rosetta simulation is run and
                     if its empty then it is run for the first available chunk
        @type args: str
        @return: -1 on error, None on success
        """
        movie = self._movie
        simFilesPath = find_or_make_Nanorex_subdir('RosettaDesignFiles')
        # Create temporary part-specific filename, for example:
        # "partname-minimize-pid1000".
        # We'll be appending various extensions to tmp_file_prefix to make temp
        # file names for sim input and output files as needed
        if args != "":
            pdbId = args
            for mol in self.win.assy.molecules:
                if mol.name == args:
                    chunk = mol
                    break
        else:
            pdbId, chunk = self.getPDBIDFromChunk()
        if self.cmd_type == "BACKRUB_PROTEIN_SEQUENCE_DESIGN":
            backrubSetupCorrect = chunk.protein.is_backrub_setup_correctly()
            #Urmi 20080807: The backrub motion is so poorly documented that
            #I do not have any idea what is the threshold value
            #my experiments with 2gb1 seems to show that its 3, but I dont know for sure
            if not backrubSetupCorrect:
                msg = redmsg("Rosetta sequence design with backrub motion failed. Please edit your residues properly from Edit REsidues command.")
                env.history.message(self.cmdname + "," + self.cmd_type + ": " + msg)
                return -1
        #write the residue file
        resFile = pdbId + ".resfile"
        resFilePath = os.path.join(simFilesPath, resFile)
        success = write_rosetta_resfile(resFilePath, chunk)
        if success:
            self.resFile = resFile
        else:
            #Shall we refuse to run the program if we cannot write the residue file?
            print "Residue file could not be written"
            return -1
        #remove all previously existing fasta files
        #may not be needed. But we are doing with out pdb, might as well do it
        #fasta and design files as well
        fastaFile = pdbId + "_out_design.fasta"
        checkPointFile = pdbId + "_out_design.checkpoint"
        checkPointPath = os.path.join(simFilesPath, checkPointFile)
        fastaFilePath = os.path.join(simFilesPath, fastaFile)
        if os.path.exists(fastaFilePath):
            os.remove(fastaFilePath)
        if os.path.exists(checkPointPath):
            os.remove(checkPointPath)
        if pdbId is None:
            basename = "Untitled"
        else:
            basename = pdbId
        timestampString = ""
        if (self.background):
            # Add a timestamp to the pid so that multiple backgrounded
            # calculations don't clobber each other's files.
            #We are not running Rosetta in the background now, so may not be useful
            timestamp = datetime.today()
            timestampString = timestamp.strftime(".%y%m%d%H%M%S")
        self.tmp_file_prefix = \
            os.path.join(simFilesPath,
                         "%s-rosetta-design-pid%d%s" % (basename, os.getpid(),
                                                        timestampString))
        #get program path, database path and write path.txt
        self.program = self.getExecutablePluginPath()
        if self.program is None:
            msg = redmsg("The simulator program is missing. Simulation aborted.")
            env.history.message(self.cmdname + ": " + msg)
            return -1
        databasePath = self.getDatabasePluginPath()
        if databasePath is None:
            msg = redmsg("The protein database is missing. Simulation aborted.")
            env.history.message(self.cmdname + ": " + msg)
            return -1
        self.path = self.getPathLocation(databasePath, simFilesPath)
        return None # no error
def getPathLocation(self, dataBasePath, simFilesPath):
"""
Write the paths.txt file required for a rosetta simulation
@param dataBasePath: path for rosetta databae
@type dataBasePath: str
@param simFilesPath: path for rosetta executable
@type simFilesPath: str
@see: rosetta documentation on explanation of the paths.txt file
@return: paths.txt file path
"""
#simplest would be to overwrite the path's file everytime, instead of
#doing text processing to figure out if the file has changed
# paths.txt is small enough to do so
simFilesPath = simFilesPath + '/'
pathFile = simFilesPath + "paths.txt"
f = open(pathFile, "w+")
line = "Rosetta Input/Output Paths (order essential)\n"
f.write(line)
line = "path is first '/', './',or '../' to next whitespace, must end with '/'\n"
f.write(line)
line = "INPUT PATHS:\n"
f.write(line)
word = ["Temp", "Temp"]
# input files will always be in this directory
tempWord = "pdb1"
word[0] = "%-32s" % tempWord
tempWord = simFilesPath + "\n"
word[1] = tempWord
line = ''.join(word)
f.write(line)
tempWord = "pdb2"
word[0] = "%-32s" % tempWord
tempWord = simFilesPath + "\n"
word[1] = tempWord
line = ''.join(word)
f.write(line)
tempWord = "alternate data files"
word[0] = "%-32s" % tempWord
word[1] = dataBasePath + '/\n'
line = ''.join(word)
f.write(line)
tempWord = "fragments"
word[0] = "%-32s" % tempWord
tempWord = simFilesPath + "\n"
word[1] = tempWord
line = ''.join(word)
f.write(line)
tempWord = "structure dssp,ssa (dat,jones)"
word[0] = "%-32s" % tempWord
tempWord = simFilesPath + "\n"
word[1] = tempWord
line = ''.join(word)
f.write(line)
tempWord = "sequence fasta,dat,jones"
word[0] = "%-32s" % tempWord
tempWord = simFilesPath + "\n"
word[1] = tempWord
line = ''.join(word)
f.write(line)
tempWord = "constraints"
word[0] = "%-32s" % tempWord
tempWord = simFilesPath + "\n"
word[1] = tempWord
line = ''.join(word)
f.write(line)
tempWord = "starting structure"
word[0] = "%-32s" % tempWord
tempWord = simFilesPath + "\n"
word[1] = tempWord
line = ''.join(word)
f.write(line)
tempWord = "data files"
word[0] = "%-32s" % tempWord
tempWord = dataBasePath + "/\n"
word[1] = tempWord
line = ''.join(word)
f.write(line)
line = "OUTPUT PATHS:\n"
f.write(line)
tempWord = "movie"
word[0] = "%-32s" % tempWord
tempWord = simFilesPath + "\n"
word[1] = tempWord
line = ''.join(word)
f.write(line)
tempWord = "pdb path"
word[0] = "%-32s" % tempWord
tempWord = simFilesPath + "\n"
word[1] = tempWord
line = ''.join(word)
f.write(line)
tempWord = "score"
word[0] = "%-32s" % tempWord
tempWord = simFilesPath + "\n"
word[1] = tempWord
line = ''.join(word)
f.write(line)
tempWord = "status"
word[0] = "%-32s" % tempWord
tempWord = simFilesPath + "\n"
word[1] = tempWord
line = ''.join(word)
f.write(line)
tempWord = "user"
word[0] = "%-32s" % tempWord
tempWord = simFilesPath + "\n"
word[1] = tempWord
line = ''.join(word)
f.write(line)
line = "FRAGMENTS: (use '*****' in place of pdb name and chain)\n"
f.write(line)
tempWord = "2"
word[0] = "%-39s" % tempWord
tempWord = "number of valid fragment files\n"
word[1] = tempWord
line = ''.join(word)
f.write(line)
tempWord = "3"
word[0] = "%-39s" % tempWord
tempWord = "frag file 1 size\n"
word[1] = tempWord
line = ''.join(word)
f.write(line)
tempWord = "aa*****03_05.200_v1_3"
word[0] = "%-39s" % tempWord
tempWord = "name\n"
word[1] = tempWord
line = ''.join(word)
f.write(line)
tempWord = "9"
word[0] = "%-39s" % tempWord
tempWord = "frag file 2 size\n"
word[1] = tempWord
line = ''.join(word)
f.write(line)
tempWord = "aa*****09_05.200_v1_3"
word[0] = "%-39s" % tempWord
tempWord = "name\n"
word[1] = tempWord
line = ''.join(word)
f.write(line)
f.close()
return pathFile
def getExecutablePluginPath(self):
"""
Get the path of the rosetta executable from the preferences dialog
@return: path for the rosetta executable
"""
plugin_name = "ROSETTA"
plugin_prefs_keys = (rosetta_enabled_prefs_key, rosetta_path_prefs_key)
errorcode, errortext_or_path = \
checkPluginPreferences(plugin_name, plugin_prefs_keys)
if errorcode:
msg = redmsg("Verify Plugin: %s (code %d)" % (errortext_or_path, errorcode))
env.history.message(msg)
return None
program_path = errortext_or_path
return program_path
def getDatabasePluginPath(self):
"""
Get the path of the rosetta database from the preferences dialog
@return: path for the rosetta database
"""
plugin_name = "ROSETTA_DATABASE"
#Urmi 20080710: using the same code as exectuables. Its kind of bad
# but probably ok before RosettaCon
plugin_prefs_keys = (rosetta_database_enabled_prefs_key, rosetta_dbdir_prefs_key)
errorcode, errortext_or_path = \
checkPluginPreferences(plugin_name, plugin_prefs_keys)
if errorcode:
msg = redmsg("Verify Plugin: %s (code %d)" % (errortext_or_path, errorcode))
env.history.message(msg)
return None
dataBase_path = errortext_or_path
return dataBase_path
    def run_rosetta(self, movie, args):
        """
        Main method that executes the rosetta simulation.

        @param movie: simulation object
        @type movie: L{Movie}
        @param args: list of simulation arguments; args[0] is
                     (numSim, popup-dialog options, protein name), and for
                     backrub runs args[2] holds the extra backrub arguments
        @type args: list
        @note: This method needs to be refactored very badly
        """
        self._movie = movie
        # NOTE(review): this compares a list with an int (always True on
        # Python 2); `assert len(args) >= 1` was probably intended -- confirm.
        assert args >= 1
        #we have set it up such that the first element in arg[0] is number of simulations
        self.numSim = args[0][0]
        #set the program path, database path and write the paths.txt in here
        #we have set it up such that the third argument in args[0] always have
        # the name of the protein we are running rosetta simulation for
        #also we say that an error has occurred if we cannot write the resfile.
        #not sure if this should be the case
        self.errcode = self.set_options_errQ( args[0][2])
        if self.errcode: # used to be a local var 'r'
            return
        #get the starting pdb structure for rosetta simulation
        self.sim_input_file = self.sim_input_filename(args[0][2])
        if self.sim_input_file is None:
            return
        #this marks the beginning of the simulation. Although technically we are yet
        # to call QProcess, it seems like a good place to set the waitcursor to True
        self.set_waitcursor(True)
        progressBar = self.win.statusBar().progressBar
        # Disable some QActions (menu items/toolbar buttons) while the sim is running.
        self.win.disable_QActions_for_sim(True)
        try:
            self.simProcess = None
            #sets up the argument list for running rosetta including the ones
            #that were provided in the pop up dialog
            backRubArgs = []
            if len(args) == 3:
                backRubArgs = args[2]
            self.setup_sim_args(args[0][1], backRubArgs)
            # indeterminate progress bar while rosetta runs
            progressBar.setRange(0, 0)
            progressBar.reset()
            progressBar.show()
            env.history.statusbar_msg("Running Rosetta on " + self.sim_input_file[0:len(self.sim_input_file) - 4])
            #this is used to name all the files related to this simulation
            #we make sure that the pdb id is there in the filename so that it is
            #easy to identify for which protein chunk we are running the simulation
            rosettaFullBaseFileName = self.tmp_file_prefix
            rosettaFullBaseFileInfo = QFileInfo(rosettaFullBaseFileName)
            rosettaWorkingDir = rosettaFullBaseFileInfo.dir().absolutePath()
            rosettaBaseFileName = rosettaFullBaseFileInfo.fileName()
            rosettaProcess = Process()
            rosettaProcess.setProcessName("rosetta")
            rosettaProcess.redirect_stdout_to_file("%s-rosetta-stdout.txt" %
                                                   rosettaFullBaseFileName)
            rosettaProcess.redirect_stderr_to_file("%s-rosetta-stderr.txt" %
                                                   rosettaFullBaseFileName)
            rosettaStdOut = rosettaFullBaseFileName + "-rosetta-stdout.txt"
            #rosetta files are all put in RosettaDesignFiles under Nanorex
            rosettaProcess.setWorkingDirectory(rosettaWorkingDir)
            environmentVariables = rosettaProcess.environment()
            rosettaProcess.setEnvironment(environmentVariables)
            msg = greenmsg("Starting Rosetta sequence design")
            env.history.message(self.cmdname + ": " + msg)
            env.history.message("%s: Rosetta files at %s%s%s.*" %
                                (self.cmdname, rosettaWorkingDir, os.sep,
                                 rosettaFullBaseFileInfo.completeBaseName()))
            abortHandler = AbortHandler(self.win.statusBar(), "rosetta")
            #main rosetta simulation call (blocks until rosetta finishes or
            #the user aborts)
            errorCode = rosettaProcess.run(self.program, self._arguments, False, abortHandler)
            abortHandler = None
            if (errorCode != 0):
                if errorCode == -2: # User pressed Abort button in progress dialog.
                    msg = redmsg("Aborted.")
                    env.history.message(self.cmdname + ": " + msg)
                    env.history.statusbar_msg("")
                    if self.simProcess:
                        self.simProcess.kill()
                else:
                    #the stdout will tell the user for what other reason,
                    #the simulation may fail
                    msg = redmsg("Rosetta sequence design failed. For details check" + rosettaStdOut)
                    env.history.message(self.cmdname + ": " + msg)
                    self.errcode = 2;
                    env.history.statusbar_msg("")
            else:
                #Error code is not zero but there's in reality error in stdout
                #check if that be the case
                env.history.statusbar_msg("")
                errorInStdOut = self.checkErrorInStdOut(rosettaStdOut)
                if errorInStdOut:
                    msg = redmsg("Rosetta sequence design failed, Rosetta returned %d" % errorCode)
                    env.history.message(self.cmdname + "," + self.cmd_type + ": " + msg)
                    env.history.statusbar_msg("")
                else:
                    #bug in rosetta: often for some reason or the other rosetta
                    #run does not produce an o/p file. One instance is that if
                    # you already have an output file for this starting structure
                    #already in the directory rosetta refuses to optimize the
                    #structue again even if your residue file has changed
                    #since we remove all related output files before any run on
                    #the same protein, this is not a possible source of error
                    #in our case but there can be other similar problems
                    #Hence we always check the desired output file actually exists
                    #in the RosettaDesignFiles directory before we actually declare
                    #that it has been a successful run
                    if self.cmd_type == "ROSETTA_FIXED_BACKBONE_SEQUENCE_DESIGN":
                        outputFile = self.outfile + '_0001.pdb'
                        outPath = os.path.join(os.path.dirname(self.tmp_file_prefix), outputFile)
                        if os.path.exists(outPath):
                            #if there's the o/p pdb file, then rosetta design "really"
                            #succeeded
                            msg = greenmsg("Rosetta sequence design succeeded")
                            env.history.message(self.cmdname + "> " + self.cmd_type + ": " + msg)
                            #find out best score from all the generated outputs
                            #may be we will do it some day, but for now we only output
                            #the chunk with the lowest energy (Score)
                            score, bestSimOutFileName = getScoreFromOutputFile(self.tmp_file_prefix, self.outfile, self.numSim)
                            chosenOutPath = os.path.join(os.path.dirname(self.tmp_file_prefix), bestSimOutFileName)
                            insertpdb(self.assy, str(chosenOutPath), None)
                            #set the secondary structure of the rosetta output protein
                            #to that of the inpput protein
                            outProtein = self._set_secondary_structure_of_rosetta_output_protein(bestSimOutFileName)
                            #update the protein combo box in build protein mode with
                            #newly created protein chunk
                            self._updateProteinComboBoxInBuildProteinMode(outProtein)
                            env.history.statusbar_msg("")
                            fastaFile = self.outfile + "_design.fasta"
                            fastaFilePath = os.path.join(os.path.dirname(self.tmp_file_prefix), fastaFile)
                            #process th fasta file to find the sequence of the protein
                            #with lowest score
                            proteinSeqList = processFastaFile(fastaFilePath, bestSimOutFileName, self.sim_input_file[0:len(self.sim_input_file)-4])
                            #show a pop up dialog to show the best score and most
                            #optimized sequence
                            # NOTE(review): "is not []" is always True
                            # (identity, not equality) -- confirm the intent
                            # was "proteinSeqList != []"
                            if score is not None and proteinSeqList is not []:
                                self.showResults(score, proteinSeqList)
                        else:
                            #even when there's nothing in stderr or errocode is zero,
                            #rosetta may not output anything.
                            msg1 = redmsg("Rosetta sequence design failed. ")
                            msg2 = redmsg(" %s file was never created by Rosetta." % outputFile)
                            msg = msg1 + msg2
                            env.history.message(self.cmdname + ": " + msg)
                            env.history.statusbar_msg("")
                    if self.cmd_type == "BACKRUB_PROTEIN_SEQUENCE_DESIGN":
                        #its important to set thi pref key to False so that if the
                        #subsequent rosetta run is with fixed backbone then the
                        #resfile is correctly written
                        from utilities.prefs_constants import rosetta_backrub_enabled_prefs_key
                        env.prefs[rosetta_backrub_enabled_prefs_key] = False
                        #Urmi 20080807: first copy the backrub_low.pdb to a new pdb
                        #file with the pdb info also added there
                        outProteinName, outPath = createUniquePDBOutput(self.tmp_file_prefix, self.sim_input_file[0:len(self.sim_input_file)-4], self.win)
                        if outProteinName is None:
                            msg1 = redmsg("Rosetta sequence design with backrub motion has failed. ")
                            msg2 = redmsg(" backrub_low.pdb was never created by Rosetta.")
                            msg = msg1 + msg2
                            env.history.message(self.cmdname + "," + self.cmd_type + ": " + msg)
                            env.history.statusbar_msg("")
                        else:
                            env.history.statusbar_msg("")
                            msg = greenmsg("Rosetta sequence design with backrub motion allowed, succeeded")
                            env.history.message(self.cmdname + "> " + self.cmd_type + ": " + msg)
                            insertpdb(self.assy, str(outPath), None)
                            outProtein = self._set_secondary_structure_of_rosetta_output_protein(outProteinName + ".pdb")
                            self._updateProteinComboBoxInBuildProteinMode(outProtein)
                            inProteinName = self.sim_input_file[0:len(self.sim_input_file)-4]
                            proteinSeqList = getProteinNameAndSeq(inProteinName, outProteinName, self.win)
                            score = getScoreFromBackrubOutFile(outPath)
                            if score is not None and proteinSeqList is not []:
                                self.showResults(score, proteinSeqList)
                    if self.cmd_type == "ROSETTA_SCORE":
                        msg = greenmsg("Rosetta scoring has succeeded")
                        env.history.message(self.cmdname + "> " + self.cmd_type + ": " + msg)
                        showRosettaScore(self.tmp_file_prefix, self.scorefile, self.win)
        except:
            print_compact_traceback("bug in simulator-calling code: ")
            self.errcode = -11111
        # restore UI state whether the run succeeded or not
        self.set_waitcursor(False)
        self.win.disable_QActions_for_sim(False)
        env.history.statusbar_msg("")
        if not self.errcode:
            return # success
        return # caller should look at self.errcode
def _updateProteinComboBoxInBuildProteinMode(self, outProtein):
    """
    Add the newly generated output protein to the protein combo box of
    the Build Protein command, if that command is currently active.

    @param outProtein: rosetta outputted protein chunk
    @type outProtein: L{Chunk}
    """
    buildProteinCommand = \
        self.win.commandSequencer.find_innermost_command_named('BUILD_PROTEIN')
    if not buildProteinCommand:
        # Build Protein mode is not active; nothing to update.
        return
    buildProteinCommand.propMgr.proteinListWidget.addItem(outProtein)
    return
def _set_secondary_structure_of_rosetta_output_protein(self, bestSimOutFileName):
    """
    Set the secondary structure of the rosetta output protein to that of
    the input protein.

    @param bestSimOutFileName: output pdb id with lowest energy score
    @type bestSimOutFileName: str

    @return: output protein chunk with its secondary structure set, or
             None if the output chunk could not be found

    @note: rosetta fixed bb sequence design does not do anything to the
           secondary structure of the output protein. As it remains
           constant, we simply copy it from the input protein.
    """
    # Rosetta names the output chunk after the output pdb file; fixed
    # backbone runs append 'A' while backrub runs append a space.
    baseName = bestSimOutFileName[0:len(bestSimOutFileName) - 4].lower()
    outMatch = ""
    if self.cmd_type == "ROSETTA_FIXED_BACKBONE_SEQUENCE_DESIGN":
        outMatch = baseName + 'A'
    if self.cmd_type == "BACKRUB_PROTEIN_SEQUENCE_DESIGN":
        outMatch = baseName + ' '
    inProteinName = self.sim_input_file[0:len(self.sim_input_file) - 4]
    # Bug fix: the original code left inProtein unbound (NameError at the
    # copy step below) if the input chunk was not found; initialize both
    # to None and guard before use.
    inProtein = None
    outProtein = None
    for mol in self.win.assy.molecules:
        if not mol.isProteinChunk():
            continue
        if mol.name == inProteinName:
            inProtein = mol
        if mol.name == outMatch:
            outProtein = mol
    if outProtein and inProtein:
        outProtein.protein.set_rosetta_protein_secondary_structure(inProtein)
    return outProtein
def showResults(self, score, proteinSeqList):
    """
    Display the rosetta simulation results in a pop up dialog at the end
    of a successful simulation.

    @param score: Score from the most optimized sequence
    @type score: str

    @param proteinSeqList: list of size 2, with (protein, sequence) tuple,
                           containing the input protein and its sequence
                           and the output protein and its corresponding
                           sequence
    @type proteinSeqList: list
    """
    # Assemble the report as a list of fragments and join once at the end.
    htmlParts = [
        "Score of this fixed backbone sequence design using starting",
        " structure " + self.sim_input_file,
        " and residue file " + self.resFile,
        " is " + "<font face=Courier New color=red>" + score + "</font>",
        "<p>The original protein sequence and the designed sequence",
        " are shown below with differences in designed sequence ",
        "shown in red: <br>",
        ]
    # Highlight the differences in sequence between the original protein
    # and the new protein.
    modSeqList, similarity = highlightDifferencesInSequence(proteinSeqList)
    for i in range(len(proteinSeqList)):
        htmlParts.append("<font face=Courier New>" + proteinSeqList[i][0] + "</font> " + "<br>")
        htmlParts.append("<font face=Courier New>" + modSeqList[i] + "</font>" + "<br>")
    htmlParts.append("</p>")
    htmlParts.append("<p>Sequence Similarity = " + similarity + "</p>")
    html = "".join(htmlParts)
    w = WikiHelpBrowser(html, parent = self.win, caption = "Rosetta Sequence Design Results", size = 2)
    w.show()
    return
def checkErrorInStdOut(self, rosettaStdOut):
    """
    Check for an error reported in the Rosetta outputted pdb file.

    @param rosettaStdOut: path of the rosetta outputted pdb file
    @type rosettaStdOut: str

    @return: 1 if there's an error and if not, then 0
    """
    # Bug fix: the original implementation never closed the file handle,
    # leaking one open file per call. Ensure closure via try/finally.
    f = open(rosettaStdOut, 'r')
    try:
        doc = f.read()
    finally:
        f.close()
    if doc.find("ERROR") == -1:
        return 0
    return 1
def set_waitcursor(self, on_or_off):
    """
    For on_or_off True, set the main window waitcursor.
    For on_or_off False, revert to the prior cursor.
    """
    if not on_or_off:
        # Restore whatever cursor was active before the override.
        QApplication.restoreOverrideCursor()
    else:
        QApplication.setOverrideCursor( QCursor(Qt.WaitCursor) )
    return
| NanoCAD-master | cad/src/simulation/ROSETTA/runRosetta.py |
# Copyright 2005-2008 Nanorex, Inc. See LICENSE file for details.
"""
rosetta_commandruns.py -- user-visible commands for running the rosetta simulator,
@author: Urmi
@version: $Id$
@copyright: 2008 Nanorex, Inc. See LICENSE file for details.
History:
Copied from sim_commandruns.py and then modified to suit rosetta simulation
"""
from utilities.debug import print_compact_traceback
from simulation.ROSETTA.RosettaSetup import RosettaSetup
from utilities.Log import redmsg, greenmsg, orangemsg
import foundation.env as env
from simulation.ROSETTA.runRosetta import RosettaRunner
from model.chunk import Chunk
def checkIfProteinChunkInPart(part):
    """
    See if there is a protein among the various chunks in an NE-1 part.

    @param part: NE-1 part
    @type part: L{Part}

    @return: (True, chunk) for the first protein chunk found,
             otherwise (False, None)
    """
    allChunks = []
    def collectChunk(node):
        # apply2all visits every node; keep only the Chunk instances.
        if isinstance(node, Chunk):
            allChunks.append(node)
    part.topnode.apply2all(collectChunk)
    for candidate in allChunks:
        if candidate.isProteinChunk():
            return True, candidate
    return False, None
def writemovie(part,
               args,
               movie,
               mflag = 0,
               simaspect = None,
               print_sim_warnings = False,
               cmdname = "Rosetta Design",
               cmd_type = 'Fixed_Backbone_Sequence_Design',
               useRosetta = False,
               background = False):
    """
    Write an input file for the simulator, then run the simulator,
    in order to create a moviefile (.dpb file), or an .xyz file containing all
    frames(??), or an .xyz file containing what would have
    been the moviefile's final frame. The name of the file it creates is found
    in movie.filename.

    @param part: NE-1 part
    @type part: L{Part}

    @param args: argument list for rosetta simulation
    @type args: list

    @param movie: simulation object
    @type movie: L{Movie}

    @param simaspect: simulation aspect
    @type simaspect:

    @param cmdname: name of the command
    @type cmdname: str

    @param cmd_type: name of type of command
    @type cmd_type: str

    @return: the runner's error code (falsy on success)
    """
    rosettaRun = RosettaRunner(part,
                               mflag,
                               simaspect = simaspect,
                               cmdname = cmdname,
                               cmd_type = cmd_type,
                               useRosetta = useRosetta,
                               background = background,
                               )
    # Remember the runner on the movie so callers can inspect this run later.
    movie._simrun = rosettaRun
    rosettaRun.run_rosetta(movie, args)
    return rosettaRun.errcode
class CommandRun:
    """
    Class for single runs of commands.

    Commands themselves (as opposed to single runs of them)
    don't yet have objects to represent them in a first-class way,
    but can be coded and invoked as subclasses of CommandRun.
    """
    def __init__(self, win, *args, **kws):
        """
        Constructor for CommandRun; stash the main window plus any
        positional/keyword arguments for use by subclasses.
        """
        self.win = win
        self.args = args
        self.kws = kws
        # Convenience aliases pulled off the main window's assembly.
        assy = win.assy
        self.assy = assy
        self.part = assy.part
        self.glpane = assy.o
        return
    pass # end of class CommandRun
class rosettaSetup_CommandRun(CommandRun):
    """
    Class for single runs of the rosetta setup command; create it
    when the command is invoked, to prep to run the command once;
    then call self.run() to actually run it.
    """
    # user-visible command name, used in history messages
    cmdname = 'Rosetta Design'
    def run(self):
        """
        Execute a rosetta simulation.

        Validates that the part contains something to simulate (including
        at least one protein chunk), then delegates to makeSimMovie();
        always unchecks the toolbar action before returning.
        """
        if not self.part.molecules: # Nothing in the part to simulate.
            msg = redmsg("Nothing to simulate.")
            env.history.message(self.cmdname + ": " + msg)
            self.win.rosettaSetupAction.setChecked(0)
            return
        #check if at least one protein chunk is present on the NE-1 window,
        #otherwise there's no point calling the simulator
        proteinExists, chunk = checkIfProteinChunkInPart(self.part)
        if not proteinExists:
            msg = redmsg("No protein to simulate.")
            env.history.message(self.cmdname + ": " + msg)
            self.win.rosettaSetupAction.setChecked(0)
            return
        # iff it's the current mode.
        previous_movie = self.assy.current_movie
        self.movie = None
        r = self.makeSimMovie( ) # will store self.movie as the one it made, or leave it as None if cancelled
        self.win.rosettaSetupAction.setChecked(0)
        return

    def makeSimMovie(self):
        """
        Make simulation movie or in other words execute rosetta simulation.

        @return: the error code from writemovie() (falsy on success),
                 or -1 if the part does not support movies (clipboard item).
        """
        suffix = self.part.movie_suffix()
        if suffix is None:
            msg = redmsg( "Simulator is not yet implemented for clipboard items.")
            env.history.message(self.cmdname + ": " + msg)
            return -1
        self.simcntl = RosettaSetup(self.win, self.part, suffix = suffix)
        movie = self.simcntl.movie
        #we are passing the type of rosetta simulation we intend to run as the second
        #argument in the argument list
        self.cmd_type = self.args[1]
        r = writemovie(self.part, self.args, movie, print_sim_warnings = True,
                       cmdname = self.cmdname, cmd_type = self.cmd_type, useRosetta = True)
        if not r:
            # Movie file created.
            movie.IsValid = True
            self.movie = movie
        return r
# Copyright 2008 Nanorex, Inc. See LICENSE file for details.
"""
RosettaSetup.py
Dialog for setting up to run the simulator.
@author: Urmi
@version: $Id$
@copyright: 2004-2008 Nanorex, Inc. See LICENSE file for details.
Copied and then modified from SimSetup.py
"""
from simulation.movie import Movie
# Module-level holder for the last-used parameter settings within this
# session (see RosettaSetup.__init__, which falls back to this value).
_stickyParams = None
class RosettaSetup:
    """
    Setup object for launching a Rosetta simulator run; creates the Movie
    object that the rest of the rosetta simulation code operates on.
    """
    def __init__(self, win, part, previous_movie = None, suffix = ""):
        """
        Use previous_movie (if passed) for default values, otherwise use
        the same ones last ok'd by the user (whether or not that sim got
        aborted), or default values if that never happened in this session;
        on success or failure, make a new Movie and store it as self.movie.
        """
        # used only for assy.filename
        self.assy = part.assy
        self.suffix = suffix
        # used only for its parameter settings
        # note: as of bruce 060601 fixing bug 1840, previous_movie is no
        # longer ever passed by caller.
        self.previous_movie = previous_movie or _stickyParams or Movie(self.assy)
        # public attr used by client code after we return;
        # always a Movie even on failure.
        newMovie = Movie(self.assy)
        #Urmi 20080709: set movie filename to something for existing code to work
        newMovie.filename = "RosettaDesignTest.xyz"
        self.movie = newMovie
    pass # end of class RosettaSetup
# end
| NanoCAD-master | cad/src/simulation/ROSETTA/RosettaSetup.py |
NanoCAD-master | cad/src/simulation/GROMACS/__init__.py |
|
# Copyright 2007 Nanorex, Inc. See LICENSE file for details.
"""
GROMACS.py - defines class GROMACS, for a temporary demo of
atomic-level-DNA GROMACS simulation
@author: Brian
@version: $Id$
@copyright: 2007 Nanorex, Inc. See LICENSE file for details.
Encapsulates the running of energy minimizations and molecular dynamics
simulations from NE1 using GROMACS and HK_Simulation (the visualization window
of HiveKeeper.
NOTE: THIS CODE IS DESIGNED JUST FOR THE FNANO 2007 DEMO, AND FOR PROTOTYPING,
IE, TO SEE HOW THE VARIOUS COMPONENTS WORK/BEHAVE. NONE OF IT WOULD SURVIVE TO
THE REAL SOLUTION.
Brian Helfrich 2007-03-31
"""
import os
from datetime import datetime
class GROMACS:
    """
    Demo wrapper that runs a GROMACS energy minimization ("em") or molecular
    dynamics run ("md") on an NE1 part: it writes the needed structure files
    (an atomic .pdb, or pseudo-atom topology fragments plus conf.gro) into a
    per-run temp directory, then spawns a batch script to drive GROMACS.
    See the module docstring: this is throwaway demo/prototype code.
    """
    def __init__(self, part):
        # part: the NE1 Part whose structure will be written out and simulated
        self.part = part

        # Note: This GROMACS build doesn't work if there are any spaces in its
        # path, so don't put any.
        #
        self.gmxHome = 'C:/11Nano/CVS-D/cad/plugins/GROMACS/'

        # These are the GROMACS AMBER03 atom types for nucleotide residues in
        # the order they are written out by the DNA generator. We can re-create
        # the PDB-style structure with these, and it works if nothing is done
        # to the structure of the DNA to change the order of the atoms.
        #
        self.adenineAtomTypes = ['P', 'O1P', 'O2P', 'O5\'', 'C5\'', 'C4\'',
            'O4\'', 'C3\'', 'C2\'', 'C1\'', 'N9', 'C8', 'N7', 'C5', 'C6', 'N6',
            'N1', 'C2', 'N3', 'C4', 'H5\'2', 'H5\'1', 'H4\'', 'H1\'', 'H2\'1',
            'H2\'2', 'H3\'', 'H8', 'H61', 'H62', 'H2', 'O3\'']
        self.cytosineAtomTypes = ['P', 'O1P', 'O2P', 'O5\'', 'C5\'', 'C4\'',
            'O4\'', 'C3\'', 'C2\'', 'C1\'', 'N1', 'C2', 'O', 'N3', 'C4', 'N4',
            'C5', 'C6', 'H5\'1', 'H5\'2', 'H4\'', 'H1\'', 'H2\'1', 'H2\'2',
            'H3\'', 'H6', 'H5', 'H41', 'H42', 'O3\'']
        self.guanineAtomTypes = ['P', 'O1P', 'O2P', 'O5\'', 'C5\'', 'C4\'',
            'O4\'', 'C3\'', 'C2\'', 'C1\'', 'N9', 'C8', 'N7', 'C5', 'C6', 'O6',
            'N1', 'C2', 'N2', 'N3', 'C4', 'H5\'1', 'H5\'2', 'H4\'', 'H1\'',
            'H2\'1', 'H2\'2', 'H3\'', 'H8', 'H1', 'H21', 'H22', 'O3\'']
        self.thymineAtomTypes = ['P', 'O1P', 'O2P', 'O5\'', 'C5\'', 'C4\'',
            'O4\'', 'C3\'', 'C2\'', 'C1\'', 'N1', 'C2', 'O', 'N3', 'C4', 'O4',
            'C5', 'C7', 'C6', 'H5\'1', 'H5\'2', 'H4\'', 'H1\'', 'H2\'1',
            'H2\'2', 'H3\'', 'H6', 'H71', 'H72', 'H73', 'H3', 'O3\'']

        # Pseudo-atom tables, indexed by the value getAtomTypeIndex() returns.
        #
        self.pseudoAtomTypes = \
            ['??', 'Ax', 'Ae', 'Ss', 'Sj', 'Pl', 'Pe', 'Sh', 'Hp'] # BUG: element names have been changed since this code worked
        self.pseudoAtomCharges = \
            [0.0, 0.0, 0.0, 0.0, 0.0, -1.0, -2.0, 0.0, 0.0]
        self.pseudoAtomMasses = \
            [0.0, 100, 100, 100, 100, 100, 100, 100, 100]

        # maps atom.key -> 1-based GROMACS atom index, filled during pass 1
        # of the pseudo-atom write (see writePseudoAtomStructure)
        self.atomKeyToIndexMap = {}
        self.debug = False
        return

    def run(self, operation):
        """
        Creates a temp directory, generates a structure files from the part,
        pre-processes them with GROMACS tools, spawns the GROMACS simulation,
        and spawns an HK_Simulation process to view it with.

        operation - either "em" to perform an energy minimization, or "md" to
        perform molecular dynamics simulation
        """
        # Create a unique directory under the Nanorex/SimFiles directory for our
        # files: Nanorex/SimFiles/GMX-<timestamp>
        #
        from platform_dependent.PlatformDependent import find_or_make_Nanorex_subdir
        simFilesPath = find_or_make_Nanorex_subdir('SimFiles')
        timestamp = datetime.today()
        self.tempFilePath = \
            os.path.join(simFilesPath,
                         "GMX-%s" % timestamp.strftime("%Y%m%d%H%M%S"))
        os.mkdir(self.tempFilePath)

        # Create the structure files from our part.
        # File handles are initialized to 0 (not None) and opened lazily by
        # the write helpers; 0 doubles as the "never opened" sentinel below.
        #
        self.atomIndex = 1
        self.residueIndex = 1
        self.pdbFileHandle = 0
        self.atomsFileHandle = 0
        self.confFileHandle = 0
        self.bondsFileHandle = 0
        self.anglesFileHandle = 0
        self.pseudoPass = 1
        partType = self.writeStructure_Helper(self.part.topnode)
        if partType == "pseudo":
            # The first pass was to process and index atoms, now we have
            # sufficient information to determine bonds and angles. It's
            # probably possible to do everything in one pass.
            #
            if self.atomsFileHandle != 0:
                self.atomsFileHandle.close()
            self.pseudoPass = 2
            self.residueIndex = 1
            self.writeStructure_Helper(self.part.topnode)
        if self.pdbFileHandle != 0:
            self.pdbFileHandle.close()
        if self.confFileHandle != 0:
            self.confFileHandle.close()
        if self.bondsFileHandle != 0:
            self.bondsFileHandle.close()
        if self.anglesFileHandle != 0:
            self.anglesFileHandle.close()

        script = ""
        if partType == "pseudo":
            # Combine the fragments of the topology into the topol.top file,
            # tweak with GROMACS tools, and run the operation.
            script = "pseudo_" + operation + ".bat"
        else:
            # Pre-process the .pdb file with the GROMACS tools and run the
            # operation.
            #
            script = "atomic_" + operation + ".bat"
        # Fire-and-forget: the batch script drives GROMACS/HK_Simulation.
        os.spawnl(os.P_NOWAIT, os.path.join(self.gmxHome, script),
                  os.path.join(self.gmxHome, script),
                  os.path.normpath(self.gmxHome),
                  '"' + os.path.normpath(self.tempFilePath) + '"')
        return

    def writeStructure_Helper(self, node):
        """
        Recursively walk the node tree writing structure files.

        Returns "atomic" if a node whose name starts with "strand" was found
        (atomic .pdb written via writeAtomicPDB), otherwise "pseudo"
        (leaf nodes written via writePseudoAtomStructure).
        """
        partType = "pseudo"
        if self.debug: print "node.name=%s" % node.name
        if node.name[0:6] == "strand":
            if self.debug: print "\t atomic helper"
            self.writeAtomicPDB(node)
            partType = "atomic"
        else:
            for childNode in node.members:
                if childNode.is_group():
                    partType = self.writeStructure_Helper(childNode)
                else:
                    if self.debug: print "\t p-atom write"
                    self.writePseudoAtomStructure(childNode)
        return partType

    def writePseudoAtomStructure(self, node):
        """
        Write topology/coordinate data for one pseudo-atom node.

        Pass 1 (self.pseudoPass == 1): emit the [ atoms ] topology fragment
        and build self.atomKeyToIndexMap. Pass 2: emit [ bonds ] and
        [ angles ] fragments plus conf.gro coordinates (nm, hence posn()/10).
        """
        if self.pseudoPass == 1:
            # Process atoms
            #
            # Open the topol.top atoms fragment file if not already open
            #
            if self.atomsFileHandle == 0:
                self.atomsFileHandle = \
                    open(os.path.join(self.tempFilePath, "atoms.frag"), "w")
                self.atomsFileHandle.write("[ atoms ]\n")
                self.atomsFileHandle.write("; atomId atomType residue# residue atom chargeGroup# charge mass\n")
            for atom in node.atoms_in_mmp_file_order():
                if atom.element.eltnum == 0:
                    # skip bondpoints/singlets
                    continue
                atomTypeIndex = self.getAtomTypeIndex(atom.element.eltnum)
                self.atomsFileHandle.write("%8d%10s%10d BAS%6s%14d %8.3f%8.3f\n" % \
                    (self.atomIndex, self.pseudoAtomTypes[atomTypeIndex],
                     self.residueIndex, self.pseudoAtomTypes[atomTypeIndex],
                     self.atomIndex, self.pseudoAtomCharges[atomTypeIndex],
                     self.pseudoAtomMasses[atomTypeIndex]))
                self.atomKeyToIndexMap[atom.key] = self.atomIndex
                self.atomIndex += 1
            self.residueIndex += 1
        else:
            # Process bonds, angles, and generate the conf.gro file
            #
            # Open the topol.top bonds fragment file if not already open
            #
            if self.bondsFileHandle == 0:
                self.bondsFileHandle = \
                    open(os.path.join(self.tempFilePath, "bonds.frag"), "w")
                self.bondsFileHandle.write("\n[ bonds ]\n")
                self.bondsFileHandle.write("; ai aj function\n")
            # Open the topol.top angles fragment file if not already open
            #
            if self.anglesFileHandle == 0:
                self.anglesFileHandle = \
                    open(os.path.join(self.tempFilePath, "angles.frag"), "w")
                self.anglesFileHandle.write("\n[ angles ]\n")
                self.anglesFileHandle.write("; ai aj ak function\n")
            # Open the conf.gro file if not already open
            #
            if self.confFileHandle == 0:
                self.confFileHandle = \
                    open(os.path.join(self.tempFilePath, "conf.gro"), "w")
                self.confFileHandle.write("DNA\n")
                self.confFileHandle.write(" %d\n" % \
                    len(self.atomKeyToIndexMap))
            for atom_1 in node.atoms_in_mmp_file_order():
                if atom_1.element.eltnum == 0:
                    continue
                # Emit conf.gro coordinates
                #
                atomTypeIndex = self.getAtomTypeIndex(atom_1.element.eltnum)
                self.confFileHandle.write("%5d%-5s%5s%5d%8.3f%8.3f%8.3f\n" % \
                    (self.residueIndex, "BAS",
                     self.pseudoAtomTypes[atomTypeIndex],
                     self.atomKeyToIndexMap[atom_1.key],
                     atom_1.posn()[0]/10, atom_1.posn()[1]/10,
                     atom_1.posn()[2]/10))
                # Emit bonds (each pair written once: only when the
                # neighbor's index is greater than ours)
                #
                atom_1_Index = self.atomKeyToIndexMap[atom_1.key]
                if self.debug: print "atom [%s] %d" % (atom_1.key, atom_1_Index)
                bondCount = 0
                bondIndexes = []
                for bond in atom_1.bonds:
                    atom_2 = bond.other(atom_1)
                    if self.debug: print "atom_2.key=%s" % atom_2.key
                    if atom_2.key not in self.atomKeyToIndexMap:
                        continue
                    atom_2_Index = self.atomKeyToIndexMap[atom_2.key]
                    if atom_2_Index > atom_1_Index:
                        self.bondsFileHandle.write("%6d%6d 1\n" % \
                            (atom_1_Index, atom_2_Index))
                    bondIndexes += [atom_2_Index]
                    bondCount += 1
                # Emit angles: one per consecutive neighbor pair around
                # atom_1, plus a wrap-around angle when 3+ neighbors exist
                if bondCount > 1:
                    for index in range(1, bondCount):
                        self.anglesFileHandle.write("%6d%6d%6d 1\n" % \
                            (bondIndexes[index - 1], atom_1_Index,
                             bondIndexes[index]))
                    if bondCount > 2:
                        self.anglesFileHandle.write("%6d%6d%6d 1\n" % \
                            (bondIndexes[bondCount - 1], atom_1_Index,
                             bondIndexes[0]))
            self.residueIndex += 1

    def writePseudoAtomPDB___(self, node):
        """
        This is dead code left here just in case pseudo-atom .pdb files need to
        be generated.

        NOTE(review): self.filehandle is never initialized anywhere in this
        class, so this method would raise AttributeError if called as-is.
        """
        count_Ss = 1
        count_Pl = 1
        for atom in node.atoms_in_mmp_file_order():
            if atom.element.eltnum == 0:
                continue
            coordinates = atom.posn()
            coordinateFields = (coordinates[0], coordinates[1], coordinates[2])
            self.filehandle.write("%-6s" % "ATOM")
            self.filehandle.write("%5d" % self.atomIndex)
            self.filehandle.write(" ")
            if atom.element.eltnum == 200:
                self.filehandle.write("Ax ")
            elif atom.element.eltnum == 201:
                self.filehandle.write("Ss%d " % count_Ss)
                count_Ss += 1
            elif atom.element.eltnum == 202:
                self.filehandle.write("Pl%d " % count_Pl)
                count_Pl += 1
            self.filehandle.write("BAS ")
            self.filehandle.write("%4d" % self.residueIndex)
            self.filehandle.write(" ")
            self.filehandle.write("%8.3f%8.3f%8.3f" % coordinateFields)
            self.filehandle.write(" 1.00 0.00\n");
            self.atomIndex += 1
            self.residueIndex += 1

    def writeAtomicPDB(self, node):
        """
        Write down strand 1
        - first nucleotide: residue name gets a "5", no (P, OP1, OP2)
        - last nucleotide: residue name gets a "3"

        Write up strand 2
        - first (bottom) nucleotide: residue name gets a "5", no (P, OP1, OP2)
        - last (top) nucleotide: residue name gets a "3"
        """
        # Open the .pdb file if not already open
        #
        if self.pdbFileHandle == 0:
            self.pdbFileHandle = \
                open(os.path.join(self.tempFilePath, "dna.pdb"), "w")

        # Need to write residues down strand 1 and up strand 2.
        # Take note of the last nucleotide in each case.
        #
        nodeMembers = list(node.members) # Use a copy of the real list.
        if len(nodeMembers) > 0:
            lastNode = nodeMembers[len(nodeMembers) - 1]
            if node.name == 'strand 2':
                # strand 2 is written in reverse order, so its "last"
                # nucleotide is the first member
                lastNode = nodeMembers[0]
                nodeMembers.reverse()
        nucleotideIndex = 1
        for childNode in nodeMembers:
            if self.debug:
                print "node=%s nucleotideIndex=%d nucleotide=%s " % \
                    (node.name, nucleotideIndex, childNode.name),
                if childNode == lastNode:
                    print "last",
                print "\n"
            atomTypeIndex = 0
            for atom in childNode.atoms_in_mmp_file_order():
                if atom.element.eltnum == 0:
                    continue
                if (nucleotideIndex == 1) & (atomTypeIndex < 3):
                    atomTypeIndex += 1
                    continue # First nucleotide in a strand - no phosphate
                coordinates = atom.posn()
                coordinateFields = (coordinates[0], coordinates[1],
                                    coordinates[2])
                self.pdbFileHandle.write("%-6s" % "ATOM")
                self.pdbFileHandle.write("%5d" % self.atomIndex)
                self.pdbFileHandle.write(" ")
                if childNode.name == 'adenine':
                    self.pdbFileHandle.write("%4s" %
                                             self.adenineAtomTypes[atomTypeIndex])
                    self.pdbFileHandle.write(" DA")
                elif childNode.name == 'cytosine':
                    self.pdbFileHandle.write("%4s" %
                                             self.cytosineAtomTypes[atomTypeIndex])
                    self.pdbFileHandle.write(" DC")
                elif childNode.name == 'guanine':
                    self.pdbFileHandle.write("%4s" %
                                             self.guanineAtomTypes[atomTypeIndex])
                    self.pdbFileHandle.write(" DG")
                elif childNode.name == 'thymine':
                    self.pdbFileHandle.write("%4s" %
                                             self.thymineAtomTypes[atomTypeIndex])
                    self.pdbFileHandle.write(" DT")
                # Handle strand ends
                if nucleotideIndex == 1:
                    self.pdbFileHandle.write("5")
                elif childNode == lastNode:
                    self.pdbFileHandle.write("3")
                else:
                    self.pdbFileHandle.write(" ")
                self.pdbFileHandle.write(" ")
                self.pdbFileHandle.write("%4d" % self.residueIndex)
                self.pdbFileHandle.write(" ")
                self.pdbFileHandle.write("%8.3f%8.3f%8.3f" % coordinateFields)
                self.pdbFileHandle.write(" 1.00 0.00\n");
                atomTypeIndex += 1
                self.atomIndex += 1
            self.residueIndex += 1
            nucleotideIndex += 1
        return

    def getAtomTypeIndex(self, elementNumber):
        """
        Map a PAM pseudo-atom element number (200-207) to an index into the
        pseudoAtomTypes/Charges/Masses tables; returns 0 ('??') for unknown.
        """
        atomTypeIndex = 0 # ??
        if elementNumber == 200: # Ax
            atomTypeIndex = 1
        elif elementNumber == 201: # Ss
            atomTypeIndex = 3
        elif elementNumber == 202: # Pl
            atomTypeIndex = 5
        elif elementNumber == 203: # Sj
            atomTypeIndex = 4
        elif elementNumber == 204: # Ae
            atomTypeIndex = 2
        elif elementNumber == 205: # Pe
            atomTypeIndex = 6
        elif elementNumber == 206: # Sh
            atomTypeIndex = 7
        elif elementNumber == 207: # Hp
            atomTypeIndex = 8
        return atomTypeIndex
| NanoCAD-master | cad/src/simulation/GROMACS/GROMACS.py |
# Copyright 2005-2009 Nanorex, Inc. See LICENSE file for details.
"""
changes.py - utilities for tracking changes, usage, nested events, etc.
@author: Bruce
@version: $Id$
@copyright: 2005-2009 Nanorex, Inc. See LICENSE file for details.
History:
original features were stubs and have mostly been removed.
bruce 050803 new features to help with graphics updates when
preferences are changed.
bruce 061022 soon some of this will be used in the new exprs module.
Later it will need optimization for that use.
...
bruce 071106 split changedicts.py out of changes.py
"""
from utilities.debug import print_compact_traceback, print_compact_stack
import foundation.env as env
from utilities.constants import noop
from utilities import debug_flags
from utilities.Comparison import same_vals #bruce 060306
# == Usage tracking.
_print_all_subs = False # can be set to True by debuggers or around bits of code being debugged
class OneTimeSubsList: #bruce 050804; as of 061022, looks ok for use in new exprs module (and being used there); doc revised 061119
"""
This object corresponds to (one momentary value of) some variable or aspect whose uses (as inputs to
other computations) can be tracked (causing a ref to this object to get added to a set of used things).
When the user of the value corresponding to this object finds this object in the list of all such things
it used during some computation, it can subscribe some function (eg an invalidator for the result of that
computation, which (result) might be another variable, or a side effect like the effect of drawing something)
to the event of that value becoming invalid. [See SubUsageTrackingMixin and usage_tracker_obj for how this is done.]
That subscription will be fulfilled (by self) at most once, ASAP after the corresponding value is known to be invalid.
Exceptions in fulfilling it might be debug-reported but will cause no harm to this object (since it's important
that they don't prevent this object from fulfilling its other subscriptions).
The value-user can also remove that subscription before it gets fulfilled
(or even after? not sure, esp re duplicate funcs provided).
Note: this object's implem seems general enough for any one-time-only subslist,
even though this docstring is only about its application to usage-tracking and inval-subscription.
[###e what to do about that? Is it really reusable??]
[###k Does it permit resubs while fulfilling them (as in changed_members)??]
[Does SelfUsageTrackingMixin make a new one each time it fulfills old one? Yes [verified 061119].]
"""
def __init__(self, debug_name = None):
self.debug_name = debug_name #061118
self._subs = {}
# map from id(func) to list of (zero or more identical elements) func (which are of course strongrefs to func).
# (We need the dict for efficient removal, and multiple copies of func in case duplicate funcs are provided
# (which is quite possible), but that doesn't need to be optimized for, so using a list of copies seems simplest.
# Implem note if we ever do this in C: could replace id(func) with hash(id(func)), i.e. not distinguish between
# equal funcs and funcs with same hashcode.)
def __repr__(self): #061118
return "<%s%s at %#x>" % (self.__class__.__name__, self.debug_name and ("(%s)" % self.debug_name) or '', id(self))
def subscribe(self, func):
try:
subs = self._subs
except AttributeError:
# our event already occurred (as indicated by fulfill_all removing this attribute).
# [not sure if this ever happens in initial uses of this class]
# (note: if subscribe could come before __init__, e.g. due to some sort of bug
# in which this obj got unpickled, this could also happen.)
if debug_flags.atom_debug:
#e Remove this debug print if this non-error happens routinely (and turns out not to reveal a bug).
# It never happened enough to notice until circa 061118 in exprs module; I don't yet know why it happens there,
# but it started after an LvalUnset exception was added and some exception-ignorers were changed to pass that on;
# it is also associated (but not always) with the same invals happening twice.
print_compact_stack( "atom_debug: fyi: %r's event already occurred, fulfilling new subs %r immediately: " % (self, func))
self._fulfill1(func)
else:
lis = subs.setdefault( id(func), [])
lis.append(func)
return #e return a unique "removal code" for this subs?? or None if we just fulfilled it now.
def fulfill_all(self, debug = False):
"""
Fulfill all our subscriptions now (and arrange to immediately
fulfill any subscriptions that come in later).
You must only call this once.
"""
subs = self._subs
del self._subs # (this would expose the class's definition of _subs, if there is one; presently there's not)
# Does this make it illegal to subscribe to this object ever again? No!
# It causes such a subs to be immediately fulfilled.
for sublis in subs.values(): #bruce 060405 precaution: itervalues -> values (didn't analyze whether needed)
for sub1 in sublis:
# note: fulfilling at most one elt might be acceptable if we redefined API to permit that
# (since all elts are identical),
# but it wouldn't much simplify the code, since the list length can legally be zero.
self._fulfill1(sub1, debug = debug)
pass
return
def _list_of_subs(self): #bruce 070109
"""
For debugging: return a newly made list of our subscriptions
(not removing duplicates), without changing or fulfilling them.
"""
res = []
subs = self._subs
for sublis in subs.itervalues():
res.extend(sublis)
return res
def remove_all_subs(self): #bruce 070109 experimental (for trying to fix a bug in exprs module), might become normal
"""
[private while experimental]
WARNING: I'm not sure makes sense except on an owning obj
since we are a 'one time' sublist
"""
try:
self._subs.clear() # does self._subs always exist when this is called? I think so but I'm not sure, so check for this.
except AttributeError:
print "no _subs in %r so nothing to clear in remove_all_subs" % (self,)
return
def _fulfill1(self, sub1, debug = False):
# note: the only use of self is in the debug msg.
try:
if debug or _print_all_subs:
print "%r: fulfilling sub1 %r" % (self, sub1)
sub1() #e would an arg of self be useful?
except:
# We have no choice but to ignore the exception, even if it's always a bug (as explained in docstring).
# The convention should be to make sure sub1 won't raise an exception (to make bugs more noticable),
# so this is always a likely bug, so we print it; but only when atom_debug, in case it might get printed
# a lot in some circumstances. [revised, see below]
if True or debug or debug_flags.atom_debug:
#bruce 070816 included True in that condition, to avoid silently discarding exceptions indicating real bugs.
print_compact_traceback("bug: exception in subs %r ignored by %r: " % (sub1, self) )
print_compact_stack(" note: here is where that exception occurred: ", skip_innermost_n = 1) #bruce 080917 revised
return
def remove_subs(self, func): # note: this has never been used as of long before 061022, and looks potentially unsafe (see below)
"""
Make sure (one subscribed instance of) func will never be fulfilled.
WARNING: calling this on a subs (a specific instance of func) that was already fulfilled is an UNDETECTED ERROR.
But it's ok to subscribe the same func eg 5 times, let 2 of those be fulfilled, and remove the other 3.
"""
# optimize by assuming it's there -- if not, various exceptions are possible.
# [Note, bruce 061022: this assumption looks like a bug, in light of the use of remove_all_instances.
# At least, it would be an error to use both of them on the same sublis within self,
# which means in practice that it would be hard to safely use both of them on the same OneTimeSubsList object.]
self._subs[id(func)].pop()
# (this can create a 0-length list which remains in the dict. seems ok provided self is not recycled.)
return
def remove_all_instances(self, func):
"""
#doc; legal even if no instances, but only if an instance
once existed (but this might not be checked for).
"""
try:
del self._subs[id(func)]
except KeyError:
pass # not sure this ever happens, but it's legal (if we call this multiple times on one func)
except AttributeError:
if 0 and debug_flags.atom_debug:
print "atom_debug: fyi: %r's event already occurred, in remove_all_instances( %r)" % (self, func)
pass # this happens routinely after fulfill_all removes self._subs,
# since our recipient is too lazy to only remove its other subs when one gets fulfilled --
# it just removes all the subs it had, including the one that got fulfilled.
except:
# this detected the following bug during development (subsequently fixed):
# bug: self._subs is None: exceptions.TypeError: object does not support item deletion
print_compact_traceback("bug: self._subs is %r: " % (self._subs,) )
return
pass # end of class OneTimeSubsList
# ==
class SelfUsageTrackingMixin: #bruce 050804; docs reworked
    """
    Mix this into a client class to let arbitrary other code track uses
    and changes (or invalidations) of the client's "main value".

    What "main value" means is up to the client class. Usually it's the
    value returned by some "get method" (in the client class itself, or
    if that class implements a "high-level slot", in an instance of the
    slot's client), but it can instead be an externally stored or
    implicit value, or even something like the side effects a certain
    method would perform (e.g. a method to keep something up to date);
    in those more complex cases there is still usually a method which
    external code must call to indicate a use of the value, which can be
    considered a kind of "get method" even though it doesn't actually
    return the tracked value.

    A client which needs to track more than one value or aspect should
    use one instance of this class (or of a client class) per tracked
    value. (Our subclass UsageTracker can serve as a stand-alone
    instance of this class.)

    To accomplish the tracking, client class instances must notice all
    uses and changes of the value they track, and call:

    * self.track_use() for uses -- on *every* use, not just the first
      use after a change, in case the using entity is different;

    * self.track_change() or self.track_inval() for changes or
      invalidations (presently synonyms, but they might not always be).

    For more info see the docstrings of the specific methods.
    See also code comments warning about an issue in correctly using
    this in conjunction with class Formula, and after client objects
    are killed.
    """
    # history: as of 061022 this was used only in class Chunk and (via
    # UsageTracker) in preferences.py; more uses were added later.
    #
    # WARNING: the way Formula uses track_change only works when the call
    # of track_change comes after the new value is available, but when
    # using track_change to report an invalidation, that's not possible
    # in general! ###@@@ this design flaw needs to be corrected somehow.
    # See also comments near one of preferences.py's uses of track_change.
    #
    # Note about whether to call track_change after something is
    # "killed": that's only *needed* if a use after the kill is
    # theoretically possible (usually true, since buggy callers can do
    # it) and would behave differently because of the kill; it's thought
    # to be always safe. In practice, other changes which themselves call
    # track_change() usually occur when something is killed, so whether
    # it's called directly doesn't matter anyway.
    def track_use(self):
        """
        Record one use of the value this object tracks. Must be called
        whenever the "get method" for that value is called, or more
        generally whenever the value is "used" (see class docstring).

        @note: must be called on every use, not just the first use after
        a change, in case the using entity is different, since all users
        need to know about the next change or invalidation.

        This works by telling cooperating users (clients of
        SubUsageTrackingMixin ###VERIFY) what they used, so they can
        subscribe to invalidations or changes of all values they used.
        [some callers might inline this method]
        """
        if getattr(self, '_changes__debug_print', False):
            ####REVIEW: could _changes__debug_print get a default value as
            # an optim? Its mangled-like name ought to make that ok even
            # though we're a mixin. [bruce 090212 comment]
            print_compact_stack( "\n_changes__debug_print: track_use of %r: " % self )
        try:
            subslist = self.__subslist
        except AttributeError:
            # note: this is the only place self.__subslist gets created;
            # that happens on the first call of track_use (on self), and
            # on the first call after each track_change/track_inval.
            if debug_flags.atom_debug:
                debug_name = "%r" % self #061118; TODO: optimize
            else:
                debug_name = None
            subslist = self.__subslist = OneTimeSubsList(debug_name)
                # (in effect, a unique name for self's current value era)
                # (design note: we should NOT return subslist.subscribe(func)
                # here -- that's what the value-user should do *after*
                # subslist gets entered into its list of used objects and
                # the value-user later finds it there.)
        env.track( subslist)
            ###@@@ REVIEW: should env.track be renamed to say it's
            # specific to usage-tracking? Some callers might inline this
            # as env.used_value_sublists[id(subslist)] = subslist.
            # (Saving self rather than self.__subslist would be wrong,
            # since one self has several subslists in succession as its
            # value changes [but see skeptical addendum 061022].)
        return
    def track_change(self):
        """
        Record that the tracked value changed or was invalidated; this
        one method implements both track_change and track_inval (see
        class docstring for background).

        It tells everything which tracked a use of self's value that the
        value is no longer valid (unless that would be redundant, since
        they were already told since they last subscribed). What the
        recipients do with that message is up to them, but typically
        includes propogating further invalidations to their own
        subscribers, marking their own value as invalid, and/or adding
        themselves to a set of objects needing later updates.

        Clients typically have two or three distinct reasons to call
        this method, all of which must be covered:
        * the tracked value is explicitly set;
        * something used to compute the tracked value has changed (or
          been invalidated), so the tracked value must be invalidated to
          ensure recomputation when next needed. That has two subcases:
          * done manually by the client, for inputs not automatically
            tracked by this system;
          * done automatically, when something the client usage-tracked
            while last recomputing the value has changed -- "propogating
            an invalidation".
        (This class doesn't yet distinguish being set from being
        invalidated; clients which can cheaply compare old and new
        values and skip redundant invals should do so before calling
        this method.)
        """
        ### REVIEW: rename to track_inval?? review: add args, e.g. "why"
        # (for debugging) or nature of change (for optims)?
        debug = getattr(self, '_changes__debug_print', False)
        if debug:
            print_compact_stack( "\n_changes__debug_print: track_change of %r: " % self )
        try:
            subslist = self.__subslist
        except AttributeError:
            # common case [TODO: optimize by replacing with boolean test]:
            # there were no uses of the current value, or they were
            # already invalidated -- nothing to do.
            return
        del self.__subslist
        subslist.fulfill_all(debug = debug)
        return
    track_inval = track_change
        #bruce 061022 added track_inval to the API, though it's unknown
        # whether it ever needs a different implem. ### REVIEW: does each
        # client use the appropriate one of track_inval vs. track_change?
    pass # end of class SelfUsageTrackingMixin
# ==
class UsageTracker( SelfUsageTrackingMixin): #bruce 050804 #e rename?
    """
    A stand-alone (ownable) version of SelfUsageTrackingMixin, for
    owners which have more than one aspect whose usage can be tracked,
    or which prefer a cooperating object over a mixin class for clarity.
    """
    # history: as of 061022 this was used only in _tracker_for_pkey
    # (preferences.py); this or its superclass might soon be used in
    # exprs/lvals.py
    pass
# ===
class begin_end_matcher: #bruce 050804
    """
    Maintain a stack of objects (whose constructor is a parameter of this object)
    which are created/pushed and popped by calls of our begin/end methods, which must occur in
    properly nested matching pairs. Try to detect and handle the error of nonmatching begin/end calls.
    (This only works if the constructor promises to return unique objects, since we use their id()
    to detect matching.)
    """
    active = False # flag to warn outside callers that we're processing a perhaps-nonreentrant method [bruce 050909]
    def __init__(self, constructor, stack_changed_func = None):
        """
        Constructor is given all args (including keyword args) to self.begin(),
        and from them should construct objects with begin(), end(), and error(text) methods
        which get stored on self.stack while they're active.
        Or constructor can be None, which means begin should always receive one arg, which is the object to use.
        Stack_changed_func (or noop if that is None or not supplied)
        is called when we're created and after every change to our stack,
        with arg1 self, and arg2 a dict of info (keyword:value pairs) about the nature of the stack change
        (see dict() calls herein for its keywords, including popped, pushed, errmsg).
        """
        self.constructor = constructor or (lambda arg: arg)
        self.stack_changed = stack_changed_func or noop
        self.stack = []
        self.active_apply( self.stack_changed, (self, dict()) ) # self.stack_changed(self, dict())
            # callers depend on stack_changed being called when we're first created
            #k is active_apply needed here??
    def active_apply(self, func, args, kws = {}):
        # Call func(*args, **kws) with self.active set around the call,
        # so outside callers can detect reentrant calls into self.
        # note: the shared default dict for kws is safe here since it is
        # never mutated, only passed through to apply.
        self.active = True
        try:
            return apply(func, args, kws)
        finally:
            self.active = False
    def begin(self, *args, **kws):
        """
        Construct a new object using our constructor;
        activate it by calling its .begin() method [#k needed??] [before it's pushed onto the stack];
        push it on self.stack (and inform observers that self.stack was modified);
        return a currently-unique match_checking_code which must be passed to the matching self.end() call.
        """
        self.active = True
        try:
            newobj = self.constructor(*args, **kws) # exceptions in this mean no change to our stack, which is fine
            newobj.begin()
            self.stack.append(newobj)
            self.stack_changed(self, dict(pushed = newobj))
            # for debugging, we could record compact_stack in newobj, and print that on error...
            # but newobj __init__ can do this if it wants to, without any help from this code.
            return id(newobj) # match_checking_code
        finally:
            self.active = False
    def end(self, match_checking_code):
        """
        This must be passed the return value from the matching self.begin() call.
        Verify begin/end matches, pop and deactivate (using .end(), after it's popped) the matching object, return it.
        Error handling:
        - Objects which had begin but no end recieve .error(errmsg_text) before their .end().
        - Ends with no matching begin (presumably a much rarer mistake -- only likely if some code
        with begin/end in a loop reuses a localvar holding a match_checking_code from an outer begin/end)
        just print a debug message and return None.
        """
        stack = self.stack
        if stack and id(stack[-1]) == match_checking_code:
            # usual case, no error, be fast
            doneobj = stack.pop()
            self.active = True
            try:
                self.stack_changed(self, dict(popped = doneobj))
                doneobj.end() # finalize doneobj
            finally:
                self.active = False
            return doneobj
        # some kind of error
        ids = map( id, stack )
        if match_checking_code in ids:
            # some subroutines did begin and no end, but the present end has a begin -- we can recover from this error:
            # pop (with .error() and .end()) everything above the matching object, then pop the object itself normally.
            doneobj = stack.pop()
            while id(doneobj) != match_checking_code:
                self.active = True
                try:
                    errmsg = "begin, with no matching end by the time some outer begin got its matching end"
                    self.stack_changed(self, dict(popped = doneobj, errmsg = errmsg))
                        # stack_change is needed here in case doneobj.end() looks at something this updates from self.stack
                    doneobj.error(errmsg)
                    doneobj.end() # might be needed -- in fact, it might look at stack and send messages to its top
                finally:
                    self.active = False
                doneobj = stack.pop()
            # now doneobj is correct
            self.active = True
            try:
                self.stack_changed(self, dict(popped = doneobj))
                doneobj.end()
            finally:
                self.active = False
            return doneobj
        # otherwise the error is that this end doesn't match any begin, so we can't safely pop anything from the stack
        print_compact_stack( "bug, ignored for now: end(%r) with no matching begin, in %r: " % (match_checking_code, self) )
        return None # might cause further errors as caller tries to use the returned object
    pass # end of class begin_end_matcher
# ==
def default_track(thing): #bruce 050804; see also the default definition of track in env module
    """
    Default implementation of usage tracking: a no-op. This gets replaced
    at runtime whenever usage of certain things is actually being tracked.
    """
    return
def _usage_tracker_stack_changed( usage_tracker, infodict): #bruce 050804
    """
    [private]
    Called when usage_tracker's begin/end stack is created, and after
    every time it changes; installs the appropriate tracking function
    into env.track.

    (Receiving usage_tracker as an argument is necessary -- and makes
    better sense anyway -- since when we're first called, the global
    usage_tracker is not yet defined: its rhs, in the assignment below,
    is still being evaluated.)
    """
    del infodict
    if usage_tracker.stack:
        env.track = usage_tracker.stack[-1].track
    else:
        env.track = default_track
    return
# the global stack of nested usage-tracked computations; pushing and
# popping it (via begin/end) installs the appropriate tracking function
# into env.track (see _usage_tracker_stack_changed above).
usage_tracker = begin_end_matcher( None, _usage_tracker_stack_changed )
    # constructor None means obj must be passed to each begin
usage_tracker._do_after_current_tracked_usage_ends = {} #070108
    # maps func -> func, for funcs to be called when the outermost
    # tracked usage ends (see after_current_tracked_usage_ends below and
    # SubUsageTrackingMixin.end_tracking_usage, which flushes it)
def after_current_tracked_usage_ends(func): # new feature 070108 [tested and works, but not currently used]
    """
    Arrange for func to be called at least once after the current
    usage-tracked computation ends.

    WARNING: if func invalidates something used during that computation,
    it will cause an immediate inval of the entire computation, which in
    most cases would lead to excessive useless recomputation. For an
    example of incorrect usage of this kind, see
    Lval_which_recomputes_every_time in exprs/lvals.py (deprecated).
    There may be legitimate uses which do something other than a direct
    inval, such as noting that something's current and new values should
    be compared later, for possible inval just before its next use in a
    tracked computation -- so this function itself is not deprecated,
    though it's been tested only in that deprecated client.
    """
    assert usage_tracker.stack, "error: there is no current usage-tracked computation"
    # a dict is used to optimize for lots of equal funcs (reducing RAM
    # usage, at least) -- note that in the known use case
    # (Lval_which_recomputes_every_time) they might be equivalent but
    # non-identical bound methods
    usage_tracker._do_after_current_tracked_usage_ends[ func] = func
    return
class SubUsageTrackingMixin: #bruce 050804; as of 061022 used only in class Chunk, class GLPane, class Formula
    """
    Mixin for client classes which remake a value and need to
    usage-track whatever code they call while doing so, then handle the
    results of that tracking; see class usage_tracker_obj for a related
    docstring.

    @note: this is often used in conjunction with SelfUsageTrackingMixin,
    in which case (for a client class inheriting both) the invalidator
    passed to self.end_tracking_usage can often be self.track_inval or a
    client method that calls it.
    """
    # [060926: this uses self only for debug messages; does it need to be
    #  a mixin?? addendum 061022: maybe for inval propogation??]
    def begin_tracking_usage(self): #e there's a docstring for this in an outtakes file, if one is needed
        if debug_flags.atom_debug:
            debug_name = "%r" % self #061118
        else:
            debug_name = None
        tracker = usage_tracker_obj(debug_name)
        match_checking_code = usage_tracker.begin( tracker)
            # deliberately NOT stored in self -- that would defeat the
            # error-checking for bugs in how self's other code matches up
            # these begin and end methods.
            # [060926: combining id(self) into the code, to enforce that
            #  one object calls both, was considered and rejected: the
            #  code is already unique, passing the wrong one by accident
            #  is hard, and cross-object use might be legitimate.]
        return match_checking_code
    def end_tracking_usage(self, match_checking_code, invalidator, debug = False):
        """
        End the tracking begun by the matching begin_tracking_usage call;
        returns the usage_tracker_obj.
        """
        # debug keyword is a new feature 070109, mainly for debugging-related uses
        tracker = usage_tracker.end( match_checking_code)
        tracker.standard_end( invalidator, debug = debug)
            ##e or we could pass our own invalidator which wraps that one,
            # so it can destroy us, and/or do more invals, like one inside
            # the other mixin class if that exists
        # support after_current_tracked_usage_ends [070108]: if the
        # outermost tracked usage just ended, flush the queued funcs
        if not usage_tracker.stack:
            todo = usage_tracker._do_after_current_tracked_usage_ends
            if todo: # condition is optim
                usage_tracker._do_after_current_tracked_usage_ends = {}
                for func in todo.values():
                    try:
                        func()
                    except:
                        print_compact_traceback(
                            "after_current_tracked_usage_ends: error: exception in call of %r (ignored): " % (func,))
        return tracker
    pass # end of class SubUsageTrackingMixin
class usage_tracker_obj: #bruce 050804; docstring added 060927
"""
###doc [private to SubUsageTrackingMixin, mostly]
This object corresponds to one formula being evaluated,
or to one occurrence of some other action which needs to know what it uses
during a defined period of time while it calculates something.
At the start of that action, the client should create this object and make it accessible within env
so that when a trackable thing is used, its inval-subslist is passed to self.track (self being this object),
which will store it in self.
[This can be done by calling SubUsageTrackingMixin.begin_tracking_usage().]
At the end of that action, the client should make this object inaccessible
(restoring whatever tracker was active previously, if any),
then call self.standard_end(invalidator) with a function it wants called
the next time one of the things it used becomes invalid.
[This can be done by calling SubUsageTrackingMixin.end_tracking_usage() with appropriate args.]
Self.standard_end will store invalidator in self, and subscribe self.standard_inval to the invalidation
of each trackable thing that was used.
This usage_tracker_obj lives on as a record of the set of those subscriptions, and as the recipient
of invalidation-signals from them (via self.standard_inval), which removes the other subscriptions
before calling invalidator. (Being able to remove them is the only reason this object needs to live on,
or that the inval signal needs to be something other than invalidator itself. Perhaps this could be fixed
in the future, using weak pointers somehow. #e)
"""
# [note, 060926: for some purposes, namely optim of recompute when inputs don't change [#doc - need ref to explanation of why],
# we might need to record the order of first seeing the used things,
# and perhaps their values or value-version-counters ###e]
# 061022 review re possible use in exprs/lvals.py:
# - a needed decision is whether inval propogation is the responsibility of each invalidator, or this general system.
# - it seems like SubUsageTrackingMixin might need self to tie it to SelfUsageTrackingMixin for inval propogation. (guess)
# - which might mean we need variants of that, perhaps varying in this class, usage_tracker_obj:
# - a variant to propogate invals;
# - a variant to track in order (see below);
# - a variant to immediately signal an error (by raising an exception) if any usage gets tracked; used to assert none does.
# - as mentioned above, one variant will need to keep an order of addition of new items, in self.track.
# - and self.track is called extremely often and eventually needs to be optimized (and perhaps recoded in C).
# Other than that, it can probably be used directly, with the invalidator (from client code) responsible for inval propogation.
def __init__(self, debug_name = None):
self.debug_name = debug_name #061118
def __repr__(self): #061118
return "<%s%s at %#x>" % (self.__class__.__name__, self.debug_name and ("(%s)" % self.debug_name) or '', id(self))
def begin(self):
self.data = {}
def track(self, subslist):
"""
This gets called (via env.track) by everything that wants to record one use of its value.
The argument subslist is a subscription-list object which permits subscribing to future changes
(or invalidations) to the value whose use now is being tracked.
[This method, self.track, gets installed as env.track, which is called often, so it needs to be fast.]
"""
# For now, just store subslist, at most one copy. Later we'll subscribe just once to each one stored.
self.data[id(subslist)] = subslist
def error(self, text):
print "bug: error in usage_tracker_obj:", text #e stub
def end(self):
pass # let the caller think about self.data.values() (eg filter or compress them) before subscribing to them
def standard_end(self, invalidator, debug = False):
"""
some callers will find this useful to call, shortly after
self.end gets called; see the class docstring for more info
"""
self.invalidator = invalidator # this will be called only by our own standard_inval
whatweused = self.whatweused = self.data.values() # this list is saved for use in other methods called later
self.last_sub_invalidator = inval = self.standard_inval
# save the exact copy of this bound method object which we use now, so comparison with 'is' will work for unsubscribes
# (this might not be needed if they compare using '==' [###k find out])
# note: that's a self-referential value, which would cause a memory leak, except that it gets deleted
# in standard_inval (so it's ok). [060927 comment]
if debug:
print "changes debug: %r.standard_end subscribing %r to each of %r" % (self, inval, whatweused) #bruce 070816
for subslist in whatweused:
subslist.subscribe( inval ) #e could save retvals to help with unsub, if we wanted
self.data = 222 # make further calls of self.track() illegal [#e do this in self.end()??]
return
def standard_inval(self):
"""
This is used to receive the invalidation signal,
and call self.invalidator after some bookkeeping.
See class docstring for more info.
It also removes all cyclic or large attrs of self,
to prevent memory leaks.
"""
already = self.unsubscribe_to_invals('standard_inval')
if already:
pass # error message was already printed
else:
invalidator = self.invalidator
del self.invalidator
invalidator()
# calling this twice might cause a bug if "something called standard_inval twice",
# depending on the client object which receives it,
# so we don't, and the error message above should be retained [070110 comment]
return
def unsubscribe_to_invals(self, why): #070110 split this out and revised caller (leaving it equivalent) and added a caller
"""
if we already did this, print an error message mentioning why
we did it before and return 1, else do it and return 0
"""
# this needs to remove every subs except the one which is being fulfilled by calling it.
# But it doesn't know which subs that is! So it has to remove them all, even that one,
# so it has to call a remove method which is ok to call even for a subs that was already fulfilled.
# The only way for that method to always work ok is for its semantics to be that it removes all current subs (0 or more)
# which have the same fulfillment function (like subslist.remove_all_instances does).
# That is only ok since our subs (self.standard_inval) is unique to this object, and this object
# makes sure to give only one copy of it to one thing. (Another reason it could be ok is if it doesn't
# matter how many times self.invalidator is called, once it's called once. This is true in all current uses [061119]
# but perhaps not guaranteed.)
inval = self.last_sub_invalidator
self.last_sub_invalidator = 'hmm' # this value is also tested in another method
# 061119 do this rather than using del, to avoid exception when we're called twice; either way avoids a memory leak
self.last_sub_invalidator_why = why
if inval == 'hmm':
# we already did this
if self.last_sub_invalidator_why == 'standard_inval' and why == 'standard_inval':
# Something called standard_inval twice. This can happen (for reasons not yet clear to me) when some tracked state
# is set and later used, all during the same computation -- normally, state used by a computation should never be set
# during it, only before it (so as to trigger it, if the same computation used the same state last time around).
# Maybe the problem is that sets of default values, within "initialization on demand", should be considered pure uses
# but are being considered sets? Not sure yet -- this debug code only shows me a later event. ###k [061121 comment]
# ... update 061207: this is now happening all the time when I drag rects using exprs.test.testexpr_19c,
# so until it's debugged I need to lower the verbosity, so I'm putting it under control of flags I can set from other code.
if _debug_standard_inval_twice: ## was debug_flags.atom_debug:
msg = "debug: fyi: something called standard_inval twice (not illegal but weird -- bug hint?) in %r" % self
if _debug_standard_inval_twice_stack:
print_compact_stack(msg + ": ",
## frame_repr = _std_frame_repr, #bruce 061120, might or might not be temporary, not normally seen
linesep = '\n')
else:
print msg
else:
print "likely bug: %r unsubscribe_to_invals twice, whys are %r and %r" % (self, self.last_sub_invalidator_why, why)
#e print stack?
return 1
elif _debug_standard_inval_nottwice_stack:
print_compact_stack("debug: fyi: something called standard_inval once (totally normal) in %r: " % self)
# the actual unsubscribe:
whatweused = self.whatweused
self.whatweused = 444 # not a sequence (cause bug if we call this again)
for subslist in whatweused:
## can't use subslist.remove( self.last_sub_invalidator ), as explained above
subslist.remove_all_instances( inval )
####e should add debug code to make sure this actually removes some, except in at most one call;
# in fact, I need code to somehow verify they get removed by comparing lengths
# since a bug in removing them properly at all is a possibility [070110 comment]
return 0
def got_inval(self):#070110
"""
Did we get an inval yet (or make them illegal already)?
(if we did, we no longer subscribe to any others, in theory --
known to be not always true in practice)
"""
return (self.last_sub_invalidator == 'hmm')
def make_invals_illegal(self, obj_for_errmsgs = None):#070110, experimental -- clients ought to call it before another begin_tracking_usage... ##e
# if not self.got_inval():
# if it still subscribes to any invals:
# + remove those subs
# + in case that fails, arrange to complain if they are ever fulfilled,
# or better yet, prevent them from being fulfilled, by changing the invalidator
# - print a message (if atom_debug) since it might indicate a bug (or did, anyway, before we removed the subs)
# else:
# - it supposedly doesn't subscribe to any invals, but what if it gets one anyway?
# tell it to not call our invalidator in that case (change it?);
# no need to print a message unless it would have called it
if not self.got_inval():
if 1: ## self.whatweused:
if _debug_old_invalsubs:
# not necessarily a bug if they would never arrive -- I don't really know yet --
# it might (in fact it does routinely) happen in GLPane for reasons I'm not sure about...
# I should find out if it happens in other subusagetracking objects. ###e
# What I do know -- it's large and normal (for glpane) as if inval unsubs never happened; it's never 0;
# but the zapping fixes my continuous redraw bug in exprs module!
# [bruce 070110; more info and cleanup to follow when situation is better understood --
# could it be a bug in the removal of subs which makes it never happen at all?!? ###k]
print "fyi: %r still has %d subs of its invalidator to things it used; zapping/disabling them" % \
(obj_for_errmsgs or self, len(self.whatweused))
already = self.unsubscribe_to_invals('standard_inval')
if already:
# error message was already printed -- but, I don't think this can ever happen, so print *that*
print "(should never happen in make_invals_illegal)"
else:
# if an inval comes (to self.standard_inval), we'll find out, since it'll complain (I think)
pass
return
pass # end of class usage_tracker_obj
# debug flags controlling warnings printed by usage_tracker_obj
# (unsubscribe_to_invals and make_invals_illegal above)
_debug_old_invalsubs = False #070110 # whether make_invals_illegal reports leftover invalidator subscriptions
_debug_standard_inval_twice = debug_flags.atom_debug # whether to warn about this at all
_debug_standard_inval_twice_stack = False # whether to print_compact_stack in that warning [untested since revised by bruce 061207]
_debug_standard_inval_nottwice_stack = False # whether to print_compact_stack in an inval that *doesn't* give that warning [untested]
# ==
class begin_disallowing_usage_tracking(SubUsageTrackingMixin):
"""
Publicly, this class is just a helper function, used like this:
mc = begin_disallowing_usage_tracking(whosays) # arg is for use in debug prints and exception text
try:
... do stuff in which usage tracking would be an error or indicate a bug
finally:
end_disallowing_usage_tracking(mc)
pass
"""
def __init__(self, whosays, noprint = False):
self.whosays = "%r" % (whosays,) #k %r??
self.noprint = noprint
self.mc = self.begin_tracking_usage()
###e SHOULD do that in a nonstandard way so we notice each usage that occurs and complain, w/ exception;
#e and we should not actually subscribe
def _end_disallowing_usage_tracking(self):
self.end_tracking_usage(self.mc, self.inval) # this shouldn't subscribe us to anything, once implem is finished properly
# now warn if we actually subscribed to anything -- done in our overridden version of that method
# someday, self.destroy(), but for now [until implem is done], stick around for things to call our inval.
return
def inval(self):
#e don't use noprint, since this implies a bug in self, tho for now, that's a known bug, always there, til implem is done
print "bug (some time ago): something that %r should not have subscribed to (but did - also a bug) has changed" % self
def __repr__(self):
return "<%s at %#x for %r>" % (self.__class__.__name__, id(self), self.whosays)
def end_tracking_usage(self, match_checking_code, invalidator):
"""
[THIS OVERRIDES the method from SubUsageTrackingMixin]
"""
obj = usage_tracker.end( match_checking_code) # same as in mixin
obj.standard_end( invalidator) # same as in mixin, but we'll remove this later, so don't just call the mixin version here
if obj.whatweused: # a private attr of obj (a usage_tracker_obj), that we happen to know about, being a friend of it
msg = "begin_disallowing_usage_tracking for %s sees some things were used: %r" % (self.whosays, obj.whatweused,)
if not self.noprint:
print msg
assert 0, msg ##e should be a private exception so clients can catch it specifically; until then, noprint is not useful
return obj #070110 be compatible with new superclass API
pass
def end_disallowing_usage_tracking(mc):
    """
    End the no-usage-tracking period begun by begin_disallowing_usage_tracking.
    @param mc: the object returned by begin_disallowing_usage_tracking
    """
    mc._end_disallowing_usage_tracking()
    return None
# ==
def _std_frame_repr(frame): #bruce 061120 #e refile into debug.py? warning: dup code with lvals.py and [g4?] changes.py
    """
    Return a one-line string describing one stack frame,
    for use in print_compact_stack.
    """
    # older eg: frame_repr = lambda frame: " %s" % (frame.f_locals.keys(),), linesep = '\n'
    frame_locals = frame.f_locals
    # a frame may supply its own repr function via this magic local name
    custom_repr = frame_locals.get('__debug_frame_repr__')
    if custom_repr:
        return ' ' + custom_repr(frame_locals)
    res = ' ' + frame.f_code.co_name
    self = frame_locals.get('self')
    if self is not None:
        res +=', self = %r' % (self,)
    return res
    # frame_locals.keys() ##e sorted? limit to 25? include funcname of code object? (f_name?)
    # note: an example of dir(frame) is:
    # ['__class__', '__delattr__', '__doc__', '__getattribute__', '__hash__',
    #  '__init__', '__new__', '__reduce__', '__reduce_ex__', '__repr__',
    #  '__setattr__', '__str__', 'f_back', 'f_builtins', 'f_code',
    #  'f_exc_traceback', 'f_exc_type', 'f_exc_value', 'f_globals', 'f_lasti',
    #  'f_lineno', 'f_locals', 'f_restricted', 'f_trace']
    # and of dir(frame.f_code) is:
    # ['__class__', '__cmp__', '__delattr__', '__doc__', '__getattribute__',
    #  '__hash__', '__init__', '__new__', '__reduce__', '__reduce_ex__',
    #  '__repr__', '__setattr__', '__str__', 'co_argcount', 'co_cellvars',
    #  'co_code', 'co_consts', 'co_filename', 'co_firstlineno', 'co_flags',
    #  'co_freevars', 'co_lnotab', 'co_name', 'co_names', 'co_nlocals',
    #  'co_stacksize', 'co_varnames']
# ==
class Formula( SubUsageTrackingMixin): #bruce 050805 [not related to class Expr in exprs/Exprs.py, informally called formulae]
    """
    Recompute value_lambda whenever anything it used (per usage tracking)
    changes, and pass each newly computed value to an action callback.
    See __init__ for the detailed contract and caveats.
    """
    killed = False # set to True by destroy() (also called after any error); suppresses further recomputes
    def __init__( self, value_lambda, action = None, not_when_value_same = False, debug = False ):
        """
        Create a formula which tracks changes to the value of value_lambda (by tracking what's used to recompute it),
        and when created and after each change occurs, calls action with the new value_lambda return value as sole argument.
        This only works if whatever value_lambda uses, which might change and whose changes should trigger recomputation,
        uses SelfUsageTrackingMixin or the equivalent to track its usage and changes, AND (not always true!) if those things
        only report track_change after the new value is available, not just when the old value is known to be invalid.
        [About that issue, see comments in docstring of class which defines track_change. ####@@@@]
        If action is not supplied, it's a noop.
        Whether or not it's supplied, it can be changed later using self.set_action.
        Note that anything whose usage is tracked by calling value_lambda can cause action to be called,
        even if that thing does not contribute to the return value from value_lambda.
        If this bothers you, consider passing not_when_value_same = True, so that repeated calls
        only occur when the return value is not equal to what it was before.
        (The old return value is not even kept in this object unless not_when_value_same is true.)
        [WARNING: the code for the not_when_value_same = True option is untested as of 060306, since nothing uses it yet.]
        """
        self.debug = debug #bruce 070816
        self.value_lambda = value_lambda
        self.set_action( action)
        self.not_when_value_same = not_when_value_same
        self.first_time = True # prevents looking for self.oldval
        # compute the initial value and run action on it now
        self.recompute()
        return
    def destroy(self):
        # note: recompute() calls this itself if value_lambda or action raises
        self.killed = True
        self.value_lambda = self.action = self.oldval = None
        #e anything else?
        return
    def set_action(self, action):
        # an action of None means "do nothing"
        if action is None:
            action = noop
        self.action = action
    def recompute(self):
        """
        Recompute the value (tracking its usage), and run the action on the
        new value unless suppressed by not_when_value_same; on any error,
        destroy self so no further recomputes occur.
        """
        debug = self.debug
        print_errors = True or debug or debug_flags.atom_debug
            #bruce 070816 included True in that condition, since prior code could
            # silently discard exceptions which indicated real bugs.
        if debug:
            print "\n_changes__debug_print: %r.recompute() calling %r" % \
                  ( self, self.value_lambda )
        error = False
        # track everything value_lambda uses, so need_to_recompute fires on changes
        match_checking_code = self.begin_tracking_usage()
        try:
            newval = self.value_lambda()
        except:
            error = True
            newval = None
            if print_errors:
                print_compact_traceback( "bug: exception in %r value_lambda %r: " % (self, self.value_lambda) )
        self.end_tracking_usage( match_checking_code, self.need_to_recompute, debug = debug )
        if not error and (not self.not_when_value_same or self.first_time or not self.values_same( self.oldval, newval) ):
            # do the action
            try:
                self.action( newval)
            except:
                error = True
                if print_errors:
                    print_compact_traceback( "bug: exception in %r action %r: " % (self, self.action) )
        self.first_time = False
        if not error and self.not_when_value_same:
            # save self.oldval for the comparison we might need to do next time
            self.oldval = newval
        if error:
            self.destroy()
            if print_errors:
                print_compact_stack( "note: destroyed %r due to bug reported above: " % self )
        return
    def need_to_recompute(self):
        # invalidation callback passed to end_tracking_usage
        if not self.killed:
            self.recompute()
        return
    def values_same(self, val1, val2): #bruce 060306; untested, since self.not_when_value_same is apparently never True ###@@@
        """
        Determine whether two values are the same, for purposes of the option 'not_when_value_same'.
        Override this in a subclass that needs a different value comparison, such as 'not !='
        (or change the code to let the caller pass a comparison function).
        WARNING: The obvious naive comparison (a == b) is buggy for Numeric arrays,
        and the fix for that, (not (a != b)), is buggy for Python container classes
        (like list or tuple) containing Numeric arrays. The current implem uses a slower and stricter comparison,
        state_utils.same_vals, which might be too strict for some purposes.
        """
        return same_vals(val1, val2)
    pass # end of class Formula
# ==
# As of 050803, many of the facilities after this point are used only as stubs,
# though some might be used soon, e.g. as part of recent files menu,
# custom jigs, Undo, or other things.
# [as of 060330 pairmatcher is used in Undo; maybe keep_forever is used somewhere;
# a lot of Undo helpers occur below too]
class pairmatcher:
    """
    Maintain two forever-growing lists and invoke a client function on every
    (dim1, dim2) pair, in a deterministic order, as pairs become possible
    due to either list growing.
    The function normally returns None; the special return codes
    "remove d1" / "remove d2" drop the corresponding item from its list
    (see doit for details).
    """
    def __init__(self, func, typearg, debug_name = None):
        self.d1s = []
        self.d2s = []
        self.func = func #e will this func ever need to be changed, after we're made?
        # public, thus visible to each call of func, since we now pass self [bruce 060330 new feature]
        self.typearg = typearg
        self.debug_name = debug_name or str(typearg) or "?" #revised 060330
    def another_dim1(self, d1):
        ## print self,'getsg5h6 d1',d1
        # pair the new d1 with every d2 seen so far...
        for existing_d2 in self.d2s:
            self.doit(d1, existing_d2)
        # ...and remember it for d2s arriving in the future
        self.d1s.append(d1)
    def another_dim2(self, d2):
        ## print self,'getsg5h6 d2',d2
        # pair the new d2 with every d1 seen so far...
        for existing_d1 in self.d1s:
            self.doit(existing_d1, d2)
        # ...and remember it for d1s arriving in the future
        self.d2s.append(d2)
    def doit(self, d1, d2):
        """
        Call the client function on one pair, honoring its special
        return codes; exceptions from it are reported and ignored.
        """
        try:
            retcode = self.func(d1, d2, self) #bruce 060330 new feature -- pass self [##k should we catch exception in func??]
            if not retcode:
                #e any other use for retval??
                return
            if retcode == "remove d1":
                # note: we might or might not be iterating over d1s right now!
                # if we are iterating over d2, we might have already removed d1!
                # we should probably stop that loop in that case, but that feature is nim.
                # at least don't mind if we did already remove it...
                # [060330 temp kluge -- really we ought to stop the loop ###@@@]
                remaining = list(self.d1s) # in case we are iterating, mutate a fresh copy
                try:
                    remaining.remove(d1)
                except ValueError:
                    pass
                self.d1s = remaining
            elif retcode == "remove d2":
                remaining = list(self.d2s)
                try:
                    remaining.remove(d2)
                except ValueError:
                    pass
                self.d2s = remaining
            else:
                print_compact_stack( "bug (ignored): unrecognized return code %r in pairmatcher %r: " % \
                                     (retcode, self.debug_name))
        except:
            print_compact_traceback( "exception in %r ignored: " % self)#revised 060330
        return
    def __repr__(self): #060330, untested
        return "<%s at %#x, debug_name = %r>" % (self.__class__.__name__, id(self), self.debug_name)
    pass
class MakerDict:
    """
    A read-only mapping which lazily constructs missing values
    by calling a client-supplied function on the key.
    """
    def __init__( self, func):
        self.data = {} # cache of already-constructed values
        self.func = func # constructor for values of missing keys
    def __getitem__(self, key): # (no need for __setitem__ or other dict methods)
        # construct-on-first-use, then serve from the cache
        if key not in self.data:
            self.data[key] = self.func(key)
        return self.data[key]
    pass
# A place for objects of one kind to register themselves under some name,
# so that objects of another kind can meet all of them using a pairmatcher (#doc better?)
def postinit_func( d1, d2, matcher): #bruce 060330 add matcher arg
    """
    After d1 is inited, tell it about d2.
    (This is meant to be called for every d1 of one kind,
    and every d2 of another kind,
    registered below under the same name.)
    Returns "remove d1" (a pairmatcher control code) when d1 can't be told.
    """
    #bruce 060330 kluge, but might be reasonable -- let method name be encoded in typename for type of postinit object
    #e (if speed of this ever matters, we might memoize the following in an our-private attr of matcher)
    typename = matcher.typearg
    if typename.startswith('_'):
        ###e should assert it's legit as attrname?
        methodname = typename
    else:
        # compatibility with old code, still used as of 060330 for "Jig menu items" or so
        methodname = 'postinit_item'
    try:
        getattr(d1, methodname)(d2)
    except:
        # blame d1, for an error in either finding or calling the method
        print_compact_traceback( "exception in calling (or finding method for) d1.%s(d2) ignored; removing d1: " % (methodname,))
        #e objnames? safe_repr(obj)?
        return "remove d1" # special code recognized by the pairmatcher
    return None
postinit_pairmatchers = MakerDict( lambda typename: pairmatcher( postinit_func, typename ) )
    # lazy registry: one pairmatcher per typename, created on first use
# the public functions:
# (for main window Jig menu items we'll use the typename "Jigs menu items" since maybe other things will have Jigs menus)
#e [later, 060330: this API suffers from the methodname postinit_item being fixed, rather than depending on typename,
# which matters if this is used for lots of purposes and the same object might participate in more than one purpose.]
def register_postinit_object( typename, object):
    """
    Cause object to receive the method-call object.postinit_item(item)
    for every postinit item registered under the same typename,
    in the order of their registration,
    whether item is already registered or will be registered in the future.
    """
    # fix: the local was named 'pairmatcher', shadowing the module-level
    # class of the same name; renamed to avoid confusion and fragility
    matcher = postinit_pairmatchers[ typename]
    matcher.another_dim1( object)
def register_postinit_item( typename, item):
    """
    Cause every object registered with register_postinit_object
    under the same typename (whether registered already or in the future,
    and in their order of registration)
    to receive the method call object.postinit_item(item) for this item.
    """
    # fix: the local was named 'pairmatcher', shadowing the module-level
    # class of the same name; renamed to avoid confusion and fragility
    matcher = postinit_pairmatchers[ typename]
    matcher.another_dim2( item)
# ==
_keep_these_forever = {} # id(thing) -> thing; permanent references (see keep_forever)
def keep_forever(thing):
    """
    A place to put stuff if you need to make sure it's never deallocated
    by Python; stores a permanent reference to thing, keyed by its id.
    """
    global _keep_these_forever
    _keep_these_forever[id(thing)] = thing
# ==
_op_id = 0 # monotonically increasing id, shared by all op_run instances
debug_begin_ops = False #bruce 051018 changed from debug_flags.atom_debug
class op_run:
    """
    Track one run of one operation or suboperation, as reported to
    env.begin_op and env.end_op in nested pairs
    """
    def __init__(self, op_type = None, in_event_loop = False, typeflag = ''):
        """
        [this gets all the args passed to env.begin_op()]
        """
        #bruce 060127 added in_event_loop for Undo; bruce 060321 added typeflag
        self.op_type = op_type # might be almost anything, mainly meant for humans to see
        self.in_event_loop = in_event_loop
        # one of a small set of constants which control how this is treated by undo (at least)
        self.typeflag = typeflag
        global _op_id
        _op_id = _op_id + 1
        self.op_id = _op_id
    def begin(self):
        # (not sure it's good that begin_end_matcher requires us to define this)
        if debug_begin_ops:
            self.printmsg( "%sbegin op_id %r, op_type %r" % (self.indent(), self.op_id, self.op_type) )
        return
    def end(self):
        if debug_begin_ops:
            self.printmsg( "%send op_id %r, op_type %r" % (self.indent(), self.op_id, self.op_type) )
        return
    def error(self, errmsg_text):
        """
        called for begin_op with no matching end_op, just before our .end()
        and the next outer end_op is called
        """
        if debug_begin_ops: #
            self.printmsg( "%serror op_id %r, op_type %r, errmsg %r" % (self.indent(), self.op_id, self.op_type, errmsg_text) )
        return
    def printmsg(self, text):
        if debug_begin_ops:
            # print "atom_debug: fyi: %s" % text
            # might be recursive call of history.message; ok in theory but untested ###@@@
            env.history.message( "debug: " + text )
        return
    def indent(self):
        """
        return an indent string based on the stack length; we assume the stack does not include this object
        """
        #e (If the stack did include this object, we should subtract 1 from its length. But for now, it never does.)
        return "| " * len(op_tracker.stack)
    pass
_in_event_loop = True #bruce 060127; also keep a copy of this in env; probably that will become the only copy #e
env._in_event_loop = _in_event_loop
def _op_tracker_stack_changed( tracker, infodict ): #bruce 050908 for Undo
    """
    [private]
    Called when op_tracker's begin/end stack is created, and after every
    time it changes. Maintains _in_event_loop (here and in env), and when
    that flag changes (a command-segment boundary), notifies every element
    of env.command_segment_subscribers.
    @param tracker: the begin_end_matcher whose stack just changed
    @param infodict: info about the nature of the stack change, passed
                     through to each subscriber [bruce 060321 for bug 1440 et al]
    """
    #e we might modify some sort of env.prefs object, or env.history (to filter out history messages)...
    #e and we might figure out when outer-level ops happen, as part of undo system
    #e and we might install something to receive reports about possible missing begin_op or end_op calls
    #
    #bruce 060127 new code:
    new_in_event_loop = True # when nothing is on this op_run stack, we're in Qt's event loop
    if tracker.stack:
        new_in_event_loop = tracker.stack[-1].in_event_loop
    global _in_event_loop
        # (fix: removed unused '_last_typeflag' from this global declaration --
        #  it was never read or assigned anywhere in this function)
    changed = False # will be set to whether in_event_loop changed
    if _in_event_loop != new_in_event_loop:
        # time for a checkpoint
        ## if _in_event_loop:
        ##     print "begin command segment"
        ## else:
        ##     print "end command segment"
        changed = True
        beginflag = _in_event_loop
    _in_event_loop = new_in_event_loop
    env._in_event_loop = _in_event_loop
    if changed:
        for sub in env.command_segment_subscribers[:]: # this global list might be changed during this loop
            unsub = False
            try:
                #e ideally we should prevent any calls into op_tracker here...
                # (we can't always pass tracker.stack[-1] -- it might not exist!)
                unsub = sub( beginflag, infodict, tracker )
            except:
                print_compact_traceback("bug in some element of env.command_segment_subscribers (see below for more): ")
                #e discard it?? nah. (we'd do so by unsub = True)
                """ note: during Quit, we got this, when we tried to update the menu items no longer present (enable a QAction);
                this could be related to the crashes on Quit reported recently;
                so we should try to get the assy to unsubscribe (clear and deinit) when we're about to quit. [bruce 060127]
                bug in some element of env.command_segment_subscribers: exceptions.RuntimeError:
                underlying C/C++ object has been deleted
                [changes.py:607] [undo_manager.py:115] [undo_manager.py:154] [undo_manager.py:128] [undo_manager.py:238]
                """
                print_compact_stack(" the call that led to that bug: ", skip_innermost_n = 1) # bruce 080917
            if unsub:
                try:
                    env.command_segment_subscribers.remove(sub)
                except ValueError:
                    pass
                pass
            pass
        pass
    return
op_tracker = begin_end_matcher( op_run, _op_tracker_stack_changed )
    # the global matcher which pairs env.begin_op/env.end_op calls into op_run objects
def env_begin_op(*args, **kws):
    """
    Begin an operation run (args are passed to op_run.__init__);
    returns a match-checking code to pass to env_end_op.
    [installed below as the real env.begin_op]
    """
    return op_tracker.begin(*args, **kws)
def env_end_op(mc):
    """
    End the operation run begun by the env_begin_op call that returned mc.
    [installed below as the real env.end_op]
    """
    return op_tracker.end(mc) #e more args?
env.begin_op = env_begin_op
env.end_op = env_end_op
global_mc = None # match-checking code for the current "artificial op", if any (see env_in_op)
_in_op_recursing = False # reentrancy guard for env_in_op
def env_in_op(*args, **kws): # (disabled, separately, bruce 060127)
    # [note 060320: it's still called directly just below, in env_begin_recursive_event_processing; does it do anything then??]
    """
    This gets called by various code which might indicate that an operation is ongoing,
    to detect ops in legacy code which don't yet call env.begin_op when they start.
    The resulting "artificial op" will continue until the next GLPane repaint event
    (other than one known to occur inside recursive event processing,
    which should itself be wrapped by begin_op/end_op [with a special flag ###doc? or just in order to mask the artificial op?]),
    which will end it.
    This system is subject to bugs if recursive event processing is not wrapped,
    and some op outside of that has begin but no matching end -- then a redraw during
    the unwrapped recursive event processing might terminate this artificial op too early,
    and subsequent ends will have no begin (since their begin got terminated early as if it had no end)
    and (presently) print debug messages, or (in future, perhaps) result in improperly nested ops.
    """
    global global_mc, _in_op_recursing
    # only start an artificial op when no op is underway, we're not being
    # reentered, and the tracker isn't in the middle of a begin/end
    if not op_tracker.stack and not _in_op_recursing and not op_tracker.active:
        _in_op_recursing = True # needed when env_begin_op (before pushing to stack) calls env.history.message calls this func
        try:
            assert global_mc is None
            global_mc = env_begin_op(*args, **kws)
        finally:
            _in_op_recursing = False
    return
def env_after_op(): # (disabled, separately, bruce 060127)
    """
    This gets called at the start of GLPane repaint events
    [#e or at other times not usually inside user-event handling methods],
    which don't occur during an "op" unless there is recursive Qt event processing.
    Ends the artificial op begun by env_in_op, if one is pending.
    """
    global global_mc
    if global_mc is not None and len(op_tracker.stack) == 1:
        #e might want to check whether we're inside recursive event processing (if so, topmost op on stack should be it).
        # for now, assume no nonmatch errors, and either we're inside it and nothing else was wrapped
        # (impossible if the wrapper for it first calls env_in_op),
        # or we're not, so stack must hold an artificial op.
        mc = global_mc
        global_mc = None #k even if following has an error??
        env_end_op(mc)
    return
# disable these, bruce 060127
##env.in_op = env_in_op
##env.after_op = env_after_op
def env_begin_recursive_event_processing():
    """
    call this just before calling qApp.processEvents()
    @return: a match-checking code to pass to env_end_recursive_event_processing
    """
    # notice a possible legacy op in progress (see env_in_op docstring)
    env_in_op('(env_begin_recursive_event_processing)')
    #bruce 060127 added in_event_loop = True; bruce 060321 added typeflag
    return env_begin_op('(recursive_event_processing)', in_event_loop = True, typeflag = 'beginrec')
def env_end_recursive_event_processing(mc):
    """
    call this just after calling qApp.processEvents()
    @param mc: the code returned by env_begin_recursive_event_processing
    """
    #bruce 060321: no typeflag needed (or allowed), gets it from matching begin_op
    return env_end_op(mc)
env.begin_recursive_event_processing = env_begin_recursive_event_processing
env.end_recursive_event_processing = env_end_recursive_event_processing
    # install the real implementations over the stubs defined in env.py
#e desired improvements:
# - begin_op could look at existing stack frames, see which ones are new, sometimes do artificial begin_ops on those,
# with the end_ops happening either from __del__ methods, or if that doesn't work (due to saved tracebacks),
# from every later stack-scan realizing those frames are missing and doing something about it
# (like finding the innermost still-valid stack-frame-op and then the next inner one and ending its stack-suffix now).
# (If we can run any code when tracebacks are created, that would help too. I don't know if we can.)
# This should give you valid begin/end on every python call which contained any begin_op call which did this
# (so it might be important for only some of them to do this, or for the results to often be discarded).
# It matters most for history message emission (to understand py stack context of that). [050909]
# end
| NanoCAD-master | cad/src/foundation/changes.py |
# Copyright 2005-2009 Nanorex, Inc. See LICENSE file for details.
"""
env.py - for global variables and functions treated as "part of the
environment".
@author: Bruce
@version: $Id$
@copyright: 2005-2009 Nanorex, Inc. See LICENSE file for details.
This module is for various global or "dynamic" variables,
which can be considered to be part of the environment of the code
that asks for them (thus the module name "env"). This is for variables
which are used by lots of code, but which would be inconvenient to pass
as arguments (since many routines would need to pass these through
without using them), and which some code might want to change dynamically
to provide a modified environment for some of the code it calls.
(Many of these variables will need to be thread-specific if we ever have
threads.)
Also, certain basic routines for using/allocating some of these global
variables.
Usage:
'import foundation.env as env' is preferred to 'from foundation import env'
since the former makes it clear that env is a module. The latter is never
used, but is here in this docstring so that a search for 'import env' will
find it.
... use env.xxx as needed ...
# Don't say "from env import xxx" since env.xxx might be reassigned
# dynamically. Variables that never change (and are importable when the
# program is starting up) can be put into constants.py.
Purpose and future plans:
Soon we should move some more variables here from platform, assy, and/or win.
We might also put some "dynamic variables" here, like the current Part --
this is not yet decided.
Generators used to allocate things also might belong here, whether or not we
have global dicts of allocated things. (E.g. the one for atom keys.)
One test of whether something might belong here is whether there will always
be at most one of them per process (or per active thread), even when we
support multiple open files, multiple main windows, multiple glpanes and model
trees, etc.
History:
bruce 050610 made this module (since we've needed it for awhile), under the
name "globals.py" (since "global" is a Python keyword).
bruce 050627 renamed this module to "env.py", since "globals" is a Python
builtin function.
bruce 050803 new features to help with graphics updates when preferences are
changed
bruce 050913 converted most or all remaining uses of win.history to
env.history, and officially deprecated win.history.
bruce 080220 split glselect_name_dict.py out of env.py so we can make it
per-assy.
"""
_mainWindow = None # the singleton main window; set exactly once by setMainWindow()
# Initialize the 'prefs' value. It is redefined in preference.py
# see preferences.init_prefs_table for details.
# Initializing it here, should fix this error that pylint output shows in
# a number of files -- " 'Module 'env' has no 'prefs' member"
prefs = None
def setMainWindow(window):
    """
    Set the value which will be returned by env.mainwindow(). Called
    by MWsemantics on creation of the (currently one and only) main
    window.
    @raise AssertionError: if called more than once, or with None.
    """
    global _mainWindow
    assert _mainWindow is None, "can only setMainWindow once"
    assert window is not None # idiomatic form of the old 'not window is None'
    _mainWindow = window
def mainwindow(): #bruce 051209
    """
    Return the main window object (since there is exactly one, and it
    contains some global variables). Fails if called before main
    window is inited (and it and assy point to each other).
    @raise AssertionError: if setMainWindow was not called yet, or the
                           window and its assy don't point to each other.
    @note: mainWindow (note the capital 'W') is an alias to this function.
    """
    # sanity check, and makes sure it's not too early for these things
    # to have been set up
    assert _mainWindow is not None, "setMainWindow not called yet"
        # (idiomatic form of the old 'not _mainWindow is None')
    assert _mainWindow.assy.w is _mainWindow
    return _mainWindow
mainWindow = mainwindow # alias which should become the new name of
    # that function [bruce 080605]
def debug(): #bruce 060222
    """
    Should debug checks be run, and debug messages be printed, and debug
    options offered in menus?
    @return: the current value of debug_flags.atom_debug.
    @note: This just returns the current value of debug_flags.atom_debug,
    which is this code's conventional flag for "general debugging messages and
    checks". Someday we might move that flag itself into env, but that's
    harder since we'd have to edit lots of code that looks for it in platform,
    or synchronize changes to two flags.
    """
    from utilities import debug_flags # don't do this at toplevel in this
        # module, in case we don't want it imported so early
        # (review: can we move it to toplevel now?)
    return debug_flags.atom_debug
# ==
try:
    _things_seen_before
    # don't reset this on reload
    # (not important yet, since env.py doesn't support reload)
except NameError: # was a bare 'except:'; only NameError is expected here
    _things_seen_before = {}
def seen_before(thing): #bruce 060317
    """
    Return True if and only if thing HAS been seen before (i.e. was
    previously passed to this function), and record it as seen, so the
    first call for a given thing returns False and later calls return True.
    Useful for helping callers do things only once per session
    (via "if not seen_before(...)").
    [docstring fix: the old text said "never been seen before", which was
    the opposite of the actual behavior.]
    """
    res = _things_seen_before.get(thing, False)
    _things_seen_before[thing] = True
    return res
# ==
try:
    _once_per_event_memo
    # don't reset this on reload
except NameError: # was a bare 'except:'; only NameError is expected here
    _once_per_event_memo = {}
def once_per_event(*args, **kws):
    """
    Return True only once per user event (actually, per glpane redraw),
    for the given exact combination of args and keyword args.
    All arg values must be hashable as dict keys.
    @raise AssertionError: if called with no arguments at all.
    """
    #bruce 060720 ###@@@ should use this in debug's reload function
    assert args or kws, "some args or kws are required, " \
           "otherwise the result would be meaninglessly global"
    if kws:
        # sorted() keeps the key canonical regardless of kwarg order
        # (was: items = kws.items(); items.sort())
        key1 = (args, tuple(sorted(kws.items())))
    else:
        # optim the usual case
        # (it should be ok that this can, in theory, overlap the kws case,
        # since callers ought to be each passing distinct strings anyway)
        key1 = args
    # note: a simpler implem based on seen_before plus redraw_counter would
    # accumulate memo data without bound (a memory leak), so we instead
    # store only the last redraw_counter value per key:
    old = _once_per_event_memo.get(key1, -1)
    if redraw_counter == old:
        return False # fast case
    else:
        _once_per_event_memo[key1] = redraw_counter
        return True
    pass
# ==
# This module defines stub functions which are replaced with different
# implementations by the changes module when it's imported.
# So this module should not import the changes module, directly or indirectly.
# But in case it does, by accident or if in future it needs to, we'll define
# those stub functions as early as possible.
# (One motivation for this (not yet made use of as of 050908) is to enable
# stripped-down code to call these functions even if the functionality of the
# changes module is never needed. The immediate motivation is to allow them to
# be called arbitrarily early during init.)
def track(thing): #bruce 050804
    """
    Default implementation -- will be replaced at runtime
    as soon as changes.py module is imported (if it ever is)
    @param thing: the object whose usage the real implementation would
                  track; ignored here except for an optional debug print.
    """
    from utilities import debug_flags
    if debug_flags.atom_debug:
        print "atom_debug: fyi (from env module): " \
              "something asked to be tracked, but nothing is tracking: ", thing
        # if this happens and is not an error, then we'll zap the message.
    return
#bruce 050908 stubs for Undo ####@@@@
def begin_op(*args):
    """
    Default implementation -- will be replaced at runtime
    as soon as changes.py module is imported (if it ever is)
    """
    fake_match_code = "fake begin" #k needed?
    return fake_match_code
def end_op(*args):
    """
    Default implementation -- will be replaced at runtime
    as soon as changes.py module is imported (if it ever is)
    """
    pass
in_op = begin_op # stub alias; replaced by changes.py
after_op = end_op # stub alias; replaced by changes.py
begin_recursive_event_processing = begin_op # stub alias; replaced by changes.py
end_recursive_event_processing = end_op # stub alias; replaced by changes.py
command_segment_subscribers = [] #bruce 060127 for Undo
    # callbacks invoked at command-segment boundaries (see changes.py)
_in_event_loop = True #bruce 060127
# end of stubs to be replaced by changes module
def call_qApp_processEvents(*args): #bruce 050908
    """
    No other code should directly call qApp.processEvents --
    always call it via this function.
    @param args: optionally, an int maxtime (milliseconds) forwarded to
                 qApp.processEvents to change its timing.
    @return: whatever qApp.processEvents returns.
    """
    from PyQt4.Qt import qApp #k ??
    # wrap the recursive event processing in an op (for Undo et al)
    mc = begin_recursive_event_processing()
    try:
        res = qApp.processEvents(*args)
        # Qt doc says: Processes pending events, for 3 seconds or until there
        # are no more events to process, whichever is shorter.
        # (Or it can take one arg, int maxtime (in milliseconds),
        # to change the timing.)
    finally:
        end_recursive_event_processing(mc)
    return res
# ==
class pre_init_fake_history_widget:
    ### TODO: refactor this to be a sibling class of HistoryWidget, sharing an
    # API class; also make this cache the messages for later display.
    """
    Stand-in for the HistoryWidget, used before the real one exists
    (i.e. during main window __init__); messages are printed to the
    console instead of being shown in the widget.
    """
    too_early = 1
        # too_early is defined so insiders can detect that it's too early
        # (using hasattr on history) and not call us at all (though it'd be
        # better for them to check something else, like win.initialised, and
        # make sure messages sent to this object get saved up and printed into
        # the widget once it exists) [bruce 050913 revised comment]
    def message(self, msg, **options):
        """
        This exists to handle messages sent to win.history [deprecated] or
        env.history during win.__init__, before the history widget has been
        created! Someday it might save them up and print them when that
        becomes possible.
        """
        ## from utilities import debug_flags
        ## if debug_flags.atom_debug:
        # bruce 071018 print this always, and clarify the text:
        print "fyi: this history message was produced too early " \
              "to show up in the History Widget:"
        print msg
        # REVIEW: use print_compact_stack instead, if atom_debug is set?
        return
    redmsg = orangemsg = greenmsg = message #bruce 080220
        # the colored-message variants all reduce to the same plain print here
    def deferred_summary_message(self, format, count = 1): #bruce 090119
        # not supported before the real widget exists
        assert 0
    def statusbar_msg(self, msg_text, repaint = False): #bruce 090119
        # not supported before the real widget exists
        assert 0
    pass
history = pre_init_fake_history_widget() # changed by MWsemantics.__init__
    # (a real HistoryWidget replaces this stub once the main window exists)
last_history_serno = 0 # maintained by HistoryWidget, used by Undo checkpoints
redraw_counter = 0
    # apparently incremented once per glpane redraw; consulted by
    # once_per_event() -- TODO confirm where the increment happens
# ==
_change_checkpoint_counter = 0 #bruce 060123 for Undo and other uses
    # discussion: almost any change-counter record can work (in part) by
    # incrementing this if necessary to make it odd, then saving its value on
    # changed things, if all observing-code for it increments it if necessary
    # to make it even; this way it's easy to compare any change (odd saved
    # value) with anything that serves as a state-checkpoint (even saved
    # value), but we can still optimize saving this on all parents/containers
    # of an object in low-level change-tracking code, by stopping the ascent
    # from changed child to changed parent as soon as it would store the same
    # value of this on the parent.
def change_counter_checkpoint():
    """
    Call this to get a value to save in state-snapshots or the like,
    for comparison (using >, not ==) with stored values.
    """
    global _change_checkpoint_counter
    if _change_checkpoint_counter % 2 == 1:
        # make it even, when observed
        _change_checkpoint_counter = _change_checkpoint_counter + 1
    return _change_checkpoint_counter
def change_counter_for_changed_objects():
    """
    Call this to get a value to store on changed objects and all their
    containers; see the comment above for an important optimization.
    """
    global _change_checkpoint_counter
    if _change_checkpoint_counter % 2 == 0:
        # make it odd, when recording a change
        _change_checkpoint_counter = _change_checkpoint_counter + 1
    return _change_checkpoint_counter
# ==
# temporary compatibility functions
# [bruce 080220 split out the originals, and will remove these soon,
# replacing them with per-assy instances of glselect_name_dict;
# as an intermediate state, this one is shared by each assy,
# so code can be incrementally switched to access it through assy
# or glpane; when all code does that, these will be removed
# and each assy will make its own glselect_name_dict.]
# [bruce 080917: mostly completing this, but see comment in
# Assembly._init_glselect_name_dict for what remains.]
from graphics.drawing.glselect_name_dict import glselect_name_dict
_shared_glselect_name_dict = glselect_name_dict() # still used in class Assembly
    # transitional: one dict shared by all assys; see the comments above
    # about moving to per-assy glselect_name_dict instances
##obj_with_glselect_name = _shared_glselect_name_dict.obj_with_glselect_name
## # client code should be revised to use assy.object_for_glselect_name
## # in place of env.obj_with_glselect_name.get
##
##alloc_my_glselect_name = _shared_glselect_name_dict.alloc_my_glselect_name
##
##dealloc_my_glselect_name = _shared_glselect_name_dict.dealloc_my_glselect_name
# ==
# support for post_event_updater functions of various kinds
# Note: we separate the kinds because we need to do them in a certain order
# (model updaters before UI updaters), and because future refactoring
# is likely to move responsibility for maintaining the list of updaters,
# and for calling them, to different modules or objects, based on their kind.
_post_event_model_updaters = [] # model updaters, called in registration order by do_post_event_updates
def register_post_event_model_updater(function):
    """
    Add a function to the list of model updaters called whenever
    do_post_event_updates is called.
    The function should take a single boolean argument,
    warn_if_needed. If the function is called with warn_if_needed
    True, and the function determines that it needs to take any
    action, the function may issue a warning. This helps catch code which
    failed to call do_post_event_updates when it needed to.
    The function's return value is ignored.
    WARNING: the functions are called in the order added; when order matters,
    the application initialization code needs to make sure they're added
    in the right order.
    USAGE NOTE: there is intentionally no way to remove a function from this
    list. Application layers should add single functions to this list in the
    right order at startup time, and those should maintain their own lists
    of registrants if dynamic add/remove is needed within those layers.
    See also: register_post_event_ui_updater.
    @raise AssertionError: if function was already registered.
    """
    assert function not in _post_event_model_updaters # idiomatic 'not in'
        # Rationale: since order matters, permitting transparent multiple inits
        # would be inviting bugs. If we ever need to support reload for
        # developers, we should let each added function handle that internally,
        # or provide a way of clearing the list or replacing a function
        # in-place.
        # (Note: it's possible in theory that one update function would need
        # to be called in two places within the list. If that ever happens,
        # remove this assert, or work around it by using a wrapper function.)
    _post_event_model_updaters.append( function)
    return
_post_event_ui_updaters = []

def register_post_event_ui_updater(function):
    """
    Add a function to the list of ui updaters called whenever
    do_post_event_updates is called. All ui updaters are called
    (in the order of registration) after all model updaters.

    @param function: a callable taking no arguments; its return value
        is ignored.

    @raise AssertionError: if function was already registered.

    WARNING & USAGE NOTE: same as for register_post_event_model_updater,
    though ui updaters are much less likely than model updaters
    to have order dependencies within themselves.
    """
    assert function not in _post_event_ui_updaters
        # (was "not function in", same meaning, non-idiomatic)
    _post_event_ui_updaters.append( function)
    return
def do_post_event_updates( warn_if_needed = False ):
    """
    [public function]

    Run every registered model updater (in registration order), then every
    registered ui updater, then checkpoint the change counters.

    This should be called at the end of every user event which changes model
    or selection state.

    WARNING: In present code (070925), it is very likely not called that
    often, but this is mitigated by the precautionary calls mentioned below.

    This can also be called at the beginning of user events, such as redraws
    or saves, which want to protect themselves from event-processors which
    should have called this at the end, but forgot to. Those callers should
    pass warn_if_needed = True, to permit a debug-only warning to be emitted
    if the call was necessary (but there is no guarantee that such a warning
    is always emitted).

    (The updaters registered to be called by this function should be designed
    to be fast when called more times than necessary.)

    @see: _master_model_updater
    """
    # Implementation notes [summarized from bruce 080725 comments]:
    # - An exception in one of these updaters can prevent redrawing for the
    #   rest of the session; better protection is needed, but catching
    #   exceptions right here (without propagating them upwards) has been
    #   observed to make some bugs worse, possibly turning them into
    #   infinite recursions, so protection should be added at a higher level.
    # - Importing from utilities.debug here would add an import cycle; if
    #   that is ever needed, it could be fixed by moving this and nearby
    #   functions into a new module.

    # All model updaters must run before any ui updater.
    for updater in _post_event_model_updaters:
        updater(warn_if_needed)
    for updater in _post_event_ui_updaters:
        updater()

    # Make sure any change_counter values, saved by those updaters,
    # will not be seen again if subsequent real changes occur.
    # [bruce 080805; should make model_change_indicator more useful
    # by making it change for every drag event during a drag
    # and not depend on undo checkpointing, but needs testing for
    # unanticipated bugs or performance impact]
    change_counter_checkpoint()
    return
# ==
def node_departing_assy(node, assy): #bruce 060315 for Undo
    """
    If assy is an assembly, warn it that node (with all its child atoms)
    is leaving it.
    """
    try:
        undo_mgr = assy.undo_manager
    except AttributeError:
        # assy has no undo_manager attribute: permitted only when assy is
        # None or the special placeholder string containing "assembly"
        placeholder = type(assy) == type("") and "assembly" in assy
        assert assy is None or placeholder
            # todo: assert could be more specific (or, refactor)
        return
    if undo_mgr is not None:
        undo_mgr.node_departing_assy(node, assy)
    return
# end
| NanoCAD-master | cad/src/foundation/env.py |
# Copyright 2006-2008 Nanorex, Inc. See LICENSE file for details.
"""
state_constants.py -- definitions needed by state_utils and its client code,
including constants for declaring attributes' roles in
holding state or referring to objects that hold state. (Also some
names for the available kinds of specialcase treatment re Undo.)
@author: Bruce
@version: $Id$
@copyright: 2006-2008 Nanorex, Inc. See LICENSE file for details.
See state_utils.py for more info and related classes/utilities.
(It's undecided whether these declarations make sense
in objects that don't inherit from one of the mixins defined in
state_utils.py.)
These declarations are used by Undo to know what attributes' changes
should be recorded and later undone, and to find objects which need to
be watched for changes.
Someday they might also be used for automatically knowing
how to save objects to files, how to copy or diff or delete them,
how to browse or edit their state from a UI, etc.
Review:
Should IdentityCopyMixin and similar small classes be moved
into this file?
"""
# NOTE: This module should not import anything non-builtin,
# or define any name unsuitable for being a global in all modules.
# ==
# Possible values for _s_attr_xxx attribute declarations (needed by Undo).
# Each is a distinct string constant equal to its own name.

S_DATA = 'S_DATA' # for attributes whose value changes should be saved or undone.

S_CHILD = 'S_CHILD' # like S_DATA, but for attributes whose value is None or a "child object" which might also contain undoable state.
S_CHILDREN = 'S_CHILDREN' # like S_CHILD, but value might be a list or dict (etc) containing one or more child objects.
S_CHILDREN_NOT_DATA = 'S_CHILDREN_NOT_DATA' # scan for children, but not for state or diffs [bruce 060313, experimental but used]

# ref and parent options are not yet needed, and will be treated the same as S_DATA,
# which itself will be treated more like S_REFS anyway if it hits any objects.
# We'll still define them so we can see if you want to declare any attrs using them, mainly S_PARENT.

S_REF = 'S_REF' # like S_DATA, but for attributes whose value is None or a "referenced object",
# which might or might not be encountered in a scan of undoable objects going only into children.
# (It's not yet clear exactly how this differs from S_DATA, or whether it matters if ref'd objects are encountered
# in a scan into children. Are these "siblings or cousins" (like a jig's atoms) or "foreign objects" (like some QDialog)
# or "other state-holders" (like GLPane or MainWindow) or "constants" (like Elements and Atomtypes)?)
S_REFS = 'S_REFS' # like S_REF, but value might be a list or dict (etc) containing one or more referenced objects.

S_PARENT = 'S_PARENT' # like S_DATA, but for attributes whose value is None or a "parent object"
# (one which should be encountered in a scan of undoable objects going only into children,
# and of which this object is a child or grandchild etc).
S_PARENTS = 'S_PARENTS' # like S_PARENT, but value might be a list or dict (etc) containing one or more parent objects.

S_CACHE = 'S_CACHE' # for attributes which should be deleted (or otherwise invalidated) when other attributes' changes are undone.

S_IGNORE = 'S_IGNORE' # state system should pretend this attr doesn't exist (i.e. never look at it or change it or delete it).
# (This is equivalent to providing no state declaration for the attr, unless we add a future "default decl" for all attrs
# not declared individually, in which case this will let you exclude an attr from that.
# It's also useful for subclasses wanting to override state decls inherited from a superclass.)

# ==

# Kinds of specialcases that Undo can handle; for use as the value
# of a class constant for _s_undo_specialcase: [bruce 071114]

UNDO_SPECIALCASE_ATOM = 'UNDO_SPECIALCASE_ATOM'
UNDO_SPECIALCASE_BOND = 'UNDO_SPECIALCASE_BOND'
##UNDO_SPECIALCASE_ATOM_OWNER = 'UNDO_SPECIALCASE_ATOM_OWNER' # not sure this is right, vs CHUNK -- also it may never be needed

ATOM_CHUNK_ATTRIBUTE_NAME = 'molecule' # must match the Atom.molecule attrname
# ==
# Note: _UNSET_class should inherit from IdentityCopyMixin, but that would
# only work when IdentityCopyMixin has been split out from state_utils,
# since state_utils imports this file. Instead, we copy the methods here.
class _UNSET_class:
    """
    [private class for _UNSET_, which sometimes represents
    unset attribute values within Undo snapshots, and similar things]
    """
    # review: can we add a decl that makes the _s_attr system notice
    # the bug if it ever hits this value in a real attrval? (should we?)
    def __init__(self, name = "_???_"):
        # name is used only by __repr__, for debug printout
        self.name = name
    def __repr__(self):
        return self.name
    def _copyOfObject(self): # copied from IdentityCopyMixin
        # returning self (not a copy) gives these sentinels identity-copy
        # semantics: there is only ever one object to compare against
        return self
    def _isIdentityCopyMixin(self): # copied from IdentityCopyMixin
        pass
    pass
# ensure only one instance of _UNSET_ itself, even if we reload this module

try:
    _UNSET_
except NameError:
    # _UNSET_ not yet defined, i.e. first execution of this module in this
    # session -- create the singleton.
    # (Was a bare "except:"; only NameError can occur from the bare name
    # lookup above, and a bare except could mask unrelated errors such as
    # KeyboardInterrupt.)
    _UNSET_ = _UNSET_class("_UNSET_")

try:
    _Bugval
except NameError:
    _Bugval = _UNSET_class("_Bugval")
# end
| NanoCAD-master | cad/src/foundation/state_constants.py |
# Copyright 2005-2008 Nanorex, Inc. See LICENSE file for details.
"""
wiki_help.py -- associate webpages (typically in a wiki) with program features,
and provide access to them. Pages typically contain feature-specific help info,
FAQ, forum, etc.
@author: Will, Bruce
@version: $Id$
@copyright: 2005-2008 Nanorex, Inc. See LICENSE file for details.
Code cleanup needed:
There are two parallel systems of code which do similar things
(e.g. prepending "Feature:" and wiki_prefix()) for different kinds
of UI access; see duplicated code warnings below.
Module classification: [bruce 080101]
Mostly ui code, some io code; could be a subsystem of a general help system,
if we have one outside of ne1_ui.
Definitely doesn't belong in ne1_ui:
* not specific to NE1's individual UI
* imports nothing from ne1_ui
* imported by various things which ought to be lower than ne1_ui.
So, if we have a help module outside ne1_ui, put it there;
if we don't, it probably belongs in something like foundation.
Terminology note:
We use "web help" rather than "wiki help" in menu command text, since more users
will know what it means, and since nothing in principle forces the web pages
accessed this way to be wiki pages.
But we use "wiki help" in history messages, since we want people to think of
the wiki (rather than a rarely-changing web page) as an integral part of the
idea.
History:
idea from Eric D
will 051010 added wiki help feature to Mode classes
bruce 051130 revised it
bruce 051201 made new source file for it, extended it to other kinds of objects
(so far, some Node subclasses)
"""
from PyQt4 import QtGui
from PyQt4.Qt import QDialog
from PyQt4.Qt import QTextBrowser
from widgets.NE1_QToolBar import NE1_QToolBar
from PyQt4.Qt import QWhatsThisClickedEvent
from PyQt4.Qt import QGridLayout
from PyQt4.Qt import QPushButton
from PyQt4.Qt import QSizePolicy
from PyQt4.Qt import QSpacerItem
from PyQt4.Qt import QSize
from PyQt4.Qt import QApplication
from PyQt4.Qt import SIGNAL
from PyQt4.Qt import SLOT
import os
import foundation.env as env
import webbrowser
from utilities.debug import print_compact_traceback
from utilities.Log import redmsg
##from qt4transition import qt4todo
from utilities.prefs_constants import wiki_help_prefix_prefs_key
def webbrowser_open(url):
    """
    Open url in the user's web browser, first helping the stdlib
    webbrowser module find a browser if it failed to find one itself.

    @param url: the url to open (a string).
    """
    if not webbrowser._tryorder:
        # ("not" also covers the None value which newer versions of the
        # webbrowser module use before initialization, not just an empty
        # list; was "len(webbrowser._tryorder) == 0".)
        # Sometimes webbrowser.py does not find a web browser. Also, its list
        # of web browsers is somewhat antiquated. Give it some help.
        def register(pathname, key):
            # register pathname with the webbrowser module under key,
            # and add key to its list of browsers to try
            webbrowser._tryorder += [ key ]
            webbrowser.register(key, None,
                                webbrowser.GenericBrowser("%s '%%s'" % pathname))
        # Candidates are in order of decreasing desirability. Browser
        # names for different platforms can be mixed in this list. Where a
        # browser is not normally found on the system path (like IE on
        # Windows), give its full pathname. There is a working default for
        # Windows and Mac, apparently the only problem is when Linux has
        # neither "mozilla" nor "netscape".
        for candidate in [
            'firefox',
            'opera',
            'netscape',
            'konqueror',
            # 'c:/Program Files/Internet Explorer/iexplore.exe'
            ]:
            if os.path.exists(candidate):
                # handle candidates given as full pathnames
                register(candidate, candidate)
                continue
            for dir in os.environ['PATH'].split(os.pathsep):
                # (was .split(':'), which breaks on Windows; os.pathsep
                # is ':' on Unix so this is a no-op there)
                pathname = os.path.join(dir, candidate)
                if os.path.exists(pathname):
                    register(pathname, candidate)
                    break
                        # (was "continue", which kept scanning PATH and
                        # could register the same browser once per
                        # directory in which it appears)
        # We should now have at least one browser available
        if not webbrowser._tryorder:
            env.history.message(redmsg("Wiki Help cannot find a web browser"))
    webbrowser.open(url)
def open_wiki_help_dialog( featurename, actually_open = True ):
    #e actually_open = False is presently disabled in the implem
    """
    Show a dialog containing a link which can
    open the wiki help page corresponding to the named nE-1 feature, in ways influenced by user preferences.
    Assume the featurename might contain blanks, but contains no other characters needing URL-encoding.
    [In the future, we might also accept options about the context or specific instance of the feature,
    which might turn into URL-anchors or the like.]

    If actually_open = False [not yet implemented, probably won't ever be],
    don't open a web browser, but instead
    print a history message so the user can open it manually.
    Intended use is for a user preference setting to pass this, either always
    or when the feature is invoked in certain ways.
    """
    url = wiki_help_url( featurename)
    if url:
        #bruce 051215 experimental: always use the dialog with a link.
        # When this works, figure out how prefs should influence what to do, how to clean up the code, etc.
        # Other known issues:
        # - UI to access this is unfinished
        #   (F1 key, revise "web help" to "context help" in menu commands, access from Help menu)
        # - text is a stub;
        # - maybe need checkbox "retain dialog" so it stays open after the click
        # - doesn't put new dialog fully in front -- at least, closing mmkit brings main window in front of dialog
        # - dialog might be nonmodal, but if we keep that, we'll need to autoupdate its contents i suppose
        html = """Click one of the following links to launch your web browser
to a NanoEngineer-1 wiki page containing help on the appropriate topic:<br>
- The current command/mode: %s<br>
- %s
</p>""" % (HTML_link(url, featurename), \
           HTML_link(wiki_prefix() + "Main_Page", "The NanoEngineer-1 Wiki main page"))
            #e in real life it'll be various aspects of your current context
        def clicked_func(url):
            # called by the dialog when a link is clicked; return value
            # says whether the dialog should then close
            worked = open_wiki_help_URL(url)
            ## close_dialog = worked # not good to not close it on error, unless text in dialog is preserved or replaced with error msg
            close_dialog = True
            return close_dialog
        parent = env.mainwindow() # WikiHelpBrowser now in a Dialog, so this works. Fixes bug 1235. mark060322
        w = WikiHelpBrowser(html, parent, clicked_func = clicked_func, caption = "Web Help")
        w.show()
        return
    ## if not actually_open: ## not yet used (and untested) as of 051201
    ##     env.history.message("Help for %r is available at: %s" % (featurename, url))
    return
def open_wiki_help_URL(url, whosdoingthis = "Wiki help"): #bruce 051229 split this out of open_wiki_help_dialog
    """
    Try to open the given url in the user's browser (unless they've set
    preferences to prevent this (NIM)), first emitting a history message
    containing the url (which is described as coming from whosdoingthis,
    which should be a capitalized string).

    @return: True if there's no evidence of an error; False (after printing
             an error message to history) if it definitely failed.
    """
    url = str(url) # precaution in case of QString
    ###e should check prefs to see if we should really open browser; if not, print different hist message
    # Emit the url to history first, in case the user wants to open it
    # manually or debug the url prefix preference.
    # (see module docstring re "wiki help" vs. "web help")
    env.history.message("%s: opening " % whosdoingthis + url)
    try:
        webbrowser_open( url)
    except:
        #bruce 051201 catch exception to mitigate bug 1167
        # (e.g. when Linux user doesn't have BROWSER env var set).
        # Probably need to make this more intelligent, perhaps by
        # catching the specific exception in the bug report, knowing
        # the OS, passing options to webbrowser.open, etc.
        print_compact_traceback("webbrowser exception: ")
        env.history.message( redmsg("Problem opening web browser.") +
                             "Suggest opening above URL by hand. "\
                             "On some platforms, setting BROWSER environment variable might help."
                             )
        return False
    return True
def wiki_prefix():
    """
    Return the prefix to which wiki page titles should be appended,
    to form their urls. By default, these reference the Nanorex
    public wiki, but the prefix can be overridden by a user preference.
    """
    # note: public, but only used in this file, by three callers which
    # all do related things for different kinds of UI access.
    # [bruce 081209 comment]
    return env.prefs[wiki_help_prefix_prefs_key]
def wiki_help_url( featurename):
    """
    Return a URL at which the wiki help page for the named feature
    (e.g. "Rotary Motor" or "Build Mode") might be found (or should be
    created), or '' if this is not a valid featurename for this purpose
    [NIM - validity not yet checked].

    Assume the featurename might contain blanks, but contains no other
    characters needing URL-encoding.

    [Note: in future, user prefs might include a series of wiki prefixes
    to try, so this API might need revision to return a series of URLs
    to try.]
    """
    # WARNING:
    # If this function's behavior is ever changed, lots of wiki pages might
    # need to be renamed, with both old and new names supported as long as
    # the old code is in use. (The same would be true for wiki pages about
    # specific features whose featurenames are changed.)
    page_title = "Feature:" + featurename.replace(' ', '_') # e.g. Feature:Build_Mode
        # note: partly duplicates code in turn_featurenames_into_links
        # in whatsthis_utilities.py
        # assume no URL-encoding is needed in the title, since featurenames
        # so far are just letters and spaces
    return wiki_prefix() + page_title
# ==
def featurename_for_object(object):
    """
    Return the standard "feature name" for the type of this object
    (usually for its class), or "" if none can be found.
    """
    # Note: this is presently [051201, still true 080101] only used for
    # wiki help (and only in this module), but it might someday be used
    # for other things requiring permanent feature names, like class-specific
    # preference settings. So it might belong in a different module.
    # [bruce 080101 comment]
    method = getattr(object, 'get_featurename', None)
        # getattr form is equivalent to try/except AttributeError here
    if method is None:
        return ""
    return method()
def wiki_help_menutext( featurename):
    """
    Return the conventional menu text for offering wiki help for the
    feature with the given name.
    """
    # see module docstring re "wiki help" vs. "web help"
    prefix = "Web help: "
    return prefix + featurename
def wiki_help_lambda( featurename):
    """
    Return a callable for use as a menuspec command, which provides wiki
    help for featurename.
    """
    def res(arg1 = None, arg2 = None, featurename = featurename):
        # The optional args absorb whatever the menu system passes in;
        # featurename is frozen at definition time via its default value.
        open_wiki_help_dialog( featurename)
    return res
def wiki_help_menuspec_for_object(object):
    """
    Return a menuspec offering wiki help for object's feature type,
    or [] if object has no feature name.
    """
    featurename = featurename_for_object(object)
    if not featurename:
        return []
    return wiki_help_menuspec_for_featurename( featurename)
def wiki_help_menuspec_for_featurename( featurename):
    """
    Return a menuspec (a list of (menutext, command) pairs) offering
    wiki help for the named feature.
    """
    return [( wiki_help_menutext( featurename),
              wiki_help_lambda( featurename) )]
# ==
class QToolBar_WikiHelp(NE1_QToolBar):
    """
    A subclass of NE1_QToolBar to be used when whatsthis text
    in the toolbar might contain "wiki help" links.

    It overrides the event method to interpret a QWhatsThisClickedEvent
    by prepending the event's href with wiki_prefix() and opening
    that url in a webbrowser.
    """
    # Any widget can be extended this way. Wherever we need to have wiki help
    # active (presumably in a container with buttons or some such) we should
    # feel free to extend other container widgets as needed.
    def event(self, evt):
        # Intercept wiki-help link clicks from whatsthis text;
        # delegate every other event to the superclass.
        if isinstance(evt, QWhatsThisClickedEvent):
            url = wiki_prefix() + evt.href()
            # note: I'm guessing that in older code, this was done
            # by the class MyWhatsThis (still mentioned in some old
            # comments) [bruce 081209 comment]
            webbrowser_open(str(url)) # Must be string. mark 2007-05-10
            # REVIEW: should we call its caller open_wiki_help_URL instead?
            # This would add a history message and some exception catching.
            # Guess: yes, perhaps with a flag to turn off history
            # except when there are errors. [bruce 081209 question]
            return True
        else:
            return NE1_QToolBar.event(self, evt)
class WikiHelpBrowser(QDialog):
    """
    The WikiHelpBrowser Dialog.

    Shows the given html in a small read-only text browser styled like a
    pale-yellow post-it note, with a Close button; external links are
    opened by QTextBrowser itself (setOpenExternalLinks).
    """
    def __init__(self, text, parent = None, clicked_func = None, caption = "(caption)", size = None):
        """
        @param text: html fragment to show in the browser pane.
        @param parent: Qt parent widget, or None.
        @param clicked_func: not used in this method's visible code --
            links are opened via setOpenExternalLinks; presumably kept
            for API compatibility with callers -- TODO confirm.
        @param caption: window title string.
        @param size: None (default size), 1 (tall variant) or
            2 (wide variant).
        """
        QDialog.__init__(self,parent)
        self.setWindowTitle(caption)
        self.setWindowIcon(QtGui.QIcon("ui/border/MainWindow"))
        self.setObjectName("WikiHelpBrowser")
        TextBrowserLayout = QGridLayout(self)
        TextBrowserLayout.setSpacing(5)
        TextBrowserLayout.setMargin(2)
        self.text_browser = QTextBrowser(self)
        self.text_browser.setOpenExternalLinks(True)
        self.text_browser.setObjectName("text_browser")
        TextBrowserLayout.addWidget(self.text_browser, 0, 0, 1, 0)
        self.text_browser.setMinimumSize(400, 200)
        # make it pale yellow like a post-it note
        self.text_browser.setHtml("<qt bgcolor=\"#FFFF80\">" + text)
        self.close_button = QPushButton(self)
        self.close_button.setObjectName("close_button")
        self.close_button.setText("Close")
        TextBrowserLayout.addWidget(self.close_button, 1, 1)
        # spacer pushes the Close button to the right edge
        spacer = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
        TextBrowserLayout.addItem(spacer, 1, 0)
        self.resize(QSize(300, 300).expandedTo(self.minimumSizeHint()))
        if size == 1:
            self.text_browser.setMinimumSize(200, 400)
            self.resize(QSize(300, 550).expandedTo(self.minimumSizeHint()))
        if size == 2:
            self.resize(QSize(650, 250).expandedTo(self.minimumSizeHint()))
        self.connect(self.close_button, SIGNAL("clicked()"), self.close)
        return
    pass
def HTML_link(url, text):
    """
    Return an HTML anchor element linking to url, with the given link text.
    """
    #e might need to do some encoding in url, don't know;
    # certainly needs to in text, in principle
    pieces = ["<a href=\"", url, "\">", text, "</a>"]
    return "".join(pieces)
# == test code
def __wiki_url_for_topic(topic, wikiprefix = None):
    # Build a wiki url for topic; a falsy wikiprefix means "use default".
    if not wikiprefix:
        wikiprefix = "http://www.nanoengineer-1.net/mediawiki/index.php?title="
        # note: this is a hardcoded version of the default value
        # of wiki_prefix(); maybe it should be a named constant
    capitalized = topic[:1].upper() + topic[1:]
    capitalized = capitalized.replace(" ", "_") # assume no additional url-encoding is needed
    return wikiprefix + capitalized
def __wikiPageHtmlLink(topic, text = None, wikiprefix = None):
    # Return an HTML link to the wiki page for topic;
    # the link text defaults to the topic itself.
    if text is None:
        text = topic
    return HTML_link(__wiki_url_for_topic(topic, wikiprefix = wikiprefix),
                     text)
def __testWikiHelpBrowser():
    # Manual test: show a WikiHelpBrowser containing one wiki link,
    # inside a minimal QApplication event loop (blocks until closed).
    import sys
    app = QApplication(sys.argv)
    w = WikiHelpBrowser("Here is a wiki page about " +
                        __wikiPageHtmlLink("QWhatsThis and web links") +
                        " to click.")
    w.show()
    app.connect(app, SIGNAL("lastWindowClosed()"),
                app, SLOT("quit()"))
    app.exec_()

if __name__ == "__main__":
    __testWikiHelpBrowser()
# end
| NanoCAD-master | cad/src/foundation/wiki_help.py |
# Copyright 2004-2009 Nanorex, Inc. See LICENSE file for details.
"""
Utility.py -- class Node (superclass for all model-tree objects),
Group [now defined in Group.py, no longer imported here],
and a few related classes or functions, defining a uniform
API to permit all Node subclasses to be shown in the model tree,
and methods for manipulation of Node trees. (Most Node subclasses
are defined in other files. Notable ones are molecule and Jig.)
(Note: all non-leaf nodes in a node tree must be instances of Group.)
See also: class Node_api (which is only the part
of the API needed by the ModelTree).
@author: Josh, Bruce
@version: $Id$
@copyright: 2004-2009 Nanorex, Inc. See LICENSE file for details.
History:
Originally by Josh; gradually has been greatly extended by Bruce,
but the basic structure of Nodes and Groups has not been changed.
Bruce 071110 split Group.py out of Utility.py. (And may soon
split out Node and/or LeafNode as well.)
Bruce 080305 added some abstract classes between Node and Group
in the inheritance hierarchy (defined in this module for now).
"""
from utilities.debug import print_compact_stack
from utilities import debug_flags
import foundation.env as env
from utilities.constants import genKey
from foundation.state_utils import copy_val, StateMixin
from utilities.Log import redmsg, orangemsg
from foundation.state_constants import S_PARENT, S_DATA, S_CHILD
from utilities.icon_utilities import imagename_to_pixmap
from foundation.Assembly_API import Assembly_API
from widgets.simple_dialogs import grab_text_line_using_dialog
_DEBUG_UNDOABLE_ATTRS = False
# ==
# Unique id for all Nodes -- might generalize to other objects too.
# Unlike python builtin id(node), this one will never be reused
# when an old node dies.

nodekey = genKey(start = 1)
    # an iterator of successive integer keys (node_id calls nodekey.next())
    # note: atoms are not nodes, so possible overlap of
    # atom.key and node._id should be ok for now.
def node_id(node):
    """
    Return a session-unique id for a Node (never reused).
    (Legal to call for None; then returns None.)
    """
    if node is None:
        return None
    assert isinstance(node, Node) #e might relax this later
    try:
        node._id
    except AttributeError:
        # first request for this node's id -- allocate one now
        node._id = nodekey.next()
    return node._id
def node_name(node):
    """
    Return node.name, or a safe placeholder string; use this in error or
    debug messages for safety, rather than node.name.
    """
    if node is None:
        return "<None>"
    # getattr with a default is equivalent to the try/except AttributeError
    # pattern used elsewhere in this file
    return getattr(node, "name", "<node has no .name>")
_will_kill_count = 1
    # Note: this must start > 0, even though it's incremented when next used
    # [bruce 060327]
    # (its use is not visible in this portion of the file -- presumably a
    # serial number for "will be killed" passes over node trees)
# ==
class Node( StateMixin):
    """
    Superclass for model components which can be displayed in the Model Tree.
    This is inherited by Groups, molecules (Chunks), Jigs, and some more
    specialized subclasses. The method implementations in class Node are
    designed to be typical of "leaf Nodes" -- many of them are overridden
    by Group, and some of them by other Node subclasses.
    """

    # default values of per-subclass constants
    # (see also attribute declarations, below)

    featurename = "" # wiki help featurename for Node subclass [bruce 051201]

    const_pixmap = None
        #bruce 090119 API revision (presence of default value of const_pixmap,
        # use of boolean test on it)

    # default values of instance variables

    name = "" # for use before __init__ runs (used in __str__ of subclasses)

    picked = False # whether it's selected
        # (for highlighting in all views, and being affected by operations)

    hidden = False # whether to make it temporarily invisible in the glpane
        # (note: self.hidden is defined, but always False, for Groups;
        # it might be set for any leaf node whether or not that node is ever
        # actually shown in the glpane.)

    open = False # defined on all Nodes, to make it easier to count open nodes
        # in a tree (this will never become True except for Groups)
        # (when more than one tree widget can show the same node, .open will
        # need replacement with treewidget-specific state #e)

    # Note: node.members is not defined unless node.is_group() is true,
    # i.e. unless that node inherits from Group. Given that condition,
    # it is always defined, whether or not node.openable() and/or node.open.
    # (And those are both always defined on all nodes.)
    # [bruce 080107 comment]

    dad = None # parent Group, or None

    part = None #bruce 050303

    prior_part = None #bruce 050527

    disabled_by_user_choice = False
        # [bruce 050505 made this the default on all Nodes, though only Jigs
        # use the attr so far; see also is_disabled]

    is_movable = False #mark 060120

    # attribute declarations (per-subclass constants used for copy and undo;
    # see state_constants.py for the S_* declaration values)

    copyable_attrs = ('name', 'hidden', 'open', 'disabled_by_user_choice')
        #bruce 050526
        # (see also __declare_undoable_attrs [bruce 060223])
        # subclasses need to extend this
        # TODO: could someday use these to help make mmp writing and reading
        # more uniform, if we also listed the ones handled specially
        # (so we can handle only the others in the new uniform way)

    _s_attr_dad = S_PARENT
    _s_attr_picked = S_DATA
    _s_categorize_picked = 'selection'
    _s_attr_part = S_CHILD
        # has to be child to be found
        # (another way would be assy._s_scan_children);
        # not S_CACHE since Parts store some defining state
        #e need anything to reset prior_part to None? yes, do it in _undo_update.
    _s_attr_assy = S_PARENT
        # assy can't be left out, since on some or all killed nodes it's
        # foolishly set to None, which is a change we need to undo when we
        # revive them. TODO: stop doing that, have a killed flag instead.
    def _undo_update(self): #bruce 060223
        """
        Reset undo-related per-instance state when Undo restores this
        node's state.
        """
        # no change to .part, since that's declared as S_CHILD
        self.prior_part = None # ensure the instance attribute exists...
        del self.prior_part # ...so this del can't fail; removing the
            # instance attribute saves RAM (the class-level default,
            # None, still applies)
        StateMixin._undo_update(self)
        return
    def __init__(self, assy, name, dad = None):
        """
        Make a new node (Node or any subclass), in the given Assembly (assy)
        (I think assy must always be supplied, but I'm not sure),
        with the given name (or "" if the supplied name is None),
        and with the optionally specified dad (a Group node or None),
        adding it to that dad as a new member (unless it's None or not
        specified, which is typical).

        All args are supplied positionally, even the optional one.

        Warning: arg order was revised by bruce 050216 to be more consistent
        with subclasses, but Group's arg order was and still is inconsistent
        with all other Node classes' arg order.
        """
        #bruce 050205 added docstring; bruce 050216 revised it
        #bruce 050216 fixed inconsistent arg order
        # [re other leaf nodes -- Group is not yet fixed], made name required
        self.name = name or "" # assumed to be a string by some code

        # assy must be None or an Assembly or a certain string
        if assy is not None and \
           not isinstance(assy, Assembly_API) and \
           assy != '<not an assembly>':
            assert 0, "node.assy must be an Assembly"
                # no need to mention the private possibilities in the error msg

        # verify assy is not None (not sure if that's allowed in principle,
        # but I think it never happens) [050223]
        if assy is None:
            #bruce 071026 always print this, not only when atom_debug
            print_compact_stack("note: Node or Group constructed with assy = None: ")

        self.assy = assy
        if dad: # dad must be another Node (which must be a Group), or None
            dad.addchild(self)
                #bruce 050206 changed addmember to addchild, thus enforcing
                # dad correctness
                # warning [bruce 050316]: this might call inherit_part;
                # subclasses must be ready for this by the time their inits
                # call ours, e.g. a Group must have a members list by then.
            assert self.dad is dad # addchild should have done this

        if self.__declare_undoable_attrs is not None: #bruce 060223 (temporary kluge)
            # it's None except the first time in each Node subclass;
            # is there a faster test? (Guess: boolean test is slower.)
            self.__declare_undoable_attrs()
        return
def short_classname(self): #bruce 080319
    """
    Return self's class name with any package/module prefix stripped
    (e.g. "DnaStrand" rather than "dna.model.DnaStrand.DnaStrand").
    """
    # could be more general, e.g. a helper function
    # todo: use this in a lot more places that inline this
    # (but in __repr__ a helper function would be safer than a method)
    full_name = self.__class__.__name__
    return full_name.rsplit('.', 1)[-1]
def __repr__(self): #bruce 060220, revised 080118, refactored 090107
    """
    Return a debug string for self, showing class, id, and (if
    accessible) name.
    [subclasses can override this, and often do]
    """
    try:
        name_part = ", name = %r" % (self.name,)
    except:
        # a partly-constructed or damaged node may lack a usable .name
        name_part = " (exception in `self.name`)"
    return "<%s at %#x%s>" % (self.short_classname(), id(self), name_part)
def set_assy(self, assy): #bruce 051227, Node method [used in PartLibrary_Command]
    """
    Change self.assy from its current value to assy,
    cleanly removing self from the prior self.assy if that is not assy.

    @param assy: the new Assembly for self.
    @raise AssertionError: if invariants are violated (old assy is the
        private '<not an assembly>' string or not an Assembly, or self
        is still attached to an assy/part/dad when it shouldn't be).
    """
    if self.assy is not assy:
        oldassy = self.assy
        if oldassy is not None:
            # simplest to just require a real Assembly here;
            # nodes constructed with the private string
            # '<not an assembly>' shouldn't be moved
            assert oldassy != '<not an assembly>'
            assert isinstance(oldassy, Assembly_API)
            # some of the above conds might not be needed, or might be
            # undesirable; others should be moved into following subr
            self.remove_from_parents()
        # self.assy must now be None -- either it already was, or
        # remove_from_parents cleared it.
        # (fix: the original repeated this identical assert twice in a row
        #  with no intervening code; the dead duplicate was removed.)
        assert self.assy is None
        self.assy = assy
        assert self.part is None
        assert self.dad is None
    return
def get_featurename(self): #bruce 051201
    """
    Return the wiki-help featurename for this object's class,
    or '' if there isn't one.
    """
    # TODO: add superclass-override checks and an "Undocumented Node"
    # default value, like in Command.get_featurename, except permit
    # specific classes to turn it off, at least for use in the MT cmenu,
    # like they do now by leaving it as "". [bruce 071227 comment]
    #
    # read this from the class deliberately -- it's intended to be a
    # per-subclass constant, and this enforces that until we need
    # to permit it to be otherwise
    klass = self.__class__
    return klass.featurename
def _um_initargs(self): #bruce 051013 [in class Node]
    """
    Return (args, kws) suitable for reconstructing self via __init__.
    [Overrides an undo-related superclass method;
    see its docstring for details.]
    """
    # [as of 060209 this is probably well-defined and correct
    #  (for most subclasses), but not presently used]
    # [update 071109: since then it may well have come into use]
    # self.dad (like most inter-object links) is best handled separately
    args = (self.assy, self.name)
    return args, {}
def _um_existence_permitted(self):
    #bruce 051005 [###@@@ as of 060209 it seems likely this should go away,
    # but I'm not sure]
    """
    [overrides UndoStateMixin method]
    Return True iff it looks like we should be considered to exist
    in self.assy's model of undoable state.
    Returning False does not imply anything's wrong, or that we should
    be or should have been killed/destroyed/deleted/etc --
    just that changes in us should be invisible to Undo.
    """
    ###e and we're under root? does that method exist?
    # (or should viewpoint objects count here?)
    for link in (self.assy, self.part, self.dad):
        if link is None:
            return False
    return True
def __declare_undoable_attrs(self): #bruce 060223
    """
    [private method for internal use by Node.__init__ only;
    temporary kluge until individual _s_attr decls are added]
    Scan the perhaps-someday-to-be-deprecated per-class list,
    copyable_attrs, and add _s_attr decls for the attrs listed in them
    to self.__class__ (Node or any of its subclasses).
    Don't override any such decls already present, if possible
    [not sure if you can tell which class added them #k].
    Should be run only once per Node subclass, but needs an example object
    (self) to run on. Contains its own kluge to help cause it to be run only
    once.
    """
    subclass = self.__class__
    if _DEBUG_UNDOABLE_ATTRS:
        print "debug: running __declare_undoable_attrs in", subclass
    for attr in subclass.copyable_attrs:
        name = "_s_attr_" + attr
        if hasattr(subclass, name):
            # a manual (individual) decl already exists -- leave it alone
            if _DEBUG_UNDOABLE_ATTRS:
                print "  debug: not overwriting manual decl of %r as %r" % \
                      (name, getattr(subclass, name))
        else:
            setattr( subclass, name, S_DATA)
                # or S_REFS? If it needs to be S_CHILD, add an individual
                # decl to override it.
    # prevent further runs on same subclass (in cooperation with the sole
    # calling code, which tests this attribute for None before calling)
    subclass.__declare_undoable_attrs = None
        # important to do this in subclass, not in self or Node
    return
def parent_node_of_class(self, clas):
    """
    If self has a parent Node in the current part
    (or a grandparent node, etc, but not self)
    which is an instance of clas,
    return the innermost such node; otherwise return None.
    @rtype: a Group (an instance of clas), or None
    @param clas: a class (only useful if it's Group or a subclass of Group)
    @see: get_topmost_subnodes_of_class (method in Group and Part)
    @note: for advice on avoiding import cycles when passing a class,
           see docstring of Group.get_topmost_subnodes_of_class.
    """
    #bruce 071206; revised 080808
    my_part = self.part
    ancestor = self.dad
    while ancestor and ancestor.part is my_part:
        if isinstance( ancestor, clas):
            # sanity-check that a match is not too high in the internal MT
            # (but it's ok if it's assy.tree itself)
            assert ancestor.assy is self.assy
            assert ancestor is not self.assy.shelf
            assert ancestor is not self.assy.root
            return ancestor
        ancestor = ancestor.dad
    return None
def containing_groups(self, within_same_part = False):
    """
    Return a list of the 0 or more group nodes which contain this node,
    in innermost to outermost order, not including self.assy.root.
    @param within_same_part: if true, only return groups in the same Part
                             as self (i.e. don't return assy.shelf).
    """
    #bruce 080507, revised 080626
    # review: would this be safe for a node in a thumbview?
    stop_at = self.assy.root
    found = []
    ancestor = self.dad
    while ancestor is not None and ancestor is not stop_at:
        if within_same_part and ancestor is self.assy.shelf:
            # review: use is_selection_group_container in that test?
            break
        found.append(ancestor)
        ancestor = ancestor.dad
    return found
def containing_nodes(self): #bruce 080507
    """
    Return a list of the 1 or more nodes which contain self
    (including self in the result),
    in innermost to outermost order, not including self.assy.root.
    @warning: it's an error for self to *be* self.assy.root.
    """
    assert self is not self.assy.root
    result = [self]
    result.extend(self.containing_groups())
    return result
def node_depth(self): #bruce 080116
    """
    Return self's depth in its node tree
    (defined as the number of groups it's inside,
    directly or indirectly, *including* the assy.root group
    which is not visible in the model tree, and including
    all other special groups such as assy.shelf).
    If self has no .dad (node tree parent), its depth is 0;
    within an assy's node tree only assy.root has depth 0.
    Note that arbitrarily deep node trees can legally exist
    outside of any assy (e.g. if some code creates a Group
    but doesn't add it into assy yet).
    """
    parent = self.dad
    if not parent:
        return 0
    return parent.node_depth() + 1
def node_depth_under_parent(self, parent): #bruce 080116; untested, not yet used
    """
    @param parent: optional parent node; if provided,
                   return -1 if self is not under or equal to parent,
                   and otherwise return self's depth
                   under parent, which is 0 if self is parent, 1 if self
                   is a direct child of parent, etc.
    @type parent: Node (need not be a Group)
    """
    if self is parent:
        return 0
    up = self.dad
    if not up:
        return -1
    above = up.node_depth_under_parent(parent)
    # propagate "not found" unchanged; otherwise we're one level deeper
    return -1 if above == -1 else above + 1
def set_disabled_by_user_choice(self, val):
    """
    Record that the user explicitly set self's disabled state to val,
    and mark the model as changed.
    """
    #bruce 050505 as part of fixing bug 593
    self.disabled_by_user_choice = val
    self.changed()
def changed(self):
    """
    Call this whenever something in the node changes
    which would affect what gets written to an mmp file
    when the node gets written.
    Try to call it exactly when needed: spurious calls make the user
    see "unsaved changes" (and confirm discards on load) when nothing
    really changed -- but when in doubt, a spurious call is better
    than a missing one.
    """
    #bruce 050505; not yet uniformly used (most code calls part.changed or
    #assy.changed directly)
    part = self.part
    if part is not None:
        part.changed()
        #e someday we'll do self.changed which will do dad.changed....
    elif self.assy is not None:
        # REVIEW: unclear whether calling assy.changed would be correct
        # in this case (when there's no .part set) [bruce 060227 comment]
        pass
    return
def is_group(self): #bruce 050216; docstring revised 071024
    """
    Is self a Group node (i.e. an instance of Group or a subclass)?
    Usage note: for a known Node, something.is_group() is preferable to
    isinstance(something, Group) -- it's flexible against future semantic
    changes, needs no import of Utility, and survives a reload of Utility.
    However, isinstance(obj, Group_API) (NIM) might be even better, since
    it works for any type of obj; so don't convert all isinstance calls
    to is_group() just yet.
    WARNING: future changes may require disambiguating "internal Group"
    from "something that acts in the model tree as a Group"; this method
    means the former (lower-level) sense, and some calls may then need a
    new method for the other meaning.
    [overridden in Group]
    """
    return False # for a leaf node
def isEmpty(self):
    """
    Whether self is an empty node; a plain leaf node never is.
    Subclasses (especially Group subclasses) should override this.
    @see: DnaGroup.isEmpty()
    """
    return False
def readmmp_info_leaf_setitem( self, key, val, interp ):
    """
    This is called when reading an mmp file, for each "info leaf" record
    which occurs right after this node is read and no other node has been
    read. (If this node is a group, we're called after it's closed, but
    groups should ignore this record.)

    @param key: a list of words from the record.
    @param val: a string; the entire record format is presently [050421]
                "info leaf <key> = <val>".
    @param interp: an object to help us translate references in <val>
                   into other objects read from the same mmp file or
                   referred to by it. See the calls of this method from
                   files_mmp for the doc of interp methods.

    If key is recognized, set the attribute or property it refers to to
    val; otherwise do nothing (or for subclasses of Node which handle
    certain keys specially, call the same method in the superclass
    for other keys).
    (An unrecognized key, even if longer than any recognized key,
    is not an error. Someday it would be ok to warn about an mmp file
    containing unrecognized info records or keys, but not too verbosely
    (at most once per file per type of info).)
    """
    # logic bug: new mmp records for leaf nodes, skipped by old reading code,
    # cause their info leaf records to erroneously get applied to the previous
    # leaf node that the old code was able to read. [bruce 071109 comment]
    if self.is_group():
        # "info leaf" records are only meaningful for leaf nodes
        if debug_flags.atom_debug:
            print "atom_debug: mmp file error, ignored: " \
                  "a group got info leaf %r = ..." % (key,)
        return
    if key == ['hidden']:
        # val should be "True" or "False" (unrecognized vals are treated as False)
        val = (val == 'True')
        self.hidden = val
    elif key == ['disabled']: #bruce 050422
        # val should be "True" or "False" (unrecognized vals are treated as False)
        val = (val == 'True')
        self.disabled_by_user_choice = val
    elif key == ['forwarded']: #bruce 050422
        # this happens just after we read this leaf node (self)
        # from an mmp file, and means we should move it from where it was
        # just placed (at the end of some Group still being read)
        # to a previous location indicated by val, and available via interp.
        interp.move_forwarded_node( self, val)
    else:
        # unrecognized key: silently ignore (by design; see docstring)
        if debug_flags.atom_debug:
            msg = "atom_debug: fyi: info leaf (in Node) with " \
                  "unrecognized key %r (not an error)" % (key,)
            print msg
    return
def is_disabled(self):
    """
    Should this node look disabled when shown in model tree
    (but remain fully functional for selection)?
    """
    #bruce 050421 experiment related to bug 451-9
    #e what Jig method does belongs here... [050505 comment]
    return False
def redmsg(self, msg):
    """
    Report msg to the user's history as an error (red) message.
    """
    #bruce 050203
    # revised 050901 to work even after assy set to None in Node.kill
    # note: in the class body, the inner redmsg refers to the
    # module-level formatting helper, not this method
    env.history.message( redmsg( msg ))
def is_top_of_selection_group(self):
    """
    Whether this node is the top of a "selection group".
    (Note: this can be true of leaf nodes as well as group nodes,
    in spite of the name.)
    We enforce a rule that limits the selection to being entirely within
    one selection group at a time, since many operations on mixes
    of nodes from different selection groups are unsafe.
    [As of 050131, should be True of the PartGroup and any "clipboard item";
    this implem is not complete, so it's overridden by PartGroup.]
    """
    #bruce 050131 for Alpha [#e rename is_selection_group?]
    # [#e rename concept "selectable set"?]
    ###@@@ [implem discussion, 050201: in hindsight the selgroup should
    # just be stored in each node and maintained when dad changes, like
    # .assy; for Beta it may even *be* .assy (different per clipboard
    # item), or a separate field like .selgroup/.space/.part meaning
    # "anything you want to show in the glpane at once / one physical
    # space where collisions and bonds are possible".]
    #
    # This implem is correct for most nodes -- it determines whether self
    # is a "clipboard item" (and could even work for future "Groups of
    # clipboard items"). But it's wrong for PartGroup itself (thus is
    # overridden by it).
    parent = self.dad
    return parent and parent.is_selection_group_container()
def is_higher_than_selection_group(self): #bruce 080222
    """
    Whether this node is higher than any node which satisfies
    node.is_top_of_selection_group(). True only of assy.shelf
    and assy.root in the current arrangement of an Assembly.
    """
    # This implem is not ideal -- it knows too much about assy.
    # Probably it'd be better to ask self.assy if self has this property
    # within it. [todo: refactor it like that]
    assy = self.assy
    if self is assy.shelf:
        return True
    return self is assy.root
# class constant: if True, change_current_selgroup_to_include_self
# silently tolerates self having no selection group at all.
no_selgroup_is_ok = False
    #bruce 050612 class constant, could be overridden in some subclasses
    # [not presently needed, but tested]
def change_current_selgroup_to_include_self(self): #bruce 050131 for Alpha
    """
    Make self's selection group the current one in self.assy, if it
    isn't already. (No-op when self lies under a picked ancestor --
    its selgroup is then assumed current -- or has no selgroup.)
    """
    # This might not be fast enough, so when there's time,
    # replace it with one that optims by stopping when dad is picked.
    foundselgroup, ours = self.find_selection_group_or_picked_dad()
    if not foundselgroup:
        # we found a "picked dad" instead -- no change needed
        # (important optimization for recursive picking in groups)
        assert ours.picked
        return
    if ours is None:
        # this might happen for non-bugs since changed_dad calls it for
        # picked nodes, but it makes sense to skeptically review any way
        # that can happen, so the debug print is good even if it's not
        # always a bug [bruce comment 050310]
        if self.no_selgroup_is_ok:
            return #bruce 050602
        if debug_flags.atom_debug:
            print("atom_debug: bug(?): change_current_selgroup_to_include_self "
                  "on node with no selgroup; ignored")
        return
    # ours is this node's selgroup, and might or might not already be the
    # current one in self.assy
    prior = self.assy.current_selgroup_iff_valid()
        # might be None but otherwise is always valid; no side effects [revised 050310]
    if ours is not prior:
        self.assy.set_current_selgroup( ours)
            # this unpicks everything not in 'ours' and warns if it unpicked anything
    return
def find_selection_group(self): #bruce 050131 for Alpha
    #####@@@@@ needs update/review for being called on deleted nodes; pass assy?
    """
    Return the selection group to which this node belongs, or None if none
    (as of 050131 that should happen only for Clipboard or Root).
    """
    candidate = self
    while candidate is not None and not candidate.is_top_of_selection_group():
        candidate = candidate.dad # might be None
        # always is None eventually, so loop always terminates by then
    return candidate # might be None
def find_selection_group_or_picked_dad(self): #bruce 050131 for Alpha
    """
    Return (True, selgroup) where selgroup (maybe None) would be returned
    by find_selection_group, or (False, picked_dad) if you hit a "picked
    dad of self" (implying that self's selection group, whatever it is, is
    the current one, assuming no bugs in our new invariants). Prefer the
    picked_dad retval since it's faster.
    """
    candidate = self
    while candidate is not None:
        if candidate.is_top_of_selection_group():
            break
        candidate = candidate.dad # might be None
        # always is None eventually, so loop always terminates by then
    if candidate is not None and candidate.picked:
        # note: this test also runs when candidate is self, even though
        # that's not literally a "picked dad of self"
        return False, candidate
    return True, candidate # might be None
def show_in_model_tree(self): #bruce 050127
    """
    Should this node be shown in the model tree widget?
    True for most nodes. Can be overridden by subclasses.
    [Added so that Datum Plane nodes won't be shown. Initially,
    it might not work much more generally than that.]
    """
    ###e needs renaming, sounds like "scroll to make visible" [050310]
    #bruce 050417 warning: I think I never ended up honoring this. Not sure.
    #bruce 050527: It's not honored now, anyway. ### REVIEW: keep or discard?
    return True
def haspicked(self): #bruce 050126
    """
    @return: whether node's subtree has any picked members.
    @rtype: boolean
    For a leaf node this is just self.picked. Faster than counting with
    nodespicked or "maxing" with hindmost when anything is picked; just
    as slow when nothing is (still requires a full scan).
    [#e should we memoize hindmost data??]
    [overridden in Group, but this docstring applies to both methods
    together; should not be overridden elsewhere.]
    """
    return self.picked
def permits_ungrouping(self): #bruce 050126 for Node; earlier for Group
    """
    [Leaf nodes can never (yet) be ungrouped. See Group.permits_ungrouping
    docstring for the general definition of this method.]
    """
    return False
def MT_kids(self, display_prefs = {}):
    """
    Return the child nodes to show under self in the model tree;
    none, for a leaf node. For doc, see Group.MT_kids().
    [some subclasses should override this, especially Group]
    """
    #bruce 050109; 080108 renamed from kids to MT_kids; revised semantics
    # review: must this be [] rather than ()?
    # Some calling code might add it to another list...
    return []
def openable(self):
    """
    Say whether tree widgets should permit the user to open/close their
    view of this node (typically by displaying some sort of toggle icon
    for that state). (Note, if this is True then this does not specify
    whether the node view is initially open... #doc what does.)
    [Some subclasses should override this; if they add nonmember MT_kids
    but don't override this, those MT_kids will probably never be shown,
    but that might be undefined and depend on the model tree widget --
    it's better to follow the rule of never having MT_kids unless you are
    openable.]
    """
    # + If we decide this depends on the tree widget or on something about
    #   it, we'll have to pass in some args... don't do that unless/until
    #   we need to.
    # + We deliberately don't measure len(self.MT_kids()) for the default:
    #   some nodes might not want to compute MT_kids until needed, in case
    #   doing so is expensive (cf. Qt's dirview example, which computes
    #   kids only when a directory node is actually opened).
    return False
# REVIEW: API and method/attr names related to "rename" needs review,
# since the text shown by some nodes in a tree widget (in the future)
# might not be "their name". [bruce 050128]
def rename_enabled(self):
    """
    Should tree widgets permit the user to rename this node?
    (If so, they will call self.try_rename(newname) to actually request
    renaming for a specific new name.)
    [some subclasses should override this and/or try_rename]
    """
    return True
def try_rename(self, name):
    """
    Given a new name for self, store it or reject it.
    Specifically, do one of these actions:
    - transform it into an acceptable name, store that in the node,
      do needed invals, and return (True, stored name);
    - or, reject it, and return (False, reason it's not ok).
    (The reason should be a string suitable for error messages.)
    """
    # todo: some of TreeWidget.slot_itemRenamed should be moved into a new
    # caller of this in Node, so other Qt widgets can also safely try to
    # rename Nodes. [bruce 050527 comment]
    # names containing ')' work now, so we permit them here [bruce 050618]
    if not self.rename_enabled():
        return (False, "renaming this node is not permitted")
    #mark 051005 -- now name can be a python string or a QString
    try:
        text = str(name)
    except:
        return (False, "illegal string")
    text = text.strip() # remove whitespace from both ends
    if not text:
        return (False, "blank name is not permitted")
    # accept the new name.
    ## self._um_will_change_attr('name') #bruce 051005; this might need
    ##  # to be called from a property-setter method for completeness
    self.name = text
    if self.assy:
        self.assy.changed()
    ###e should inval any observers (i.e. model tree) --
    # not yet needed, I think [bruce 050119]
    return (True, text)
def rename_using_dialog(self):
    """
    Rename this node using a popup dialog, when the user chooses to do
    so either from the MT or from the 3D workspace.
    """
    #This method is moved (with some modifications) from modelTreeGui.py so as
    #to facilitate renaming nodes from the 3D workspace as well.
    #The method was originally written by Bruce -- Ninad 2008-11-17
    # Don't allow renaming while animating (b/w views).
    assy = self.assy
    win = assy.win
    glpane = assy.glpane
    if glpane.is_animating:
        return
    # Note: see similar code in setModelData in an outtakes class.
    # REVIEW: why is renaming the toplevel node not permitted?
    # Because we'll lose the name when opening the file?
    oldname = self.name
    ok = self.rename_enabled()
    # Various things below can set ok to False (if it's not already)
    # and set text to the reason renaming is not ok (for use in error messages).
    # Or they can put the new name in text, leave ok True, and do the renaming.
    if not ok:
        text = "Renaming this node is not permitted."
            #e someday we might want to call try_rename on fake text
            # to get a more specific error message... for now it doesn't have one.
    else:
        # ask the user for the new name via a modal dialog
        ok, text = grab_text_line_using_dialog(
                        title = "Rename",
                        label = "new name for node [%s]:" % oldname,
                        iconPath = "ui/actions/Edit/Rename.png",
                        default = oldname )
    if ok:
        # try_rename can still reject the name (e.g. blank after strip)
        ok, text = self.try_rename(text)
    if ok:
        msg = "Renamed node [%s] to [%s]" % (oldname, text) ##e need quote_html??
        env.history.statusbar_msg(msg)
        win.mt.mt_update() #e might be redundant with caller; if so, might be a speed hit
    else:
        msg = "Can't rename node [%s]: %s" % (oldname, text) # text is reason why not
        env.history.statusbar_msg(msg)
    return
def drag_move_ok(self): # renamed/split from drag_enabled; docstring revised 050201
    """
    Say whether a drag_move which includes this node can be started (for
    "drag and drop").
    It's ok if only some drop-targets (nodes or inter-node gaps) can
    accept this node; we'll ask the targets if they'll take a specific
    drag_moved list of nodes (which includes this node).
    A tree widget asked to drag_move some selected nodes might filter them
    by drag_move_ok to get the ones to actually move, or it might refuse
    the whole operation unless all are ok to move -- that's a UI decision,
    not a node semantics decision.
    [some subclasses should override this]
    """
    return True
def drag_copy_ok(self): # renamed/split from drag_enabled; docstring revised 050201
    """
    Say whether a drag_copy which includes this node can be started (for
    "drag and drop"). Same comments as for drag_move_ok apply.
    [some subclasses should override this]
    """
    #bruce 050527 comment: this API needs revision, since the decision for
    # jigs depends on what other nodes are included.
    # And we should revise it more, so we can construct a Copier object,
    # let it "prep", and use it for not only filtering out some nodes
    # (like this does) but getting the summary msg for the drag graphic,
    # etc. #####@@@@@
    return True
def drop_on_should_autogroup(self, drag_type, nodes): #bruce 071025
    """
    Say whether Model Tree DND drops onto this node (self),
    of the given drag_type and list of nodes,
    should automatically group the dropped nodes.
    @note: this is called even if there is only one node,
           so if you want to group only when there's more than one,
           return len(nodes) > 1 rather than just True.
    @param drag_type: 'move' or 'copy'
    @param nodes: Python list of nodes being DND'd onto self.
    [overridden in some subclasses]
    """
    return False
def MT_DND_can_drop_inside(self): #bruce 080317
    """
    Are ModelTree Drag and Drop operations permitted to drop nodes
    inside self?
    [overridden in Group and again in some of its subclasses]
    """
    return False
def node_icon(self, display_prefs):
    """
    Return the (cached) icon pixmap for self in the model tree.
    [all Node subclasses should either override this
    or define a class or instance value for const_pixmap attribute]
    """
    pixmap = self.const_pixmap
    if pixmap:
        # simple nodes just set this in __init__ (or as a
        # class constant) and are done with it [bruce 060523/090119]
        return pixmap
    # report the bug through the icon machinery itself, by using the
    # message as a (fake) icon filename
    msg = "bug: Node subclass %s forgot to override node_icon method " \
          "or set self.const_pixmap" % self.__class__.__name__
    return imagename_to_pixmap( msg)
        # should print msg, at most once per class
        # (some people might consider this a kluge)
# most methods before this are by bruce [050108 or later]
# and should be reviewed when my rewrite is done ###@@@
def addsibling(self, node, before = False):
    """
    Add the given node after (default) or before self, in self's Group.
    Node should not already be in any Group, since it is not removed from
    one. (Some existing code violates this; that's probably ok if node's
    old Group is never again used, but the practice should be deprecated,
    and then this method should detect node.dad already being set, or
    perhaps remove node from its dad.)
    [Special case: legal and no effect if node is self. But this should be
    made into an error, since it violates the rule that node is not
    presently in any Group!]
    [It is unlikely that any subclass should override this, since its
    semantics should not depend on anything about self, only (perhaps)
    on things about its Group, i.e. self.dad.]
    [Before bruce 050113 this was called Node.addmember, but it had
    different semantics from Group.addmember, so I split that into two
    methods.]
    """
    if node is self:
        # bruce comment 010510: looks like an error, and not nearly the
        # only one possible... maybe we should detect more errors too.
        #bruce comment 050216: probably no longer needed since probably
        # done in addchild
        return
    # insert node adjacent to self in our common parent
    position = {'before': self} if before else {'after': self}
    self.dad.addchild( node, **position)
    return
def addmember(self, node, before_or_top = False):
    """
    [Deprecated public method; overridden in Group with different behavior:]
    Like addsibling or addchild, depending on whether self is
    a leaf node or a Group. (Also misnamed, since a sibling is not a member.)
    Any call to addmember whose node is known to be always a Group
    can be replaced with addchild (default option values are compatible,
    but a named option must be renamed). Any call whose node is *never*
    a Group can be changed to addsibling. Any call whose node is sometimes
    a Group and sometimes not might need this method -- or might have a
    bug because it uses it, if the calling code assumed the wrong thing
    about node's Groupness!
    [We might un-deprecate this by redefining it as what to do when you
    drop node onto self during drag-and-drop, but really that should be
    its own separate method, even if similar to this one.]
    """
    ###REVIEW: how should each call of this behave if node is a group that
    # acts like a leaf node for some purposes, e.g. DnaGroup? @@@@
    # [bruce 080303 comment]
    # leaf-node implem: insert node as our sibling (after us by default);
    # note that the Group implem instead inserts node as a child.
    # [bruce 071110 revised, as part of splitting Group into its own module]
    self.addsibling( node, before = before_or_top)
    return
def genvisibleleaves(self, include_parents = False): #bruce 060220
    """
    Assuming self is visible in the MT (ignoring scrolling), generate
    the set of self and/or its children which have no visible children
    (i.e. which are leaf nodes, or empty Group nodes (good??), or closed
    Group nodes). By default, skip anything which has children we'll
    yield, but if include_parents is True, include them anyway.
    [Note that this uses .open which might be considered
    model-tree-specific state -- if we ever let two MTs show the model
    hierarchy at once, this will need an argument which is the
    openness-dict, or need to become an MT method.]
    """
    # Note: this is not presently used, but should be, since it helped
    # implement the MT arrow key bindings, which were left out in the port
    # to Qt4 even though their implem has nothing to do with Qt except for
    # receiving the arrow key events. [bruce 071206 comment]
    if self.is_group() and self.open and self.openable():
        #bruce 080108 added .openable cond (guess)
        shown_members = self.MT_kids() #bruce 080108 .members -> .MT_kids()
        if shown_members:
            if include_parents:
                yield self
                #e Do we want another option, for yielding parents before
                # vs. after their kids? 'before' is what we want for MT
                # arrow keys, whether moving up or down, since for 'up'
                # we reverse this entire sequence.
            for member in shown_members:
                for leaf in member.genvisibleleaves(include_parents = include_parents):
                    yield leaf
            return
    # leaf node, or a Group with no visible children
    yield self
    return
def pick(self):
    """
    select the object
    [extended in many subclasses, notably in Group]
    [Note: only Node methods should directly alter self.picked,
    since in the future these methods will sometimes invalidate other state
    which needs to depend on which Nodes are picked.]
    """
    ###@@@ I don't know whether that new rule is yet followed by
    # external code [bruce 050124].
    #bruce 050131 for Alpha: at least it's now followed in "pick" methods.
    if not self.picked:
        if self.part is None:
            #bruce 080314 check for this
            print("likely to cause bugs: .part is None in .pick for %r" % self)
        self.picked = True
        # bruce 050125: should we also call self.assy.permit_picked_parts()
        # here? ###@@@ [not just in chunk.pick]
        #bruce 050131 for Alpha: I'm guessing we don't need to, for jigs
        # or groups, since they don't get into assy.molecules or selmols;
        # whether doing it anyway would be good or bad, I don't know.
        self.changed_selection() #bruce 060227
    # note: done even if already picked; stops at a picked dad, so should
    # be fast enough during recursive use
    self.change_current_selgroup_to_include_self()
def ModelTree_plain_left_click(self): #bruce 080213 addition to Node API
    """
    Subclasses which want side effects from a plain, direct left click
    in a model tree widget (after the usual effect of self.pick)
    should implement those by overriding this method.
    (Note that .pick, unlike this method, can also be called due to
    selecting a Group, select all, context menu, or even Undo.)
    """
    # default: no extra side effects for a leaf-level Node
    pass
def ModelTree_context_menu_section(self): #bruce 080225 addition to Node API
    """
    Return a menu_spec list to be included in the Model Tree's context
    menu for this node, when this is the only selected node
    (which implies the context menu is specifically for this node).
    Default implementation returns []. Subclasses which want to extend this
    should in most cases first call the superclass version of this method,
    and then append their menu item tuples to the end of the list it
    returns, and return that. But in principle they could prepend or insert
    new items between specific items in the superclass value, or even
    remove superclass items, add wrappers to their methods, etc.
    @see: makemenu_helper, for documentation of the menu_spec format.
    """
    return []
def unpick(self):
    """
    Unselect this node, and all its ancestor nodes.
    [extended in many subclasses, notably in Group]
    [Note: only Node methods should directly alter self.picked,
    since in the future these methods will sometimes invalidate other
    state which needs to depend on which Nodes are picked.]
    """
    if self.picked:
        self.picked = False
        self.changed_selection() #bruce 060227
    # Also unpick all ancestors [bruce 050126], to strictly enforce the
    # rule "selected groupnode implies all selected members". This is
    # done outside the 'if' on purpose: if some other code bypassed this
    # method and set node.picked = False directly, we may still fix up
    # the situation here later.
    parent = self.dad
    if parent and parent.picked:
        # call the method, so a Group subclass override takes effect
        parent.unpick_top()
def changed_selection(self): #bruce 060227
    """
    Record the fact that the selection state of self or its contents
    (Group members or Chunk atoms) might have changed.
    """
    assy = self.assy
    if assy is not None:
        # forward the notification to the assembly's selection tracker
        assy.changed_selection()
    return
def unpick_all_except(self, node):
    """
    Unpick all of self and its subtree except whatever is inside node and
    its subtree; return value says whether anything was actually unpicked.
    """
    # this implem should work for Groups too, since self.unpick does
    if self is node:
        return False
    # unpick_top returns nothing, so capture the prior picked state;
    # given our invariants this correctly answers "was self unpicked".
    was_picked = self.picked
    self.unpick_top()
    members_unpicked = self.unpick_all_members_except( node)
        # btw, during recursive use of this method, unpick_top (either
        # the Node or Group implem) will see self.dad is not picked and
        # not need to keep calling unpick_top
    return was_picked or members_unpicked
def unpick_all_members_except(self, node):
    """
    Unpick all members of self except whatever is inside node and its
    subtree; return value says whether anything was actually unpicked.
    [overridden in Group]
    """
    # a leaf node has no members, so nothing can ever be unpicked here
    return False
def unpick_top(self): #bruce 050124 #bruce 050131 making it correct for chunk and jig
    """
    Unselect this node -- but (unlike Group.unpick) don't change
    the selection state of its members. Do unselect all its ancestors.
    [unlike unpick, this is generally NOT extended in subclasses,
    except in Group.]
    """
    # For a leaf node, plain unpick already has exactly this effect.
    # (Before 050131 this was Node.unpick(self), which was wrong for
    # chunk and jig.)
    self.unpick()
def is_glpane_content_itself(self): #bruce 080319
    """
    Is self (not counting its content) normally shown in the glpane
    due to its class or nature (ignoring anything transient like
    display style settings or current part)? And if so, should its
    not being picked prevent Groups containing it from being picked
    due to all their other glpane content being picked, when they
    occur inside certain kinds of Groups on which this can be called?

    @rtype: boolean
    @see: methods (on other classes) with "movable" in their name.

    [Many subclasses must override this; not all yet do, but this does
    not yet cause harm due to how it so far needs to be used, as of
    080319. Chunk must override this, and DnaMarker must return False.
    For correctness, many other jigs, including ChainAtomMarker by
    default, ought to return True, but this may be NIM.]
    """
    # See comment on the Jig method for the effect of this being False
    # when self is visible in the GLPane. [bruce 080319]
    # Note: code which tests for "Chunk or Jig" might do better to test
    # this method's return value. [bruce circa 080319]
    # REVIEW: rename to indicate "3d model membership" rather than
    # "glpane"? It's about selection semantics over the 3d model, not
    # graphical display. [bruce 090123 comment]
    return False
def pick_if_all_glpane_content_is_picked(self): #bruce 080319
    """
    For documentation, see the Group implementation of this method.

    @return: whether self contains any (or is, itself) "glpane content".
    @see: Group.pick_if_all_glpane_content_is_picked
    @note: has no side effect when self is a leaf node, since if it
           should pick self, self is already picked.
    [must be overridden by Group; should not need to be overridden
    by any other subclasses]
    """
    # for a leaf node, the answer is just whether self itself counts
    is_content = self.is_glpane_content_itself()
    return is_content
def call_on_topmost_unpicked_nodes_of_certain_classes(self, func, classes): #bruce 080319
    """
    Call func on the topmost unpicked subnodes of self (i.e. self
    or its members at any level) which have one of the given classes.
    (The "topmost such nodes" means the ones that are not contained in
    other such nodes. I.e. if we call func on a node, we never call it
    on a member of that node at any level.)

    @param func: callable taking one Node argument; return value ignored
    @param classes: sequence (list or tuple) of classes to match against
    """
    if self.picked:
        return
    # isinstance accepts a tuple of classes (but not a list), in all
    # Pythons we support, so convert and test in a single call instead
    # of looping over the classes by hand.
    if isinstance(self, tuple(classes)):
        func(self)
    elif self.is_group():
        for member in self.members:
            member.call_on_topmost_unpicked_nodes_of_certain_classes(func, classes)
    return
_old_dad = None ###k not yet used?
    # class default for a per-instance attribute; judging from the ###k
    # comment it was intended to record a node's previous .dad, but no
    # code in view ever assigns it -- TODO confirm before removing.
#####@@@@@ review got to here, except: to chgdad added only cmts plus
#####docstring plus new name
def changed_dad(self):
    """
    [private method]
    Must be called after self.dad might have changed, before again exposing
    modified node to the public. Keeps some things up to date continuously;
    records info to permit updating other things later.

    Side effects (in order):
      - asserts self.dad exists and that self.assy is consistent with it;
      - copies .assy down from the new dad;
      - propagates .part down from dad (or .prior_part up to dad);
      - if self or the new dad is picked, re-establishes selection
        invariants (selgroup membership; "picked dad implies picked kids").
    """
    node = self
    ## from changes import changed #bruce 050303, removed 050909
    ## not needed as of 050309:
    ## changed.dads.record(node)
    ## # make sure node's Part will be updated later if needed
    ## # [bruce 050303]
    assert node.dad is not None
        #k not sure if good to need this, but seems to fit existing calls...
        # that might change [050205 comment]
        #e if no dad: assy, space, selgroup is None.... or maybe keep
        # prior ones around until new real dad, not sure
    assert node.assy is node.dad.assy or node.assy is None, \
           "node.assy is not node.dad.assy or None: " \
           "node %r, .assy %r, .dad %r, .dad.assy %r" % \
           (node, node.assy, node.dad, node.dad.assy )
        # bruce 050308/080218, since following assy code & part code
        # has no provision yet for coexisting assemblies
    node.assy = node.dad.assy
        # this might change soon, or might not... but if it's valid at
        # all, it needs to be propogated down! we leave it like this for
        # now only in case it's ever being used to init the assy field
        # from None.
    #bruce 050308: continually let assigned node.dad.part get inherited
    # by unassigned node.part (recursively)
    if node.dad.part is not None:
        if node.part is None:
            # Note, this is the usual way that newly made nodes
            # acquire their .part for the first time!
            # They might be this node or one of its kids
            # (if they were added to a homeless Group, which is this node).
            #
            # update, bruce 080314: it is also ok for newly made nodes
            # to call .inherit_part directly in their constructor,
            # which means this call will do nothing. This is necessary
            # before they call things that want them to have a .part,
            # like .pick. Doing this fixed a bug in DnaLadderRailChunk.
            node.inherit_part(node.dad.part) # recurses only into kids with no .parts
    else:
        #bruce 050527 new feature: dad can also inherit from kid, but only prior_part
        if node.dad.prior_part is None: # as well as node.dad.part, already checked
            node.copy_prior_part_to(node.dad)
    if node.picked:
        # bruce 050131 for Alpha:
        # worry about whether node is in a different selection group than before;
        # don't know if this ever happens, but let's try to cooperate if it does:
        node.change_current_selgroup_to_include_self()
            # note: this has no effect if node doesn't have a selgroup
    if node.dad.picked:
        node.pick()
            #bruce 050126 - maintain the invariant "picked dad implies
            # picked members"! (two methods need this)
            # Warning: this might make some callers need to update glpane
            # who didn't need to before; possible bugs from this are not
            # yet analyzed.
            # Note 050206: the clipboard can't be selected, and if it
            # could be, our invariants would be inconsistent if it had
            # more than one item! (Since all items would be selected but
            # only one selgroup should be.) So, this line never picks a
            # clipboard item as a whole.
    return
def inherit_part(self, part): #bruce 050308
    """
    Add self to the given Part, setting self.part to it.
    (see Group method docstring for the full contract)
    [overridden in Group]

    @param part: the Part to add self to; must not be None.
    """
    # this implem is sufficient only for leaf nodes
    assert self.part is None
    part.add(self)
        # part.add is expected to set self.part, as verified just below
    assert self.part is part
def all_content_is_hidden(self): #ninad 080129
    """
    Return whether this node, including all its contents, is hidden
    (not shown in GLPane, and shown with inverted icon in Model Tree).

    For a plain Node this is just the value of self.hidden; Group
    overrides it to return True only when all members are hidden.

    @see: Group.all_content_is_hidden (which overrides this method)
    """
    #bruce 080205 renamed this from isHidden to all_content_is_hidden,
    # to avoid confusion with the QWidget method isHidden (also used
    # in our code)
    hidden_state = self.hidden
    return hidden_state
def hide(self):
    """
    Mark this node as hidden and unselect it; record a model change
    if it was previously visible.
    """
    was_hidden = self.hidden
    if not was_hidden:
        self.changed() #bruce 050512 part of fixing bug 614
    self.hidden = True
    self.unpick()
def Hide(self):
    """
    Hide self, and update the MT and GLPane accordingly.
    """
    # note: this is called from a node's (Jig) "Hide" context menu item
    # (in the GLPane, not MT). mark 060312.
    self.hide()
    glpane = self.assy.o
    if glpane.selobj is self:
        # otherwise self would remain highlighted until the mouse moves
        glpane.selobj = None
    self.assy.w.win_update()
def unhide(self):
    """
    Make this node visible again; record a model change if it was hidden.
    """
    if not self.hidden:
        return
    self.changed() #bruce 050512 part of fixing bug 614
    self.hidden = False
def apply2all(self, fn):
    """
    Apply fn to self and (as overridden in Group) all its members;
    see Group.apply2all docstring for details.
    [overridden in Group]
    """
    # leaf node: only self to visit
    fn(self)
def apply_to_groups(self, fn):
    """
    Like apply2all, but applies fn only to Group nodes (at or under self).
    @note: this *does* apply fn to leaf-like Groups such as DnaStrand,
           and to any groups inside them (even though they are not
           user-visible in the model tree).
    [overridden in Group]
    """
    # a leaf node is not a Group, so there is nothing to do
    return None
def apply2picked(self, fn):
    """
    Apply fn to the topmost picked nodes under (or equal to) self,
    but don't scan below picked nodes.
    See Group.apply2picked docstring for details.
    [overridden in Group]
    """
    if not self.picked:
        return
    fn(self)
def hindmost(self):
    """
    [docstring is meant for both Node and Group methods taken together:]
    Thinking of nodes as subtrees of the model tree, return the smallest
    subtree of self which contains all picked nodes in this subtree, or
    None if there are no picked nodes in this subtree. Note that the
    result does not depend on the order of traversal of the members of
    a Group.
    """
    if not self.picked:
        return None
    return self
def ungroup(self):
    """
    If this Node is a Group, dissolve it, letting its members
    join its dad, if this is possible and if it's permitted as a
    user-requested operation. See our Group implem for details.
    """
    #bruce 050121 inferred docstring from 2 implems and 1 call
    # a leaf node cannot be dissolved, so this is a no-op
    return None
# == copy methods -- by default, Nodes can't be copied, so all
# == copyable Node subclasses should override these methods.
def will_copy_if_selected(self, sel, realCopy):
    """
    Will this node copy itself when asked (via copy_in_mapping or
    postcopy_in_mapping [#doc which one!]) because it's selected in sel,
    which is being copied as a whole?

    If realCopy is true (this is a real copy operation, not just a
    test) and this node will not copy, warn the user in the history.

    [Node types which implement an appropriate copy method should
    override this method.]
    """
    #bruce 050525; wware 060329 added realCopy arg
    if realCopy:
        #bruce 060329: default warning, correct if the whole realCopy
        # scheme is, though I'm dubious about the whole scheme.
        msg = "Node [%s] won't be copied." % (self.name)
        env.history.message(orangemsg(msg))
    return False # conservative answer
def will_partly_copy_due_to_selatoms(self, sel):
    """
    For nodes which say True to .confers_properties_on(atom) for one or
    more atoms which are part of a selection being copied, but when this
    node is not selected, will it nonetheless copy all or part of itself
    (when its copy_partial_in_mapping method is called), so that the
    copied atoms still have the property it confers?

    [Node types which implement an appropriate copy method should
    override this method too.]
    """
    # conservative default: no partial copy
    return False
def confers_properties_on(self, atom):
    """
    Does this Jig (or any node of a type that might appear in atom.jigs)
    confer a property on atom, so that it should be partly copied, if
    possible (by self.copy_partial_in_mapping) when atom is?

    Note: only Anchor overrides this (as of 070608), and the only new
    kinds of Nodes that might need to override it would be Jigs designed
    to alter the rendering or simulation properties of all their atoms,
    as a substitute for directly storing those properties on the atoms.
    If in doubt, don't override it.
    """
    # default for most jigs and (for now) all other Nodes
    return False
def copy_full_in_mapping(self, mapping): # Node method
    """
    If self can be fully copied, this method (as overridden in self's
    subclass) should do so, recording in mapping how self and all its
    components (eg chunk atoms, group members) get copied, and returning
    the copy of self, which must be created in mapping.assy (which may
    differ from self.assy). If self will refuse to be fully copied,
    return None. [###k does it need to record that in mapping, too??
    not for now.]

    It can assume self and all its components have not been copied yet
    (except for shared components like bonds #k #doc). It can leave out
    some mapping records for components if nothing will need them (e.g.
    atoms only need them regarding some bonds and jigs). For references
    to things which might not have been copied yet, or might never be
    copied (e.g. atom refs in jigs), it can make an incomplete copy and
    record a method in mapping to fix it up at the end. But it must
    decide now whether self will agree or refuse to be copied (using
    mapping.sel if necessary to know what is being copied in all).

    [All copyable subclasses should override this method.]
    """
    # conservative base-class behavior: refuse to copy
    return None
copy_partial_in_mapping = copy_full_in_mapping
    # alias: partial copy behaves like full copy -- equivalent for all
    # jigs which need it, as of 050526 [method name added 050704]
    #
    # Note (bruce 060523): this might be wrong for jigs that overrode
    # copy_full_in_mapping (the alias still points at the Node implem),
    # but since copy_partial_in_mapping is not presently called,
    # I won't bother to clean it up for now.
def copy_in_mapping_with_specified_atoms(self, mapping, atoms):
    #bruce circa 050525; docstring revised 050704
    """
    #doc; must honor mapping.assy; certain subclasses should override
    [e.g. chunk]; for use in copying selected atoms
    """
    # base class: this node does not support atom-limited copying
    return None
def copy_copyable_attrs_to(self, target, own_mutable_state = True):
    """
    Copy all copyable attrs (those named in self.copyable_attrs, a
    typically-subclass-specific tuple of attribute names) from self to
    target -- presumably a Node of the same subclass, though as of
    051003 target may also be a methodless attribute-holder.

    Target and self need not be in the same assy, and when they are not,
    that must not be disturbed (setting target.assy would be a bug).
    No invals or updates are done in target.

    This is not a full copy of self: copyable_attrs should hold only
    "parameter-like" attributes, not object-valued ones like
    Group.members, Node.dad, or Chunk.atoms. It's a helper for making
    full or partial copies; anything left out of copyable_attrs must be
    handled separately by the copy methods.

    As of 051003, mutable attribute values (of the standard kinds
    defined by state_utils.copy_val) are deep-copied so no mutable state
    is shared between copies and originals; pass
    own_mutable_state = False to skip that (a useful optimization when
    serial copies are made and intermediate copies won't be kept).

    [private helper for subclass-specific copy methods, which may need
    to fix up id-like or name-like attrs afterwards; subclasses probably
    never need to extend this method]
    """
    #bruce 050526; behavior and docstring revised 051003
    # REVIEW/TODO: rename this to be private, if indeed it is
    for attr_name in self.copyable_attrs:
        assert attr_name != 'assy' # todo: optim by doing this once per class
        value = getattr(self, attr_name)
        if own_mutable_state:
            value = copy_val(value)
        setattr(target, attr_name, value)
            # note: waste of RAM: this turns some default class attrs
            # into unneeded instance attrs (nevermind for now; but note
            # that some classes copy some attrs outside of this method
            # for this reason)
    if isinstance(target, Node):
        # this condition permits target being just a methodless
        # attribute-holder [new feature, bruce 051003]
        self.copy_prior_part_to( target)
    return
def copyable_attrs_dict(self):
    """
    Return a new dictionary mapping each attribute name listed in
    self.copyable_attrs to a copy (via copy_val) of its current value
    on self.
    """
    return dict([ (attr, copy_val(getattr(self, attr)))
                  for attr in self.copyable_attrs ])
def attr_update(self, dict1):
    """
    Set each attribute named in dict1 on self to the corresponding value.

    @param dict1: maps attribute names (strings) to new values, e.g. a
                  dict returned by self.copyable_attrs_dict().
    """
    # iterate over the dict directly instead of using .iteritems(),
    # which is Python-2-only; behavior is identical.
    for attr in dict1:
        setattr(self, attr, dict1[attr])
def copy_prior_part_to(self, target): #bruce 050527
    """
    If target (presumed to be a Node) has no part or prior_part, set its
    prior_part from self, for sake of initial views of new Parts
    containing target, if any such new Parts are yet to be made.
    """
    if target.part is not None or target.prior_part is not None:
        # target already has a part, or a recorded prior part; leave it
        return
    new_prior = self.part
    if new_prior is None:
        new_prior = self.prior_part
    target.prior_part = new_prior
def own_mutable_copyable_attrs(self):
    """
    [WARNING: this docstring is out of date as of 051003]
    If any copyable_attrs of self are mutable and might be shared with
    another copy of self (by self.copy_copyable_attrs_to(target)),
    replace them with copies so they are no longer shared and can safely
    be independently changed.
    [some subclasses must extend this]
    """
    #bruce 051003: now that copy_copyable_attrs_to deepcopies mutable
    # parameter values, overriding this is only needed for mutable state
    # of types that method can't handle, or which is not declared in
    # self.copyable_attrs.
    ##e note: docstring and perhaps method name should be changed; most
    # calls should remain, but all overridings (and/or related decls of
    # mutable_attrs) should be reviewed for removal. [as of 060523 the
    # only override is in jig_Gamess.py; it could probably be removed
    # but that requires analysis.]
    return None
## def copy(self, dad):
## # just for backwards compatibility until old code is changed [050527]
## # This method should be removed soon; AFAIK the only caller
## # is _pasteJig, which never works [bruce 090113 comment]
## self.redmsg("This cannot yet be copied")
## if debug_flags.atom_debug:
## print_compact_stack("atom_debug: who's still calling this " \
## "deprecated method? this is:\n ")
## return None # bruce 050131 changed this from "return 0"
# ==
def kill_with_contents(self):
    """
    Kill this Node including its 'logical contents': its self.members
    as well as non-members it logically contains. Example: a
    DnaSegment's logical contents are AxisChunks and StrandChunks; only
    AxisChunks are direct members, but the StrandChunks are logical
    contents too. Callers wanting to delete self along with both kinds
    of contents should use this method.

    The default implementation just calls self.kill().

    @see: dna_model.DnaSegment.kill_with_contents which overrides this
    @see: EditCommand._removeStructure() which calls this Node API method
    @see: InsertDna_EditCommand._removeSegments()
    """
    #NOTE: defined 2008-02-22 to support dna_updater implementation in
    # InsertDna_EditCommand. Called in EditCommands instead of the
    # widely used 'kill', to avoid modifying e.g. DnaSegment.kill to
    # delete non-members (avoiding potential internal bugs).
    self.kill()
def kill(self): # see also self.destroy()
    """
    Remove self from its parents and (maybe) destroy enough of its
    content that it takes little room (but be Undoable).
    [subclasses should extend this, but should call this Node method at
    the end of their own kill methods]
    """
    ###@@@ bruce 050214: intended to be called at the end (not start or
    # middle) of all subclass kill methods; ok to call twice on a node
    # (i.e. on an already-killed node); subclass methods should preserve
    # this property. Group.kill, which extends this, was also modified.
    ## self._f_prekill() #bruce 060327 ##k not positive this is needed in
    ## # Node (rather than just Group and Chunk being enough)
    ## ###@@@ defect in this (important): jigs dying due to one or all
    ## # their atoms dying will run this and mess up the counter.
    self.remove_from_parents()
_f_will_kill = 0
def _f_prekill(self):
    """
    [private helper method for Node.kill and its subclass implems]

    Set self._f_will_kill = ++ _will_kill_count on self, all child
    nodes, and all other owned subobjects that self.kill() would kill --
    but only when it's not already set on self (to avoid exponential
    runtime in tree depth during recursive kill), and only on Node
    classes which might own objects which need it (currently Atoms and
    maybe Bonds and conceivably Parts).

    This flag tells Atoms being killed not to create new bondpoints on
    their neighbors when those are also being killed -- a big
    optimization ("I'm also being killed, so don't spend lots of time
    healing my wounds when you're being killed").

    @note: Undo will revive killed objects, so kill needs to remove this
    flag from them when it returns, and Undo might assert that it's not
    set on revived objects.
    @note: We increment a counter when setting this, so as not to worry
    about leftover sets causing trouble; that might make some of the
    unsetting described above unnecessary.

    [subclasses should not extend this, but should extend
    _f_set_will_kill instead; at least Group and Chunk need to do that]
    """
    #bruce 060327 in Node (mainly to speed up Delete of chunks, also
    #(short term purpose) to reduce memory leaks)
    global _will_kill_count
    if self._f_will_kill >= _will_kill_count:
        # already marked during this kill pass; nothing to do
        return
    _will_kill_count += 1
    self._f_set_will_kill( _will_kill_count)
        # sets it to this value (with no checks) on self, children, atoms
    return
def _f_set_will_kill(self, val): #bruce 060327 in Node
"""
[private helper method for _f_prekill; see its docstring for details;
subclasses with owned objects should extend this]
"""
self._f_will_kill = val
glname = 0
    # required class constant in case of repeated calls of self.destroy()
    # [bruce 060322]
def destroy(self):
    """
    Delete cyclic refs (so python refdecr can free self) and refs to
    large RAM-consuming attrs; and more. [#doc, see code comments]
    """
    #bruce 060117 draft, experimental, not yet widely used
    self.kill() #bruce 060117 guess at implem
    #bruce 060322 comments:
    # Bugs: arbitrary-order calls (vs other obj destroy methods) are
    # probably not yet safe (for planned future calls of this method,
    # to plug memory leaks).
    # Note a potential difference of destroy from kill: after kill, a
    # Node might be revived by Undo; after destroy, it won't be. Its
    # entries in global change-tracking dicts, glname, undo objkey, etc,
    # should either be weak or be explicitly removed by destroy. This is
    # nim, but is important for plugging memory leaks. These comments
    # apply to the destroy methods of all model objects and their child
    # or helper objects, not only to Nodes. ###@@@ #e
    # We want this dealloc_my_glselect_name, but first we have to review
    # all calls to Node.destroy to verify it's not called when it
    # shouldn't be (e.g. when that node might still be revived by Undo).
    # ###@@@ BTW, as of 060322 the appropriate init, alloc, and draw
    # code for glname is only done (or needed) in Jig.
    ## self.assy.dealloc_my_glselect_name( self, self.glname )
    ## # only ok for some subclasses; some have ._glname instead
    ##e more is needed too... see Atom and Bond methods
    # do we want this:
    ## self.__dict__.clear() ###k is this safe???
    return
def remove_from_parents(self):
    #bruce 051227 split this out of Node.kill for use in new Node.set_assy
    """
    Remove self from its parents of various kinds
    (part, dad, assy, selection) without otherwise altering it.
    """
    ## self._um_deinit() #bruce 051005 #k not good enough unless always
    ## # called when a node is lost from the MT!
    parent = self.dad
    if parent:
        # delmember does assy.changed (if assy), clears dad, and
        # unpicks; but that unpick might be removed someday, so we
        # unpick below too [bruce 050214]
        parent.delmember(self)
    self.unpick()
        # must come after delmember (else it would unpick the dad too)
        # and before forgetting self.assy
    self.reset_subtree_part_assy()
def reset_subtree_part_assy(self): #bruce 051227 split this out
    """
    Cleanly reset self.part and self.assy to None, in self and its
    node-subtree (removing self and kids from those containers in
    whatever ways are needed). Assume self is not picked.
    [Subclasses (especially Group) must extend this as needed.]
    """
    assert not self.picked
    part = self.part
    if part:
        #bruce 050303; bruce 051227 moved from before delmember to after
        # unpick, not sure ok
        part.remove(self)
    env.node_departing_assy(self, self.assy) #bruce 060315 for Undo
    self.assy = None #bruce 050214 added this ###k review more
        #bruce 060315 comments:
        # reasons to set assy to None:
        # - helps avoid cycles when destroying Nodes
        # - logical part of set_assy (but could wait til new assy is stored)
        # reasons not to:
        # - Undo-tracked changes might like to use it to find the right
        #   AssyUndoArchive to tell about the change (can we fix that by
        #   telling it right now? in theory, more than one assy could
        #   claim it if we Undo in some!)
        # - we might avoid needing to scan it and store it as undoable state
        # - some bugs are caused by code that tries to find win, glpane,
        #   etc from assy
        # tentative conclusion:
        # - don't stop doing this for A7
        # - but tell Undo about the change, as part of letting it know
        #   which atoms are changing (all those still in this Node, if
        #   it's a chunk -- perhaps usually no atoms?); other changes on
        #   atoms can safely only tell the assy they refer to (via
        #   atom.molecule.assy) (or no assy if that's None).
def is_ascendant(self, node): # implem corrected by bruce 050121; was "return None"
    """
    Is node in the subtree of nodes headed by self?
    [Optimization of Group.is_ascendant for leaf nodes;
    see its docstring for more info.]
    """
    # a leaf node's subtree contains only itself
    return self is node
def moveto(self, node, before = False):
    """
    DEPRECATED. Use node.addchild(self) or node.addsibling(self) instead.

    Move self to a new location in the model tree, before or after node
    according to the 'before' flag, or if node is a Group, somewhere
    inside it (reinterpreting 'before' as a 'top' flag to decide where).
    Special case: if self is node, return with no effect (even if node
    is a Group).
    """
    #todo: rename for DND, and clean up; has several external calls
    # (but as of 080317, no longer used in MT DND)
    ###REVIEW: how should each call of this behave if node is a group
    # that acts like a leaf node for some purposes, e.g. DnaGroup? @@@@
    # [bruce 080303 comment]
    #bruce 050205: go directly to addmember. Note, this 'before' is a
    # positional arg for the before_or_top flag, not the named arg
    # 'before' of addchild! We *do* need addmember (with its dual
    # personality depending on node being a leaf or not) for now, while
    # DND uses "drop onto a leaf node" to mean what "drop under it"
    # ought to mean.
    node.addmember(self, before_or_top = before)
        # note: this needs to be addmember, not addchild or addsibling
    return
def nodespicked(self):
    """
    Return the number of nodes currently selected in this subtree.
    [subclasses must override this!]

    Warning (about current subclass implementations [050113]): scans the
    entire tree... calling this on every node in the tree might be slow
    (every node scanned as many times as it is deep in the tree).
    """
    # leaf node: count is 1 or 0 (a number, not a boolean!)
    return int(bool(self.picked))
def edit(self): # REVIEW [bruce 090106]: should this method be renamed editProperties?
    # (Would that name apply even when it enters a command? BTW should
    # the same API method even be used in those two cases?) To rename
    # it, search for 'def edit', '.edit' (whole word), "edit method".
    # Note that not all of those methods are on subclasses of Node.
    """
    [should be overridden in most subclasses]

    If this kind of Node has properties that can be edited with some
    sort of interactive dialog, do that (put up the dialog, wait for the
    user to dismiss it, change the properties as requested, and do all
    needed invals or updates), then return None (regardless of Cancel,
    Apply, Revert, etc). Or if it or its properties can be edited by a
    Command, enter that command and return None.

    If this kind of Node *doesn't* support editing of properties, return
    a suitable text string for use in an error message. (In that case,
    editProperties_enabled should also be overridden, and if it is,
    probably this method will never get called.)
    """
    #bruce 050121 inferred docstring from all 7 implems and 1 call;
    # added refusal/error-message feature. Error message revised
    # 050425 and again 090106 (with "Command" docstring addition,
    # guessed from the implem in NanotubeSegment).
    return "Edit Properties is not available for %s." % self.short_classname()
def editProperties_enabled(self): #bruce 050121 added this feature #bruce 090106 renamed
    """
    Subclasses should override this and make it return False
    if their edit method would refuse to edit their properties.
    """
    # i don't know if they all do that yet...
    #e should we check here to see if they override Node.edit?? nah.
    # wrong for an abstract Node, but there is no such thing!
    return True
def dumptree(self, depth = 0): # just for debugging
    """
    Debugging helper: print this node's name, indented by depth
    repetitions of "...".
    """
    print depth * "...", self.name
def node_must_follow_what_nodes(self):
    #bruce 050422 made Node and Jig implems of this from function of same name
    """
    [should be overridden by Jig]

    If this node is a leaf node which must come after some other leaf
    nodes due to limitations in the mmp file format, return a sequence
    of those nodes it must follow; otherwise return (). For all Groups,
    return ().

    Note: if we upgrade the mmp file format to permit forward refs to
    atoms (not just to whole leaf nodes, as it does now), this function
    could legally return () for all nodes (unless by then there are
    nodes needing prior-refs to things other than atoms). However, it
    probably shouldn't, since it is also used for placement of nodes
    which refer to other nodes in a logical relative position in the
    model tree.
    """
    # default: no ordering constraints
    return () #bruce 071214 optim: return (), not []
def writemmp(self, mapping): #bruce 050322 revised interface to use mapping
    """
    Write this Node to an mmp file, as controlled by mapping,
    which should be an instance of writemmp_mapping.

    [subclasses must override this if they need to be writable into an mmp file;
    we print a debug warning if they don't (and something tries to write them).]

    @see: writemmp_info_leaf, which leaf subclasses call to write "info leaf" records
    """
    # bruce 050322 revising this; this implem used to be the normal way
    # to write Jigs; now it's basically an error to call this implem,
    # but it's harmless -- it puts a comment in the mmp file and prints a debug warning.
    line = "# not yet implemented: mmp record for %r" % self.__class__.__name__
    mapping.write(line + '\n')
    if debug_flags.atom_debug:
        print "atom_debug:", line
    return
def writemmp_info_leaf(self, mapping): #bruce 050421
    """
    leaf node subclasses should call this in their writemmp methods,
    after writing enough that the mmp file reader will have created a Node for them
    and added it to its current group (at the end is always safe, if they write no sub-nodes)

    Writes an "info leaf" record for each nondefault flag on self
    (currently: hidden, disabled).

    [could be overridden by subclasses with more kinds of "info leaf" keys to write]
    """
    assert not self.is_group() # only leaf nodes may call this
    if self.hidden:
        mapping.write("info leaf hidden = True\n")
    if self.disabled_by_user_choice:
        # [bruce 050505 revised this so all Nodes have the attribute,
        #  tho so far only Jigs use it]
        mapping.write("info leaf disabled = True\n") #bruce 050422
    return
def writemdl(self, alist, f, dispdef):
    #bruce 050430 added Node default method to fix bug reported by Ninad for A5
    """
    Write self in mdl export format -- a no-op for plain Nodes.
    [subclasses with exportable content override this]
    """
    pass
def writepov(self, file, dispdef): #bruce 050208 added Node default method
    """
    Write self to a POV-Ray scene file -- a no-op for plain Nodes.
    [subclasses with graphical content override this]
    """
    pass
def draw(self, glpane, dispdef):
    """
    Draw self in the given glpane with the given display mode --
    a no-op for plain Nodes. [subclasses with graphical content override this]

    @see: self.draw_after_highlighting()
    """
    pass
def draw_after_highlighting(self,
                            glpane,
                            dispdef,
                            pickCheckOnly = False):
    """
    Draw the part of self's graphical appearance (or that of its members
    if it's a Group) that needs to be drawn AFTER the main drawing
    code has completed its highlighting/stenciling for selobj,
    and after main model and graphicsMode drawing (Draw_model,
    Draw_other, etc).

    Subclasses can override this method. Default implementation
    draws nothing and returns False (which is correct for most kinds
    of Nodes, at present). Overridden in class Group.

    @param pickCheckOnly: [needs documentation of its effect]
                          (for example use, see this method in class Plane)
    @type pickCheckOnly: boolean

    @return: A boolean flag 'anythingDrawn' that tells whether this method
             drew anything.
    @rtype: boolean

    @see: GraphicsMode.Draw_after_highlighting() which calls this method
          [note difference in capitalization and arg signature]
    """
    #Ninad 2008-06-20: This is a new API method that completely
    #replaces the implementation originally in method Utility._drawESPImage().
    #Also did many bug fixes in the original implementation.
    #
    ###TODO: The return value anythingDrawn is retained from the old
    # implementation, as some other code in SelectGraphicsMode._calibrateZ
    # apparently uses it. Need to check if that code is used anywhere.
    # [bruce 080917 adds: Yes, it's used in jigGLSelect and
    #  get_jig_under_cursor, which are still needed for now,
    #  though they should be removed someday. That is probably the
    #  only ultimate use of this return value (not sure).]
    # default implementation draws nothing:
    anythingDrawn = False
    return anythingDrawn
def draw_in_abs_coords(self, glpane, color):
    #bruce 050729 to fix some bugs caused by Huaicai's jig-selection code
    """
    Default implementation of draw_in_abs_coords: ignore color and
    delegate to self.draw with glpane's current display mode.

    Some implem is needed by any nodes or other drawable objects which
    get registered with self.assy.alloc_my_glselect_name and thereby
    need to provide Selobj_API.

    [Subclasses which can use color for highlighting in Build mode,
    or which want to look different when highlighted in Build mode,
    or which are ever drawn in non-absolute modelview coordinates,
    or for which glpane.displayMode is not what would be passed to
    their draw method, should override this method.]
    """
    del color # unused by this default implementation
    self.draw(glpane, glpane.displayMode)
    return
def killed(self): #bruce 050729 to fix some bugs caused by Huaicai's jig-selection code
    """
    Report whether self appears to have been removed from the model:
    a node with no .dad or no .assy is considered dead.
    (Probably not a fully correct criterion, but good enough for now.)
    """
    return self.dad is None or self.assy is None
def getinfo(self):
    # default no-op; subclasses with reportable info override this
    pass
def init_statistics(self, stats):
    """
    Initialize all model-statistics counters on the given stats object
    to zero, except ngroups which starts at -1 (so the group being
    counted excludes itself from its own total).
    """
    # Currently, this is only used by "part" and "group" nodes.
    # See PartProp.__init__() or GroupProp.__init__().
    # Mark 050911.
    for counter in ('nchunks', 'natoms', 'nsinglets',
                    'nrmotors', 'nlmotors', 'nanchors',
                    'nstats', 'nthermos', 'ngamess',
                    'num_espimage', 'num_gridplane',
                    'num_mdistance', 'num_mangle', 'num_mdihedral'):
        setattr(stats, counter, 0)
    stats.ngroups = -1 # Must subtract self.
def getstatistics(self, stats):
    """
    Add self's contribution to the counters on stats
    (see init_statistics for the counter attributes).
    This default implementation counts nothing;
    subclasses with countable content override it.
    """
    pass
def break_interpart_bonds(self):
    #bruce 050308 for assy/part split, and to fix bug 371 and related bugs for Jigs
    """
    Break all illegal bonds (atom-atom or atom-Jig or (in future) anything similar)
    between this node and other nodes in a different Part.
    [Note that as of 050513 and earlier, all atom-Jig interpart bonds
    are permitted; but we let the Jig decide that.]
    Error if this node or nodes it bonds to have no .part.
    Subclasses with bonds must override this method as appropriate.

    It's ok if some kinds of nodes do this more fancily than mere "breakage",
    e.g. if some Jigs break into pieces so they can keep connecting
    to the same atoms without having any inter-Part bonds,
    as long as, after this is run on all nodes in any subtree using apply2all,
    no inter-part bonds are left, and it works whether or not newly
    created nodes (created by this method while apply2all runs)
    have this method called on them or not.

    The Group implem does *not* call this on its members --
    use apply2all for that.

    [As of 050308, this is overridden only in class Chunk and
    class Jig and/or its subclasses.]
    """
    # default implementation: a plain Node has no bonds, so nothing to break
    pass
def move(self, offset): #bruce 070501 added this to Node API
    """
    If self has any geometry in 3d space, and if this operation makes
    sense for self's class, translate self in 3d space by the vector offset;
    do all necessary invalidations, but try to optimize those based on
    self's relative structure not having changed or reoriented.
    See also self.rot() and self.pivot().

    @param offset: vector by which to translate self in 3d space
    @type offset: L{VQT.V}

    @return: None

    @note: there is not yet a Node API method to find out whether
           this method is a noop. However, the Node API defines
           a class constant attribute, is_movable, which is
           closely related to that. See also "getSelectedMovables".

    [most subclasses with content in 3d space should override this method]
    """
    return # correct for many kinds of nodes
def rot(self, quat):
    #bruce 080305 added this to Node API (already on many subclasses)
    """
    If self has any geometry in 3d space, and if this operation makes
    sense for self's class, rotate self around its center of rotation
    (defined differently by different subclasses) by quaternion I{quat};
    do all necessary invalidations, but optimize those based on
    self's relative structure not having changed. See also self.pivot()
    and self.move().

    @param quat: The quaternion to rotate self by.
    @type quat: L{VQT.Q}

    @return: None

    @note: there is not yet a Node API method to retrieve self's
           center of rotation, or to find out whether it has one,
           or to find out whether this method is a noop.

    [most subclasses with content in 3d space should override this method]
    """
    return # correct for many kinds of nodes
def pivot(self, point, quat):
    #bruce 080305 added this to Node API (already on some subclasses)
    """
    If self has any geometry in 3d space, and if this operation makes
    sense for self's class, rotate self around point by quaternion quat;
    do all necessary invalidations, but optimize those based on
    self's relative structure not having changed. See also self.rot()
    and self.move().

    @param point: The point to rotate self around.
    @type point: L{VQT.V}

    @param quat: The quaternion to rotate self by, around I{point}.
    @type quat: L{VQT.Q}

    @return: None

    @note: some subclasses define self.rot but not self.pivot.
    @note: there is not yet a Node API method to find out whether
           this method is a noop.

    [most subclasses with content in 3d space should override this method]
    """
    return # correct for many kinds of nodes
def pickatoms(self):
    #bruce 070501 added this to Node API (was defined only in Chunk)
    """
    Pick (select) any not-yet-picked atoms owned by self which the
    selection filter permits the user to pick.

    @return: the number of newly picked atoms -- always 0 here, since
             most kinds of nodes own no atoms. [subclasses that can
             contain atoms must override this method]
    """
    return 0
def contains_atom(self, atom):
    #bruce 080305 added this to Node API
    # (was defined only in Chunk and in a class outside the Node hierarchy)
    """
    Report whether self contains the given atom (a real atom or bondpoint).

    Always False here, which is correct for node classes that cannot
    contain atoms. [subclasses that can contain atoms must override this]
    """
    return False
def get_atom_content(self, flags = -1): #bruce 080306
    """
    Return your current (up to date) atom content
    which intersects the given content flags.

    @param flags: the subset of content flags we should update and return
    @type flags: an "or" of content flag bits [#doc where they are defined]

    @return: current atom content of self -- always 0 for this default
             implementation, which is for nodes that can never have
             atom content.
    @rtype: an "or" of content flag bits

    [subclasses which can have any atom content need to override this method]
    """
    return 0
def _f_updated_atom_content(self): #bruce 080306
"""
Recompute, record, and return our atom content,
optimizing this if it's exactly known on self or on any node-subtrees.
[Subclasses which can contain atoms need to override this method.]
"""
# default implem, for nodes which can never have atom content
# (note, this default definition is needed on Node, since it's called
# on all members of a Group, whether or not they can contain atoms)
return 0
# an old todo comment:
#in addition, each Node should have the following methods:
# draw, cut, copy, paste
pass # end of class Node
# ==
class NodeWith3DContents(Node): #bruce 080305
    # REVIEW: which methods can safely assert that subclass must implement?
    """
    Abstract class for Node subclasses which can have contents
    with 3D position (possibly appearing in the graphics area
    and/or affecting a simulation).

    Notable subclasses (some indirect) include Chunk, Group, Jig.

    @note: rot and pivot raise AssertionError if not overridden,
           whereas break_interpart_bonds and move deliberately remain
           no-ops for now (their asserts are commented out below).
    """
    def break_interpart_bonds(self):
        """
        [overrides Node method; subclasses must override this method]
        """
        pass ### assert 0, "subclass must implement"
    def move(self, offset):
        """
        [overrides Node method; subclasses must override this method]
        """
        pass ### assert 0, "subclass must implement"
    def rot(self, quat):
        """
        [overrides Node method; subclasses must override this method]
        """
        assert 0, "subclass must implement"
    def pivot(self, point, quat):
        """
        [overrides Node method; subclasses must override this method]
        """
        assert 0, "subclass must implement"
    # def draw_in_abs_coords?
    pass # end of class NodeWith3DContents
# ==
class SimpleCopyMixin(Node):
    # This will probably just become the default implems for these methods in
    # Node, rather than its own class... but first, test it in Comment and
    # View. When it's stable, also see if the copy methods in Jig and even
    # Chunk can make use of these methods somehow (perhaps with these modified
    # to call new optional subclass methods). [bruce 060523]
    # Note: there's no reason to put this in its own file different than Node,
    # because it needs no imports of its own, and anything that imports it
    # also has to import Node. [bruce 071026 comment]
    # status [bruce 080313 comment]: used only by Comment, NamedView, PovrayScene.
    # See also def copy_..._mapping methods in other classes.
    """
    Node subclasses that want to be copyable via their _s_attr or
    copyable_attrs decls, and that don't need any optimizations for atoms or
    bonds or for avoiding full copy_val of all attrs, and that don't need any
    special cases like worrying about refs to other copied things needing to
    be transformed through the mapping (i.e. for which all copyable attrs are
    pure data, not node or atom refs), can mix in this class, BEFORE Node,
    provided they contain a correct definition of _um_initargs for use in
    creating the copy-stub, and don't interfere with the attrs of self stored
    by this class, self._orig and self._mapping.
    """
    def will_copy_if_selected(self, sel, realCopy): # in class SimpleCopyMixin
        """
        Say yes -- nodes using this mixin are always copyable.
        [overrides Node method]
        """
        return True
    def copy_full_in_mapping(self, mapping): # in class SimpleCopyMixin
        """
        Create and return a copy-stub of self (constructed via
        self._um_initargs, with self.assy replaced by mapping.assy),
        and schedule _copy_fixup_at_end to fill in its copyable attrs
        once the whole copy operation finishes.
        """
        # warning: most of this code is copied from the Jig method.
        clas = self.__class__
        method = self._um_initargs # fyi: for Node, the returned args are assy, name
        args, kws = method()
        # replace self.assy with mapping.assy in args
        # [new requirement of this method API, bruce 070430]
        newargs = list(args)
        for i, arg in enumerate(args): # idiomatic form of range(len(args))
            if arg is self.assy:
                newargs[i] = mapping.assy
        args = tuple(newargs)
        new = clas(*args, **kws)
        # store special info to help _copy_fixup_at_end
        # (note: these attrnames don't start with __ since name-mangling would prevent
        #  subclasses from overriding _copy_fixup_at_end or this method;
        #  that means all subclasses have to take care not to use those attrnames!
        #  It might be better to let them be "manually name-mangled". ##e FIX)
        new._orig = self
        new._mapping = mapping
        new.name = "[being copied]" # should never be seen
        mapping.do_at_end( new._copy_fixup_at_end)
        #k any need to call mapping.record_copy?? probably not for now,
        # but maybe later if these nodes can be ref'd by others
        # (or maybe the general copy code that calls this will take care of that then).
        return new
    def _copy_fixup_at_end(self): # in class SimpleCopyMixin
        # warning: most of this code is copied from the Jig method.
        """
        [Private method]
        This runs at the end of a copy operation to copy attributes from the old node
        (which could have been done at the start but might as well be done now for most of them).
        Self is the copy, self._orig is the original.
        """
        orig = self._orig
        del self._orig
        mapping = self._mapping # REVIEW: is keeping this reference until we return necessary?
        del self._mapping
        copy = self
        orig.copy_copyable_attrs_to(copy) # this uses copy_val on all attrs
        return
    pass # end of class SimpleCopyMixin
# ==
def topmost_nodes( nodes): #bruce 050303
    """
    Given 0 or more nodes (as a python sequence), return a list
    of the given nodes that are not descendants of other given nodes.
    (Duplicate nodes are also removed.)

    @see: related method hindmost and function topmost_selected_nodes,
          but those only work for the set of selected nodes.

    @warning: current implementation is quadratic time in len(retval)
    """
    res = {} # from id(node) to node
    for node in nodes:
        assert node is not None # incorrect otherwise -- None won't have .is_ascendant method
        dad = node
            # not node.dad, that way we remove dups as well (might never be needed, but good)
        while dad is not None:
            if id(dad) in res:
                break
            dad = dad.dad
                # bugfix: climb one level per iteration (was "node.dad",
                # which re-read the same parent forever -- an infinite loop
                # whenever node's parent was alive and not already in res)
        if dad is None:
            # node and its dads (all levels) were not in res
            # add node, but also remove any members that are below it (how?)
            #e (could be more efficient if we sorted nodes by depth in tree,
            #   or perhaps even sorted the tree-paths from root to each node)
            for other in list(res.values()):
                # list(...) so we can delete from res while scanning it
                if node.is_ascendant(other):
                    del res[id(other)]
            res[id(node)] = node
    return list(res.values())
# end
| NanoCAD-master | cad/src/foundation/Utility.py |
# Copyright 2007-2008 Nanorex, Inc. See LICENSE file for details.
"""
LeafLikeGroup.py - abstract superclass for groups that appear as leaves
in the MT, e.g. DnaStrandOrSegment, NanotubeSegment, PeptideSegment.
@author: Bruce
@version: $Id$
@copyright: 2007-2008 Nanorex, Inc. See LICENSE file for details.
History:
most methods written by Bruce as part of DnaStrandOrSegment,
and subsequently copied into NanotubeSegment and PeptideSegment
by Ninad and/or Mark and/or Piotr.
bruce 081217 made this class to contain common code for those
classes which originated in DnaStrandOrSegment (which was
svn copied to initiate this file).
TODO:
notice and pull in other common code subsequently
added to some of our subclasses.
"""
from foundation.Group import Group
from utilities.debug_prefs import debug_pref, Choice_boolean_False
class LeafLikeGroup(Group):
    """
    Abstract superclass for Groups that appear as leaves in the Model Tree.

    Internally, this is just a specialized Group containing subclass-
    specific subobjects and attributes, and overriding some Node or Group API
    methods to change Model Tree behavior (and perhaps other behavior).
    """
    def permit_addnode_inside(self): #bruce 080626
        """
        [overrides superclass method]
        """
        return False
    def permits_ungrouping(self):
        """
        Should the user interface permit users to dissolve this Group
        using self.ungroup?
        [overrides superclass method]
        """
        #bruce 080207 in deprecated class Block, copied to DnaStrandOrSegment 080318
        return self._show_all_kids_for_debug() # normally False
    def _show_all_kids_for_debug(self):
        #bruce 080207 in deprecated class Block, copied to DnaStrandOrSegment 080318
        #bruce 081217: revised to use same debug_pref for all node classes
        # (debug-only escape hatch: lets developers see and manipulate the
        #  normally-hidden members of leaf-like groups in the Model Tree)
        debug_pref_name = "Model Tree: show content of leaf-like Groups?"
        return debug_pref( debug_pref_name, Choice_boolean_False )
    def _f_wants_to_be_killed(self, pre_updaters = True, **opts): # in LeafLikeGroup
        """
        [friend method for enforce_permitted_members_in_groups and subroutines]

        Does self want to be killed due to members that got ejected
        by _f_move_nonpermitted_members (or due to completely invalid structure
        from before then, and no value in keeping self even temporarily)?

        @rtype: boolean

        [overrides superclass method]
        """
        #bruce 080319
        del opts, pre_updaters
        return not self.members # an empty leaf-like group has no reason to live
    def MT_DND_can_drop_inside(self): #bruce 080317, revised 080318
        """
        Are ModelTree Drag and Drop operations permitted to drop nodes
        inside self?

        [overrides superclass method]
        """
        return self._show_all_kids_for_debug() # normally False
    def openable(self):
        """
        whether tree widgets should permit the user to open/close
        their view of this node
        [overrides superclass method]
        """
        # if we decide this depends on the tree widget or on something
        # about it, we'll have to pass in some args... don't do that
        # unless/until we need to.
        #If there are no MT_kids (subnodes visible in MT under this group) then
        #don't make this node 'openable'. This makes sure that expand/collapse
        #pixmap next to the node is not shown for this type of Group with 0
        #MT_kids
        #Examples of such groups include empty groups, DnaStrand Groups,
        #DnaSegments etc -- Ninad 2008-03-15
        return len(self.MT_kids()) != 0
    def MT_kids(self, display_prefs = {}):
        # note: the mutable default {} is never modified here, so it's safe
        """
        [overrides superclass method]
        """
        if self._show_all_kids_for_debug(): # normally False
            # bruce 080318
            return self.members
        return ()
    def get_all_content_chunks(self): # by Ninad; moved & docstring revised by bruce 081217
        """
        Return all the chunks which should be considered logical contents
        of self.

        The default implementation returns direct members of self
        which are chunks.

        Some subclasses must override this.
        """
        # TODO: refactor this and its callers to use a more general definition
        # of what to return, and to use related submethods such as one to
        # iterate over logical contents (perhaps adding that to API).
        # The different calls might need splitting into different API methods.
        # REVIEW: I suspect there are bugs at present from not including jigs,
        # when this is used for dragging.
        # [bruce 081217 comments]
        all_content_chunk_list = []
        for member in self.members:
            if isinstance(member, self.assy.Chunk):
                all_content_chunk_list.append(member)
        return all_content_chunk_list
    pass # end of class LeafLikeGroup (comment fixed: formerly said DnaStrandOrSegment)
# end
| NanoCAD-master | cad/src/foundation/LeafLikeGroup.py |
# Copyright 2005-2009 Nanorex, Inc. See LICENSE file for details.
"""
preferences.py -- Preferences system.
@author: Bruce
@version: $Id$
@copyright: 2005-2009 Nanorex, Inc. See LICENSE file for details.
Module classification: [bruce 071215]
At least foundation, due to integral use of "from changes import UsageTracker".
But also could be construed to have lots of app-specific knowledge,
due to "from prefs_constants import prefs_table". But for now, experiment
with pretending that's not app-specific, which we can get away with since
it's pure data... and this might even make sense, if different apps
share code which references the same prefs_keys from that table,
as long as we make sure they can use different (per-app) prefs files.
(For the same reason, we'll also classify prefs_constants as foundation
or lower. In fact, it'll be utilities or constants for now, as explained
in its docstring.)
A desirable refactoring might be to classify prefs_constants higher
(model or a specific app) and pass it to a prefs singleton as an argument.
Then it'd be more clearly ok to call this module "foundation", but let
prefs_constants be higher. OTOH, the reason explained above may make it
perfectly ok for prefs_constants to be very low.
==
Prototype for Alpha.
See lower-down docstrings for usage.
==
History:
bruce 050106 or so: created it.
[some minor changes since then]
bruce 050804: added prefs usage/change tracking.
==
Should be used with bsddb,
but works without it too, after printing a warning.
The module bsddb is present in our standard installations
of windows and linux python, but not yet Mac python;
but we can add it, since it's easily available from
http://undefined.org/python/pimp/darwin-7.0.0-Power_Macintosh.html
(package bsddb3 4.1.6)
BUT WE SHOULD LOOK INTO THE LICENSE TO MAKE SURE IT'S OK!
(It probably is, and [050804] I think Huaicai investigated this
and confirmed that it is.)
"""
import os
import time
import NE1_Build_Constants
from utilities import debug_flags
from platform_dependent.PlatformDependent import mkdirs_in_filename
from platform_dependent.PlatformDependent import find_or_make_Nanorex_directory
import foundation.env as env
import utilities.EndUser as EndUser
from utilities.debug import print_compact_traceback
from utilities.constants import str_or_unicode
from foundation.changes import UsageTracker
from utilities.prefs_constants import prefs_table
# Basename of the factory-default prefs file, derived from the first
# two or three dot-separated components of the release version string.
_tmpary = NE1_Build_Constants.NE1_RELEASE_VERSION.split(".")
if len(_tmpary) >= 3:
    DEFAULT_PREFS_BASENAME = "default_prefs_v%s-%s-%s.txt" % \
        (_tmpary[0], _tmpary[1], _tmpary[2])
else:
    DEFAULT_PREFS_BASENAME = "default_prefs_v%s-%s.txt" % \
        (_tmpary[0], _tmpary[1])
    #Derrick 080703
# note: this name is still hardcoded into
# packaging/Pref_Mod/pref_modifier.py
# some imports remain lower down, for now: bsddb and shelve
"""
Some internal & client-code documentation, as of 050106:
We store prefs in a shelf. Restrictions imposed by the shelve module:
Keys must be strings, values can be any pickleable python exprs,
and neither can be extremely long (exact limits are not made clear).
When these restrictions become a problem, we will make our intermediating
layer handle them (for example, by translating long keys to short ones).
==
Concurrent access:
We usually keep the shelf closed, in case other processes want to access or modify it too.
This only works if we assume that these processes only open it briefly when processing
some user event (typed command or clicked button), and this doesn't happen in two processes
at once since the user can only give events to one process at a time. For this reason,
it's important to only open it briefly during a user event (and only at the beginning
if the processing takes a long time), and never any other time!
Also, if you (a process) start another process which might access the prefs when it starts,
you should only access them yourself just before it starts (and during subsequent user events,
assuming that subprocess follows the same rule).
We rely on the client code to follow these rules; we don't try to enforce them.
Breaking them might conceivably trash the entire prefs database, or perhaps more likely,
cause an error in the process trying to access it while another process is doing so.
(This depends on the db module, and I don't know what bsddb does in this case.)
We make no attempt yet to handle these errors or back up the prefs database.
==
Internal shelf key usage:
Current internal shelf key usage (this might change at any time,
without the client-code keys changing):
Keys starting "k " are translated versions of client-code keys;
see internal _attr2key method (which will be renamed).
Keys starting '_' or with a digit are reserved for use by this code.
In fact, all other keys are reserved. Presently used: see the code.
The most important one is _format_version.
==
High-level keys and values:
Keys supplied by client code (translated through _attr2key into shelf keys)
are presently just strings, using conventions still mostly to be invented,
but in the future will be able to be more kinds of objects.
Values supplied by client code will in the future be translated, and have
metainfo added, but this is not yet done. Values must be pickleable, and
also should not include instances of classes until we decide which of
those are ok. (But Numeric arrays are ok.)
For now, all modules use the same global namespace of high-level keys,
but this might change. To permit this, the module defining the key
needs to be detectable by this code... basically this means any given key
should be passed into this module from the same external module.
Details to be documented when they are implemented and become relevant.
==
Usage by client code (for now -- this might change!):
from foundation.preferences import prefs_context
prefs = prefs_context()
key = "some string" # naming conventions to be introduced later
prefs[key] = value
value = prefs[key] # raises KeyError if not there
# these dict-like operations might or might not work
# (not yet tested; someday we will probably support them
# and make them more efficient than individual operations
# when several prefs are changed at once)
prefs.get(key, defaultvalue)
prefs.update(dict1)
dict1.update(prefs)
"""
# ===
# This module wants bsddb, just to make sure the shelf is stored in a format
# that (we hope) all platforms can open. (It also might be more reliable,
# be better at concurrent access, and/or permit longer keys and (especially)
# values than other db packages.)
# But, we'll run without it if necessary, but when we do, we'll use a different
# shelf name, in case the binary formats are incompatible. (Not a perfect solution,
# since there's no guarantee the db format without bsddb is always the same...
# but I don't know a good-enough way to find out which db module shelve is actually using.)
_USE_bsddb3 = NE1_Build_Constants.NE1_USE_bsddb3
try:
if _USE_bsddb3:
import bsddb3 as _junk
else:
import bsddb as _junk
_junk # try to tell pylint we need this import [bruce 071023]
except:
dbname = "somedb"
print """\
Warning: import bsddb failed; using some other db format for preferences file;
giving it a different name in case that uses an incompatible binary format;
this means, when you upgrade to bsddb, you'll lose your preferences."""
if EndUser.getAlternateSourcePath() != None:
# [bruce 070704]
print "(Note: as of 070704 this is a common side-effect of using the"
print "ALTERNATE_CAD_SRC_PATH feature, since the built release has a"
print "patch to use bsddb3 which is not present in cvs code."
print "This situation will probably be fixed soon.)"
print
else:
dbname = "bsddb"
# And this module requires shelve. We assume without checking that if bsddb is available,
# shelve will use it. (I don't know any straightforward way to check this. But the
# docs for shelve say it will use it, I think. #k check this ###@@@)
if _USE_bsddb3:
from bsddb3 import dbshelve as shelve
else:
import shelve
# (For the actual filename of the prefs file, see the code of _make_prefs_shelf()
# below, which specifies the basename only; the db module decides what extension
# to add. This is one reason we store the prefs in a subdirectory.)
# ===
# module state: the shelf filename, the (usually-closed) shelf object,
# and an in-memory cache of its contents
_shelfname = _shelf = _cache = None
# per-key default values and usage/change trackers
_defaults = _trackers = None #bruce 050804 new features
def _make_prefs_shelf():
    """
    [private function]
    call this once per session,
    to create or find the shelf (whose name depends only on the dbm format we'll use for it),
    and create the cache of its contents,
    and store a comment there about this process,
    and close the shelf again in case a concurrent process is sharing the same shelf with us.

    If the shelf is brand new, also load factory-default values from
    DEFAULT_PREFS_BASENAME (if that file exists) into it.
    """
    global _shelfname, _shelf, _cache, _defaults, _trackers
    nanorex = find_or_make_Nanorex_directory()
    global dbname
    _shelfname = str_or_unicode(os.path.join( nanorex, "Preferences", "%s-shelf" % dbname ))
        # This name should differ when db format differs.
        # Note: the actual filename used might have an extension added
        # by the db module (in theory, it might even create two files
        # with different extensions from the given basename).
        # By experiment, on the Mac, with bsddb there is no extension added,
        # and without it there is '.db' added. [bruce 050105]
    mkdirs_in_filename(_shelfname)
    _shelf = shelve.open(_shelfname.encode("utf_8"))
    _cache = {}
    _cache.update(_shelf) # will this work?
    was_just_made = (not _cache) #bruce 080505
    if was_just_made:
        print u"made prefs db, basename", _shelfname.encode("utf_8")
    else:
        print u"prefs db already existed, basename", _shelfname.encode("utf_8")
    _defaults = {}
    _trackers = {}
    # zap obsolete contents
    obskeys = []
    for key in _cache.keys():
        if key.isdigit() or key in ['_session_counter']:
            obskeys.append(key)
    for key in obskeys:
        del _shelf[key]
        del _cache[key]
    ###@@@ following should be revised to handle junk contents gracefully,
    # and to notice the existing format version and handle older formats appropriately
    # or reject them gracefully.
    _store_while_open('_format_version', 'preferences.py/v050106')
        # storing this blindly is only ok since the only prior version is one
        # we can transparently convert to this one by the "zap obskeys" above.
    # store a comment about the last process to start using this shelf
    # (nothing yet looks at this comment)
    proc_info = "process: pid = %d, starttime = %r" % (os.getpid(), time.asctime())
    _store_while_open( '_fyi/last_proc', proc_info ) # (nothing yet looks at this)
    _close()
        # keep the shelf closed between operations, so a concurrent
        # process can open it (see module docstring about concurrency)
    if was_just_made:
        # use DEFAULT_PREFS_BASENAME [bruce 080505 new feature];
        # file format must correspond with that written by
        # packaging/Pref_Mod/pref_modifier.py
        default_prefs_values = {}
        # read the values from DEFAULT_PREFS_BASENAME
        # (while shelf is closed, in case this takes time)
        try:
            filename = os.path.join( nanorex, "Preferences", DEFAULT_PREFS_BASENAME )
            if not os.path.exists(filename):
                lines = []
                print u"didn't find", filename.encode("utf_8")
            else:
                file = open( filename, "rU")
                lines = file.readlines()
                file.close()
                print u"reading from", filename.encode("utf_8")
            for line in lines:
                line0 = line
                try:
                    # try/except so corrupted lines don't break good ones added later
                    # assume line has the correct format: key = val\n
                    while line[-1] in ('\r', '\n'):
                        # 'while' is to handle Windows newlines
                        # (probably not needed due to 'rU')
                        line = line[:-1]
                    key, val = line.split(" = ")
                        # don't strip key or val -- they might end with spaces
                    def decode(string1):
                        # undo the escaping presumably applied by
                        # pref_modifier.py when it wrote the file
                        # (backslash-escaped '=', newline, CR) -- TODO confirm
                        words = string1.split(r'\\')
                        for i in range(len(words)):
                            word = words[i]
                            word = word.replace(r'\=', '=')
                            word = word.replace(r'\n', '\n')
                            word = word.replace(r'\r', '\r')
                            words[i] = word
                            continue
                        return '\\'.join(words)
                    key = decode(key)
                    val = decode(val)
                    if val == 'True':
                        val = True
                    elif val == 'False':
                        val = False
                    default_prefs_values[key] = val
                    # print "read key, val = (%r, %r)" % (key, val)
                    pass
                except:
                    print "ignoring exception in this line: %r" % (line0,)
                    pass
                continue
            pass
        except:
            print "ignoring exception reading from", DEFAULT_PREFS_BASENAME
            default_prefs_values = {}
            pass
        items = default_prefs_values.items()
        items.sort() # just to make the following console prints look nicer
        # now open, store the values, and close
        _shelf = shelve.open(_shelfname.encode("utf_8"))
        for key, val in items:
            pkey = _PREFS_KEY_TO_SHELF_KEY(key)
            _store_while_open( pkey, val)
            print "stored key, val = (%r, %r)" % (key, val)
        _close()
        pass
    return
def _close():
    """
    Close the prefs shelf file and drop our reference to it,
    leaving _shelf as None until the next _reopen.
    """
    global _shelf
    _shelf.close()
    _shelf = None
def _reopen():
    """
    Open the prefs shelf (creating/initializing it first if that was
    never done). Precondition: the shelf must currently be closed.
    Note: we deliberately do not refresh _cache from the shelf here --
    doing that on every reopen would be too slow.
    """
    _ensure_shelf_exists()
    global _shelf
    assert _shelf is None
    _shelf = shelve.open(_shelfname.encode("utf_8"))
def _store_new_while_open(key, val): # [not used as of 050804]
    """
    Store val under a key which must not already be present,
    in both the open shelf and the in-RAM cache.
    """
    # idiom fix: use 'in' rather than the deprecated dict.has_key
    assert key not in _shelf # checks _shelf, not merely _cache
    assert key not in _cache
    _cache[key] = val
    _shelf[key] = val
    return
def _store_while_open(key, val): # [used only when initializing the shelf, as of 050804]
    """
    Store val under key in both the in-RAM cache and the open shelf.
    We don't assert that cache and shelf currently agree at this key --
    disagreement is not an error (nor is a shelf value with no cached
    one), since a concurrent process is allowed to write a prefs value
    on its own.
    """
    _cache[key] = val
    _shelf[key] = val
def _ensure_shelf_exists():
    """
    Create and initialize the prefs shelf, unless that was already done.
    """
    if not _shelfname:
        _make_prefs_shelf()
#bruce 050804/050805 new features:
def _track_change(pkey):
    """
    Notify the usage tracker for pkey that its value was changed.
    """
    tracker = _tracker_for_pkey(pkey)
    tracker.track_change()
def _track_use(pkey):
    """
    Notify the usage tracker for pkey that its value was used.
    """
    tracker = _tracker_for_pkey(pkey)
    tracker.track_use()
def _tracker_for_pkey(pkey):
    """
    Return the UsageTracker for pkey, creating and registering a new
    one on first use of that pkey.
    """
    tracker = _trackers.get(pkey)
    if tracker is None:
        # first use of this pkey; trackers are never stored as None,
        # so this test is equivalent to a missing-key check
        tracker = _trackers[pkey] = UsageTracker()
    return tracker
def _get_pkey_key(pkey, key): #bruce 050804 split this out of __getitem__ so I can also use it in get (both methods)
    """
    Return the cached value for pkey, tracking a usage of it
    (even when the lookup fails). On failure, raise KeyError whose
    detail is key (the external prefs key), not pkey.
    [note: pkey and key args are redundant;
     they're both provided just for this implem's convenience]
    """
    _track_use(pkey) # note, this is done even if we raise KeyError below (which is good)
    try:
        return _cache[pkey]
    except KeyError:
        # fix: use the call form of raise (works in Python 2 and 3)
        # instead of the Python-2-only "raise KeyError, key" syntax;
        # exception detail is key, not pkey as it would be if we just
        # said "raise"
        raise KeyError(key)
def _get_pkey_faster(pkey): # optimization of _get_pkey_key(pkey, key) when the KeyError exception detail doesn't matter
    # track the usage even if the lookup below raises KeyError
    _track_use(pkey)
    return _cache[pkey]
def _record_default( pkey, dflt):
    """
    Record this default value (if none is yet known for pkey),
    so other code can find out what the default value is,
    for use in "restore defaults" buttons in prefs UI.
    In debug version, also ensure this is the same as any previously recorded default value.
    Note, dflt can be anything, even None, though some callers have a special case
    which avoids calling this when dflt is None.
    """
    _defaults.setdefault( pkey, dflt) # only affects it the first time, for a given pkey
    if debug_flags.atom_debug:
        # also check consistency each time
        if dflt != _defaults[pkey]:
            # use print in call form (valid in both Python 2 and 3)
            print("atom_debug: bug: ignoring inconsistent default %r for pref %r; retaining %r" % \
                  ( dflt, pkey, _defaults[pkey] )) #e also print key if in future the key/pkey relation gets more complex
    return
def _restore_default_while_open( pkey): #bruce 050805
    """
    Remove the pref for pkey from the prefs db (but no error if it's not present there).
    As for the internal value of the pref (in _cache, and for track_change, and for subscriptions to its value):
    If a default value has been recorded, change the cached value to that value
    (as it would be if this pref had originally been missing from the db, and a default value was then recorded).
    If not, remove it from _cache as well, and use the internal value of None.
    Either way, if the new internal value differs from the one before this function was called,
    track the change and fulfill any subscriptions to it.
    If possible, don't track a use of the prefs value.
    """
    priorval = _cache.get(pkey) # might be None
    # idiom fix: 'in' rather than the deprecated has_key (Shelf supports it)
    if pkey in _shelf:
        del _shelf[pkey]
    try:
        dflt = _defaults[pkey]
    except KeyError:
        if debug_flags.atom_debug:
            # print in call form, valid under Python 2 and 3
            print("atom_debug: fyi: restore defaults finds no default yet recorded for %r; using None" % pkey)
        # net effect of the next two lines: dflt becomes None and pkey
        # is absent from _cache (the store-then-delete is intentional)
        _cache[pkey] = dflt = None
        del _cache[pkey]
    else:
        _cache[pkey] = dflt
    if dflt != priorval:
        _track_change(pkey)
        #e fulfill any subscriptions to this value (if this is ever done by something other than track_change itself)
    return
def keys_list( keys): #bruce 050805
    """
    Given a key or a list of keys (or an arbitrarily nested list of keys),
    return an equivalent flat list of keys.
    Note: tuples of keys are not allowed (someday they might be a new kind
    of primitive key).

    @param keys: a key string, or a (possibly nested) list of key strings
    @return: a flat list of key strings
    """
    res = []
    # idiom fix: isinstance instead of type(...) == type(...) comparisons
    if isinstance(keys, list):
        for sub in keys:
            res.extend( keys_list( sub) )
            #e could be optimized (trivially, if we disallowed nested lists)
    else:
        # a single key; only plain strings are allowed
        assert isinstance(keys, str)
        res.append(keys)
    return res
# ==

# Now make a prefs function, which returns a prefs object [someday] customized for the calling module,
# in which prefs can be accessed or stored using attributes, whose names are interpreted in a context
# which might differ for each module.

_NOT_PASSED = [] # private object for use as keyword arg default [bruce 070110, part of fixing bug of None as Choice value]
    # note: compare against this with 'is', never with equality -- its only
    # meaningful property is its unique identity
    # (note, the same global name is used for different objects in preferences.py and debug_prefs.py)
def _PREFS_KEY_TO_SHELF_KEY(prefs_key):
"""
Translate a prefs_key string (used in external code)
to a shelf database key string (called "pkey" in some local variables).
"""
#bruce 080505 split this out of _prefs_context._attr2key
return "k " + prefs_key
class _prefs_context:
    """
    Represents a symbol context for prefs names, possibly [someday] customized for one module.

    Note on shelf state: when _shelf is open (non-None), saving to disk is
    "suspended" -- writes accumulate in the open shelf and are flushed on
    _close; when it's closed, each write reopens and recloses it.
    """
    def __init__(self, modname):
        # modname is not presently used
        _ensure_shelf_exists() # needed before __getattr__ and __getitem__ are called
        self.trackers = {}
    def _attr2key(self, attr): # note: method and its arg are misnamed.
        # maps an external prefs key to its shelf-db key
        return _PREFS_KEY_TO_SHELF_KEY(attr)
    #e Someday we will support more complex keys,
    # which are like exprs whose heads (at all levels) are in our context.
    # For now, just support arbitrary strings as items.
    def __setitem__(self, key, val):
        """
        Set the pref named by key to val, storing it in the prefs db
        (reopening and reclosing the shelf if it's not currently open),
        and track the change -- except that setting a value equal to the
        cached one is detected and skipped entirely (see comment below).
        """
        assert type(key) == type("a") # not unicode, numbers, lists, ... for now
        pkey = self._attr2key(key) # but we might use a more general func for this, at some point
        try:
            #bruce 050804 new feature: detect "change with no effect" (where new value equals existing value),
            # so we can avoid tracking that as an actual change.
            # We also avoid tracking this as a use (even though we do use the value for the comparison).
            # And, while we're at it, optimize by not changing the prefs db in this case.
            # This is not just an optimization, since if the prefs db contains no value for this pref,
            # and no value other than the default value (according to the current code) has been stored during this session
            # and if this remains true in the present call (i.e. val equals the default value),
            # then (due to some of today's changes to other code here, particularly self.get storing dflt in cache), #####IMPLEM
            # we won't store anything in the prefs db now.
            cached_val = _cache[pkey] # this might be a default value from the present code which is not in the prefs db
        except KeyError:
            same = False
        else:
            # If no default value is known, we consider any value to differ from it.
            # [##e Would it be better to treat this as if the default value was None (like prefs.get does)??]
            same = (val == cached_val)
        if same:
            if 0 and debug_flags.atom_debug:
                print "atom_debug: fyi: returning early from prefs.__setitem__(%r) since val == cached_val, %r == %r" % (key, val, cached_val)
            return # see long comment above
        if _shelf:
            _shelf[pkey] = _cache[pkey] = val
            _track_change(pkey) # do this only after the change happens, for the sake of formulas...
            #e (someday we might pass an arg saying the change is done, or the curval is merely invalid,
            # and if the latter, whether another track_change will occur when the change is done.)
        else:
            try:
                _reopen()
                _shelf[pkey] = _cache[pkey] = val
                _track_change(pkey)
            finally:
                _close()
        return
    def __getitem__(self, key):
        """
        Return the current value of the pref named by key, tracking a
        usage of it; raise KeyError(key) if it has no current value.
        """
        assert type(key) == type("a")
        pkey = self._attr2key(key)
        return _get_pkey_key( pkey, key)
    def get(self, key, dflt = _NOT_PASSED): #bruce 050117; revised 050804, and 070110 to use _NOT_PASSED
        """
        Return the current value of the pref named by key, or dflt if it
        has none; if dflt is passed, also record it as this pref's
        default value (see _record_default).
        """
        assert type(key) == type("a")
        pkey = self._attr2key(key)
        if dflt is not _NOT_PASSED:
            _record_default( pkey, dflt)
            #bruce 070110 bugfix: use _NOT_PASSED rather than None.
            # Before this fix, passing None explicitly as dflt would fail to record it, which could cause later exceptions
            # when client code used env.prefs[key] if the pref had never been saved. This was one of two bugs in
            # using a Choice value of None in debug_prefs.py. The other part is fixed in debug_prefs.py dated today.
        del dflt # [if dflt was used below and we removed this del, we'd need to replace _NOT_PASSED with None in this localvar]
        try:
            return _get_pkey_faster( pkey) # optim of self[key]
                # note: usage of this pref is tracked in _get_pkey_faster even if it then raises KeyError.
        except KeyError:
            #bruce 050804 new features (see long comment in __setitem__ for partial explanation):
            # if default value must be used, then
            # (1) let it be the first one recorded regardless of the one passed to this call, for consistency;
            # (2) store it in _cache (so this isn't called again, and for other reasons mentioned in __setitem__)
            # but not in the prefs db itself.
            try:
                dflt = _defaults[pkey] # might be None, if that was explicitly recorded by a direct call to _record_default
            except KeyError:
                # no default value was yet recorded
                dflt = None # but don't save None in _cache in this case
                if debug_flags.atom_debug:
                    print "atom_debug: warning: prefs.get(%r) returning None since no default value was yet recorded" % (key,)
            else:
                _cache[pkey] = dflt # store in cache but not in prefs-db
            return dflt
        pass
    def update(self, dict1): #bruce 050117
        """
        Store every (key, val) pair of dict1 as a pref, like repeated
        __setitem__ calls would.
        """
        # note: unlike repeated setitem, this only opens and closes once.
        if _shelf:
            for key, val in dict1.items():
                #e (on one KeyError, should we store the rest?)
                #e (better, should we check all keys before storing anything?)
                self[key] = val #e could optimize, but at least this leaves it open
                    # that will do _track_use(pkey); if we optimize this, remember to do that here.
        else:
            try:
                _reopen()
                self.update(dict1)
            finally:
                _close()
        return
    def suspend_saving_changes(self): #bruce 051205 new feature
        """
        Let prefs changes after this point be saved in RAM and take full effect
        (including notifying subscribers),
        but not be saved to disk until the next call to resume_saving_changes
        (which should be called within the same user command or mouse drag,
        but not for every mouse motion during a drag).
        Use this to prevent constant updates to disk for every mouse motion
        during a drag (e.g. as a prefs slider is adjusted).
        Warn if called when changes are already suspended,
        but as a special case to mitigate bugs of failing to call resume,
        save all accumulated changes whenever called.
        """
        if _shelf:
            # already suspended -- briefly resume (so they're saved) before suspending (again)
            print "bug: suspend_saving_changes when already suspended -- probably means resume was missing; saving them now"
            _close()
        _reopen()
        return
    def resume_saving_changes(self, redundant_is_ok = False): #bruce 051205 new feature
        """
        Resume saving changes, after a call of suspend_saving_changes.
        Optional redundant_is_ok = True prevents a warning about a redundant call;
        this is useful for letting callers make sure changes are being saved
        when they should be (and probably already are).
        """
        if _shelf:
            if redundant_is_ok: # this case untested (no immediate use is planned as of 051205)
                print "Warning: resume_saving_changes(redundant_is_ok = True) was in fact redundant --"
                print " i.e. it may have been necessary to work around a bug and save prefs."
            _close()
        else:
            if not redundant_is_ok:
                print "warning: redundant resume_saving_changes ignored"
        return
    def restore_defaults(self, keys): #bruce 050805
        """
        Given a key or a list of keys,
        restore the default value of each given preference
        (if one has yet been recorded, e.g. if prefs.get has been provided with one),
        with all side effects as if the user set it to that value,
        but actually remove the value from the prefs db as well
        (so if future code has a different default value for the same pref,
        that newer value will be used by that future code).
        [#e we might decide to make that prefs-db-removal feature optional.]
        """
        if _shelf:
            for key in keys_list( keys):
                pkey = self._attr2key(key)
                _restore_default_while_open( pkey)
        else:
            try:
                _reopen()
                self.restore_defaults( keys)
            finally:
                _close()
        return
    def get_default_values(self, keys): #bruce 080131 UNTESTED @@@@
        """
        Return a list of the recorded default values for the given keys.

        @param keys: a list of key strings (tuple not allowed; nested list not allowed)
        """
        assert type(keys) == type([])
        return map( self.get_default_value, keys)
    def get_default_value(self, key, _default_return_value = None): #bruce 080131/080201 UNTESTED @@@@
        """
        Return the recorded default value for key, or
        _default_return_value if no default was yet recorded.

        @param key: a key string
        """
        # review: should default value of _default_return_value be None (as now), or _NOT_PASSED?
        assert type(key) == type("")
        pkey = self._attr2key(key)
        dflt = _defaults.get(pkey, _default_return_value)
        return dflt
    def has_default_value(self, key): #bruce 080131/080201 UNTESTED @@@@
        """
        Return whether the pref named by key currently equals its
        recorded default value.

        @param key: a key string
        """
        # This is a ###STUB in a few ways:
        # - it ought to compare using same_vals, not != (also in setitem??)
        # - the specification doesn't say what to do when no default is yet recorded
        # - old version without _NOT_PASSED:
        #   it might record a default of None if no default is yet recorded (not sure)
        # - new version with _NOT_PASSED: correctness not fully reviewed
        dflt = self.get_default_value(key, _NOT_PASSED)
        current = self.get(key, dflt) # does usage tracking (good)
        same = not (dflt != current)
            # (note: this is a safer comparison than ==, but not perfect,
            #  re Numeric arrays)
        return same
    def have_default_values(self, keys): #bruce 080201 UNTESTED @@@@
        """
        Return True if every prefs key in the given list currently has
        its default value (i.e. if restore_defaults would not
        change their current values).

        @param keys: a list of key strings (tuple not allowed; nested list not allowed)
        """
        assert type(keys) == type([])
        # note: I think this does not access the shelf,
        # so we don't need to optimize it to only open the shelf once.
        for key in keys:
            if not self.has_default_value(key):
                return False
        return True
    pass # end of class _prefs_context
# for now, in this stub code, all modules use one context:
_global_context = _prefs_context("allmodules")

def prefs_context():
    """
    Return the prefs context object for the calling module.
    ###@@@ stub: every caller currently gets the same shared context,
    not one customized to its own module.
    """
    return _global_context
# ==
# initialization code [bruce 050805] (includes the set of env.prefs)
def declare_pref( attrname, typecode, prefskey, dflt = None ): # arg format is same as prefs_table record format
    """
    Declare a single preference from one prefs_table record,
    recording its default value (if given) before any other code
    can define it or ask for the pref.
    """
    assert typecode in ['color','boolean','string','int', 'float'] or type(typecode) == type([]) #e or others as we define them
    #e create type object from typecode
    #e get dflt from type object if it's None here, otherwise tell this dflt to type object
    #e record type object
    #e use attrname to set up faster/cleaner access to this pref?
    #e etc.

    # Record the default value now, before any other code can define it or ask for the pref.
    # (This value is used if that pref is not yet in the db;
    #  it's also used by "reset to default values" buttons in the UI,
    #  though those will have the side effect of defining that value in the db.)
    prefs = prefs_context()
    if dflt is not None:
        prefs.get( prefskey, dflt) # return value not needed; this records dflt
    return
def init_prefs_table( prefs_table): # sets env.prefs
    """
    Declare every preference in prefs_table, then set env.prefs to the
    shared prefs context. Malformed table entries are reported and
    skipped, so one bad record can't prevent the rest from being
    declared.
    """
    for prefrec in prefs_table:
        try:
            declare_pref(*prefrec)
        except:
            print_compact_traceback( "ignoring prefs_table entry %r with this exception: " % (prefrec,) )
    env.prefs = prefs_context() # this is only ok because all modules use the same prefs context.
    # (removed a dead "if 0 and debug_flags.atom_debug: print ..." debug
    #  branch here -- it could never run)
    return
# module-level side effect: declares all prefs and sets env.prefs at import time
init_prefs_table( prefs_table)
    # this is guaranteed to be done before any prefs_context object exists, including env.prefs
    # (but not necessarily just after this module is imported, though presently, it is;
    # similarly, it's not guaranteed that env.prefs exists arbitrarily early,
    # though in practice it does after this module is imported, and for now it's ok
    # to write code which would fail if that changed, since it'll be easy to fix that code
    # (and to detect that we need to) if it ever does change.)
# ==
"""
use prefs_context() like this:
prefs = prefs_context() # once per module which uses it (must then use it in the same module)
... prefs['atom_debug'] = 1
... if prefs['atom_debug']:
...
or make up keys as strings and use indexing, prefs[key],
but try to compute the strings in only one place
and use them from only one module.
We will gradually introduce naming conventions into the keys,
for example, module/subname, type:name. These will be documented
once they are formalized.
[these rules might be revised!]
"""
# == test code (very incomplete) [revised 050804 since it was out of date]

if __name__ == '__main__':
    # older scratch code, kept commented out for reference:
    ## defaults = dict(hi = 2, lo = 1)
    ## print "grabbing %r, got %r" % (defaults, grab_some_prefs_from_cache(defaults))
    ## new = dict(hi = time.asctime())
    ## print "now will store new values %r" % new
    ## store_some_prefs(new)
    ## print "now we grab in same way %r" % grab_some_prefs_from_cache(defaults) # this failed to get new value, but next proc gets it
    ## print "done with this grossly incomplete test; the shelfname was", _shelfname
    # now try this: store a pref and read it back through the shared context
    testprefs = prefs_context()
    testprefs['x'] = 7
    print "should be 7:",testprefs['x']

# end
| NanoCAD-master | cad/src/foundation/preferences.py |
# Copyright 2005-2007 Nanorex, Inc. See LICENSE file for details.
"""
changedicts.py - utilities related to dictionaries of changed objects
@author: Bruce
@version: $Id$
@copyright: 2005-2007 Nanorex, Inc. See LICENSE file for details.
History:
bruce 071106 split this out of changes.py
Current status:
This code appears to be active and essential for undo updating;
details unclear, as is whether it's used for any other kind of updating,
e.g. bond_updater -- guess, no (though some of the same individual dicts
might be). [bruce 071106 comment]
Update 071210: since that comment, it's also been used in the dna updater.
"""
from utilities.debug import print_compact_traceback
from foundation.changes import register_postinit_item
DEBUG_CHANGEDICTS = False # do not commit with True
# ==
class changedict_processor:
    """
    Allow a single transient changedict to be observed by multiple subscribers
    who periodically (at independent times) want to become up to date regarding it
    (which they do by calling our process_changes method),
    and who don't mind becoming forcibly up to date at other times as well,
    so that the dict can be cleared out each time any subscriber wants to be updated
    (by having all its items given to all the subscribers at once).
    """
    #bruce 060329 moved/modified from chem.py prototype
    # (for Undo differential scanning optim).
    # Note: as of 071106, this class is used only by register_changedict
    # in this file (i.e. it could be private).
    def __init__(self, changedict, changedict_name = "<some changedict>"):
        """
        @param changedict: the transient dict to watch (must be a real dict)
        @param changedict_name: a name used only in debug/error messages
        """
        self.subscribers = {}
            # public dict from owner-ids to subscribers; their update
            # methods are called by self.process_changes
        assert type(changedict) == type({}) #k needed?
        self.changedict = changedict
        self.changedict_name = changedict_name
        return
    def subscribe(self, key, dictlike):
        """
        subscribe dictlike (which needs a dict-compatible .update method)
        to self.changedict [#doc more?]
        """
        # idiom fix: 'in' rather than the deprecated dict.has_key
        assert key not in self.subscribers
        self.subscribers[key] = dictlike
            # note: it's ok if it overrides some other sub at same key,
            # since we assume caller owns key
        return
    def unsubscribe(self, key):
        del self.subscribers[key]
        return
    def process_changes(self):
        """
        Update all subscribers to self.changedict by passing it to their
        update methods (which should not change its value)
        (typically, subscribers are themselves just dicts); then clear it.
        Typically, one subscriber calls this just before checking its
        subscribing dict, but other subscribers might call it at arbitrary
        other times.
        """
        sublist = self.subscribers
            # note: this is actually a dict, not a list,
            # but 'subdict' would be an unclear name for a
            # local variable (imho)
        if DEBUG_CHANGEDICTS:
            print("DEBUG_CHANGEDICTS: %r has %d subscribers" % (self, len(sublist)))
        changedict = self.changedict
        changedict_name = self.changedict_name
        len1 = len(changedict)
        # iterate over a snapshot of the items, since we may delete from
        # sublist inside the loop (required for correctness under Python 3,
        # where dict.items() is a live view)
        for subkey, sub in list(sublist.items()):
            try:
                unsub = sub.update( changedict)
                    # kluge: this API is compatible with dict.update()
                    # (which returns None).
            except:
                #e reword the name in this? include %r for self, with id?
                print_compact_traceback(
                    "bug: exception (ignored but unsubbing) in .update " \
                    "of sub (key %r) in %s: " % (subkey, changedict_name) )
                unsub = True
            if unsub:
                try:
                    del sublist[subkey]
                except KeyError:
                    pass
            len2 = len(changedict)
            if len1 != len2:
                #e reword the name in this? include %r for self, with id?
                print("bug: some sub (key %r) in %s apparently changed " \
                      "its length from %d to %d!" % (subkey, changedict_name, len1, len2))
                len1 = len2
            continue
        changedict.clear()
        assert changedict is self.changedict
        return
    pass # end of class changedict_processor
_dictname_for_dictid = {} # maps id(dict) to its name;
    # truly private, used here in both register_ functions;
    # it's ok for multiple dicts to have the same name;
    # never cleared (memory leak is ok since it's small)
    # note: keys are id() values, so entries are only meaningful while
    # the corresponding dict object stays alive

_cdproc_for_dictid = {} # maps id(dict) to its changedict_processor;
    # not sure if leak is ok, and/or if this could be used to provide names too
    # WARNING: the name says it's private, but it's directly referenced in
    # undo_archive.get_and_clear_changed_objs and
    # undo_archive.sub_or_unsub_to_one_changedict;
    # it's used here only in register_changedict
    # [bruce 071106 comment]
def register_changedict( changedict, its_name, related_attrs ):
    """
    Register changedict under its_name, wrapping it in a
    changedict_processor so subscribers can later be updated from it.
    """
    #bruce 060329 not yet well defined what it should do ###@@@
    #e does it need to know the involved class?
    cdp = changedict_processor( changedict, its_name )
    del related_attrs # not sure these should come from an arg at all,
        # vs per-class decls... or if we even need them...
        #stub?
    dictid = id(changedict)
    ## assert not _dictname_for_dictid.has_key(dictid)
        # this is not valid to assert, since ids can be recycled if dicts are freed
    _dictname_for_dictid[dictid] = its_name
    _cdproc_for_dictid[dictid] = cdp
    return
_changedicts_for_classid = {} # maps id(class) to map from dictname to dict
    ### [what about subclass/superclass? do for every leafclass?]
    # note: classes registered here are kept alive forever (see
    # register_class_changedicts), so the id() keys are never recycled
    # WARNING: the name says it's private, but it's directly referenced in
    # undo_archive._archive_meet_class; used here only in register_class_changedicts
    # [bruce 071106 comment]
def register_class_changedicts( class1, changedicts ):
    """
    This must be called exactly once, for each class1 (original or reloaded),
    to register it as being changetracked by the given changedicts, each of
    which must have been previously passed to register_changedict.
    """
    classid = id(class1)
    # make sure class1 never passed to us before; this method is only
    # legitimate since we know these classes will be kept forever
    # (by register_postinit_item below), so id won't be recycled
    # (idiom fix: 'not in' rather than the deprecated dict.has_key)
    assert classid not in _changedicts_for_classid, \
           "register_class_changedicts was passed the same class " \
           "(or a class with the same id) twice: %r" % (class1,)
    assert not hasattr(changedicts, 'get'), \
           "register_class_changedicts should be passed a sequence of dicts, not a dict"
        # kluge (not entirely valid): make sure we were passed a list or tuple,
        # not a dict, to work around one of Python's few terrible features,
        # namely its ability to iterate over dicts w/o complaining
        # (by iterating over their keys)
    for changedict in changedicts:
        changedict_for_name = _changedicts_for_classid.setdefault(classid, {})
        dictname = _dictname_for_dictid[id(changedict)]
            # if this fails (KeyError), it means dict was not
            # registered with register_changedict
        changedict_for_name[dictname] = changedict
    # in future we might be able to auto-translate old-class objects
    # to new classes... so (TODO, maybe) store classname->newestclass map,
    # so you know which objects to upgrade and how...
    # This is needed now, and has to be done after all the changedicts were
    # stored above:
    register_postinit_item( '_archive_meet_class', class1)
        # Note: we could instead pass a tuple of (class1, other_useful_info)
        # if necessary. All undo_archives (or anything else wanting to change-
        # track all objects it might need to) should call
        # register_postinit_object( '_archive_meet_class', self )
        # when they are ready to receive callbacks (then and later) on
        # self._archive_meet_class for all present-then and future classes of
        # objects they might need to changetrack.
        #
        # Note: those classes will be passed to all new archives and will
        # therefore still exist (then and forever), and this system therefore
        # memory-leaks redefined (obsolete) classes, even if all their objects
        # disappear, but that should be ok, and (in far future) we can even
        # imagine it being good if their objects might have been saved to files
        # (it won't help in future sessions, which means user/developer should
        # be warned, but it will help in present one and might let them upgrade
        # and resave, i.e. rescue, those objects).
    return
#e now something to take class1 and look up the changedicts and their names
#e and let this run when we make InstanceClassification
##e class multiple_changedict_processor?
# ==
class refreshing_changedict_subscription(object): #bruce 071116; TODO: rename
    """
    Helper class, for one style of subscribing to a changedict_processor:
    we keep a private dict subscribed under our own id; each call of
    get_changes_and_clear hands that dict (and its ownership) to the
    caller and resubscribes a fresh empty one in its place.
    """
    cdp = None # our changedict_processor, or None if never fully initialized
    def __init__(self, cdp):
        self.cdp = cdp # a changedict_processor (public?)
        self._key = id(self)
        self._dict = {}
        self._subscribe()
    def _subscribe(self):
        # register our private dict to start receiving changes
        self.cdp.subscribe( self._key, self._dict)
    def _unsubscribe(self):
        self.cdp.unsubscribe( self._key)
    def get_changes_and_clear(self):
        """
        Return all changes accumulated since the last call; the caller
        owns the returned dict.
        """
        self.cdp.process_changes()
        changes = self._dict # caller will own this when we return it
        #e optim, when works without it:
        ## if not changes:
        ##     return {} # note: it would be wrong to return changes!
        self._unsubscribe()
        # make a new dict, rather than copying/clearing old one
        self._dict = {}
        self._subscribe()
        return changes
    def __del__(self):
        # When we're gone, we no longer own id(self) as a key in self.cdp!
        # So free it. (Also presumably an optim.)
        if self.cdp:
            try:
                self._unsubscribe()
            except:
                print_compact_traceback("bug, ignored: error during __del__: ")
        return
    pass
# end
| NanoCAD-master | cad/src/foundation/changedicts.py |
NanoCAD-master | cad/src/foundation/__init__.py |
|
# Copyright 2005-2009 Nanorex, Inc. See LICENSE file for details.
"""
state_utils.py - general state-related utilities, and undo-related uses of them.
@author: Bruce
@version: $Id$
@copyright: 2005-2009 Nanorex, Inc. See LICENSE file for details.
Note: same_vals was moved from here into a new file, utilities/Comparison.py,
to break an import cycle. It is closely related to copy_val which remains here.
TODO:
- Move some of this code into undo_archive, or to methods on one of those,
and/or some code from there to here, so that this module no longer
needs to import undo_archive.
[bruce 071025 suggestion, echoing older comments herein]
DISCUSSION of general data handling functions, their C extensions,
their relation to Undo, and their relation to new-style classes
[bruce 090205/090206]:
There are four general data-handling functions, which might be used on
any data value found in undoable state or model state:
- same_vals
- copy_val
- scan_vals
- is_mutable
These functions need to handle many Python builtin types and extension types,
as well as our own classes, old style or new style. They each use lookup tables
by type, and for class instances (old style or new style), we define special
APIs by which classes can customize their behavior regarding these functions.
There is also type-specific code in Undo other than just calls to those
functions.
We have a C extension module, samevals.pyx / samevalshelp.c, which defines
optimized C versions of same_vals and copy_val.
We have an experimental C extension module which causes classes Atom and Bond
(normally old-style) to be Python extension classes (new-style).
The customization/optimization system for all those functions relies on looking
up an object's type to find hardcoded cases, and in some cases testing it for
equalling InstanceType (for old-style class instances) or for the object
being an instance of InstanceLike (for new-style class instances). (The class
of a new style instance is also its type, but we don't take advantage of that
to optimize by memoizing the classes in the hardcoded-type dicts, since we
presume the isinstance test is fast enough. If we later want to change that,
we can make the C code directly access the same per-type dicts that the Python
code both accesses and maintains.)
This is being revised (supposedly done as of 090206, but not fully tested) so that
new-style classes are also handled if they define appropriate methods and/or
inherit from a new superclass InstanceLike (which StateMixin and DataMixin
now do). The code changes required included replacing or augmenting checks for
InstanceType with checks for InstanceLike, and/or directly checking for
certain methods or attrs without bothering to first check for InstanceLike.
(I tested all 4 combos of (Python vs C same_vals) x (InstanceLike old vs new style),
but only briefly: make atoms, change bond type, make dna, undo all that.)
In general, to make this work properly for any newly defined class (old or new
style), the new class needs to inherit one of the mixin superclasses StateMixin
or DataMixin, and to have correct definitions of __eq__ and __ne__. (One special
case is class Bond, discussed in comments in Comparison.py dated 090205.)
Here is how each Python and C function mentioned above works properly for
new-style classes (assuming the code added 090206 works properly):
- same_vals: this is discussed at length in a comment in Comparison.py dated
090205. Briefly, its special case for InstanceType (to cause
_same_InstanceType_helper to be called) is not useful for any well-coded class
in principle, and is only needed for class Bond (to avoid its kluge in having
a looser version of __eq__ than StateMixin provides), and Bond is an old-style
class (when standard code is being used), so it's fine that we don't extend
that same_vals special case for InstanceType to cover new-style classes.
This is true for both the Python and C versions of same_vals. (And both would
need fixing if we wanted to handle Bond being a new-style class but continue
using its __eq__ kluge. Since Bond is the only class that would need that fix,
we'd want a fix that didn't slow down the general case.)
Re the experimental code which makes Bond a new-style class: the lack
of a same_vals special case would cause __eq__ to be used by same_vals
(for both Python and C versions); this might conceivably cause trouble
in Undo, so it should be fixed. I documented that issue in Bond and made it
print a warning if it's new-style. In fact, now it *is* new-style,
since I'm trying out making all InstanceLike classes new-style.
I'm not bothering to fix this for now (in either Python or C same_vals),
since there's no immediate need to let Bond be a new-style class
(though I've made it one, along with all InstanceLikes, as an experiment),
and the warning should mean we don't forget; we'll see if it causes
any noticeable undo bugs. But it ought to be cleaned up before the next release. ### DOIT
- copy_val: this properly handles all old-style and new-style classes.
The details are slightly different for Python and C, and include
detection of InstanceType, isinstance check for InstanceLike,
and looking directly for the _copyOfObject method. The copying of instances
(old or new style) (and unrecognized types and some builtin types)
is handled by _generalCopier (formerly called copy_InstanceType).
- scan_vals: this only has a Python version. It properly handles all
old-style and new-style classes similarly to copy_val, calling _scan_Instance
to handle them.
- is_mutable: I think this is correct for both old and new style classes,
since in either case it detects an attribute defined only in DataMixin.
- other code, all related to Undo:
- various checks for InstanceType -- these now also check for InstanceLike,
or are removed in favor of just checking an arbitrary object for certain
attrs or methods
- use of class names via __class__.__name__ -- should work for old- or new-
style classes, as long as class "basenames" are unique (which is probably not
verified by this code ### FIX)
(old-style __name__ has dotted module name, new-style __name__ doesn't)
TODO [as of 090206]:
- I've experimentally made InstanceLike inherit object, forcing all DataMixin/
StateMixins (incl Atom Bond Node) to be new-style. Test everything to see
whether this has caused any trouble, and optimize for it by replacing
__getattr__ methods with properties in certain classes.
- cleanups listed herein
- renamings
- variables and functions with InstanceType in their names -- most of these
now also cover InstanceLikes; maybe the name can just say Instance or
InstanceLike?
- _same_InstanceType_helper (defined and used in Comparison.py)
(actually, for this one, InstanceType in name is still appropriate,
until we clean it up to work for a new-style Bond as well as old-style)
- copiers_for_InstanceType_class_names (a kluge which I forget; obsolete?)
- misleading names
- _s_isPureData
- others?
"""
from types import InstanceType
from foundation.state_constants import S_DATA
from foundation.state_constants import S_CHILD, S_CHILDREN, S_CHILDREN_NOT_DATA
from foundation.state_constants import S_REF, S_REFS
from foundation.state_constants import S_PARENT, S_PARENTS
from foundation.state_constants import UNDO_SPECIALCASE_ATOM, UNDO_SPECIALCASE_BOND
from foundation.state_constants import ATOM_CHUNK_ATTRIBUTE_NAME
from foundation.state_constants import _UNSET_, _Bugval
import foundation.env as env
from utilities.debug import print_compact_stack
from utilities import debug_flags
from utilities.Comparison import same_vals, SAMEVALS_SPEEDUP
from utilities.constants import remove_prefix
from utilities.GlobalPreferences import debug_pyrex_atoms
DEBUG_PYREX_ATOMS = debug_pyrex_atoms()
### TODO:
"""
Where is _copyOfObject (etc) documented? (In code and on wiki?)
On wiki:
http://www.nanoengineer-1.net/mediawiki/index.php?title=How_to_add_attributes_to_a_model_object_in_NE1
That documentation should say:
- When defining _copyOfObject, consider:
- is its own return value __eq__ to the original? It should be,
so you have to define __eq__ AND __ne__ accordingly.
[later: I think the default def of __ne__ from __eq__ means
you needn't define your own __ne__.]
- should you define _s_scan_children too, to scan the same things
that are copied?
(Only if they are instance objects, and are "children".
See S_CHILDREN doc for what that means.)
[later: I think this only matters if they can contain undoable state
or point to other objects which can.]
I did the above for VQT and jigs_planes, but still no __eq__ or children
for jig_Gamess -- I'll let that be a known bug I fix later,
to test the behavior of my error detect/response code.
[later: I think this ended up getting done.]
Also, when I do fix it (requires analyzing jig_Gamess contents)
I might as well turn it into using a mixin
to give it a proper __eq__ based on declaring the state attrs!
I might as well put state decls into the archived-state objects I'm creating,
so they too could be compared by __eq__ and diffed!!! (Actually, that wouldn't
work for diff since it has to descend into their dictionaries in an intelligent
way. But it might work for __eq__.)
This means my archived-state objects should really be objects, not just
tuples or dicts. Let's change the code that collects one to make this true.
Search for... attrdict?
S_CHILDREN: we might need a decl that we (some class) have no children
(so we don't print a warning about that), and a registration for external classes
of the same thing. [TODO: this needs a better explanation]
And certainly we need to go through the existing stateholder classes (e.g. Node)
and add their attr/child decls. [that has been done]
Maybe rather than accommodating copyable_attrs, we'll just replace it?
Not sure, maybe later (a lot of things use it). [it's still there, 071113]
Do any inappropriate objects get a key (like data or foreign objs)
in current code?? #####@@@@@
"""
##debug_dont_trust_Numeric_copy = False # 060302 -- will this fix last night's
## singlet-pulling bug? [no, that was weird '==' semantics]
## # (warning, it's slow!)
##
##debug_print_every_array_passed_to_Numeric_copy = False # hmm, this might be
## slow too... to be safe, the runtime use of it should condition it on env.debug(),
## and to be fast, also on debug_dont_trust_Numeric_copy.
# ==
class _eq_id_mixin_: #bruce 060209 ##e refile? use more? (GLPane?)
"""
For efficiency, any classes defining __getattr__ which might frequently
be compared using == or != or coerced to a boolean, should have definitions
for __eq__ and __ne__ and __nonzero__ (and any others we forgot??),
even if those are semantically equivalent to Python's behavior when they don't.
Otherwise Python has to call __getattr__ on each comparison of these objects,
just to check whether they have one of those special method definitions
(potentially as a value returned by __getattr__). This makes those simple
comparisons MUCH SLOWER!
This mixin class is one way of solving that problem by providing definitions
for those methods.
It's ok for a subclass to override some of the methods defined here.
It can override both __eq__ and __ne__, or __eq__ alone (which will cause
our __ne__ to follow suit, since it calls the instances __eq__), but it
should not normally override __ne__ alone, since that would probably
cause __eq__ and __ne__ to be incompatible.
These definitions are suitable for objects meant as containers for "named"
mutable state (for which different objects are never equal, even if their
*current* state is equal, since their future state might not be equal).
They are not suitable for data-like objects. This is why the class name
contains '_eq_id_' rather than '_eq_data_'. For datalike objects, there is
no shortcut to defining each of these methods in a customized way (and that
should definitely be done, for efficiency, under the same conditions in
which use of this mixin is recommended). (We might still decide to make
an _eq_data_mixin_(?) class for them, for some other reason.)
"""
def __eq__(self, other):
return self is other
def __ne__(self, other):
## return not (self == other)
## # presumably this uses self.__eq__ -- would direct use be faster?
return not self.__eq__(other)
def __nonzero__(self):
### warning: I did not verify in Python docs that __nonzero__ is the
### correct name for this! [bruce 060209]
return True
def __hash__(self):
#####k guess at name; guess at need for this due to __eq__,
##### but it did make our objects ok as dict keys again
return id(self) #####k guess at legal value
pass
# ==
def noop(*args, **kws):
    """
    Do nothing, accepting any arguments. (Used as transclose's default
    layer_collector.)
    """
    # duplicate code warning: redundant with def noop in constants.py,
    # but i don't want to try importing from there right now [bruce 070412]
    pass

def transclose( toscan, collector, layer_collector = noop, pass_counter = False):
    """
    General transitive closure routine using dictionaries for collections in
    its API, where the keys can be whatever you want as long as they are
    unique (for desired equiv classes of objects) and used consistently.

    Details: toscan should be a dictionary whose values are the starting
    point for the closure, and collector(obj1, dict1) should take one such
    value obj1 (never passed to it before) and, for each obj it finds from
    obj1 (in whatever way it wants -- that defines the relation we're
    transitively closing), store it as a new value in dict1 (with an
    appropriate consistent key).

    We don't modify toscan, and we return a new dictionary (with consistent
    keys) whose values are all the objects we found. Collector will have been
    called exactly once on each object we return. It must not modify toscan
    (since we iterate over toscan's values while calling collector), at least
    when it was called on one of the values in toscan.

    If pass_counter is true, then pass collector a third argument, counter,
    which is 1 for values in toscan, 2 for new values found directly
    connected to those, 3 for the new values found connected to *those*, etc.

    If layer_collector is provided, then pass it (counter, set of values with
    that counter) each time that becomes known. If it stores counter and
    collector can see that value, it can know the counter value it would be
    passed (same as the last stored one). If it wants to store the set of
    values, it can know we won't modify it, but it must not modify it itself
    either, until we return. After that, it can own all those sets it got,
    except that the first one might be identical to (i.e. be the same mutable
    object as) toscan.
    """
    # We have three dicts at a time: objects we're finding (not being checked
    # for newness yet), objects we're scanning to look for those, and objects
    # seen (i.e. done with or being scanned).
    # Keys are consistent in all these dicts (and should be as unique as
    # objects need to be distinct), but what they actually are is entirely up
    # to our args (but they must be consistent between the two args as well).
    # [#doc: need to explain that better]
    #
    # [Formerly used the Python-2-only dict methods itervalues/has_key/
    #  iteritems; now uses values()/in/items(), which behave the same here
    #  and are also valid Python 3.]
    seen = dict(toscan)
    counter = 1 # only matters for pass_counter or layer_collector
    while toscan:
        layer_collector(counter, toscan) # new feature [bruce 070412]
        found = {}
        len1 = len(toscan)
        for obj in toscan.values():
            if pass_counter:
                # new feature [bruce 070412]
                # [untested, since layer_collector turned out to be more useful]
                collector(obj, found, counter)
            else:
                collector(obj, found) #e might the collector also want to know the key??
        len2 = len(toscan)
        if len1 != len2:
            # btw, if this happens when toscan is not the one passed in, it's still a bug.
            print("bug: transclose's collector %r modified dict toscan (id %#x, length %d -> %d)" %
                  (collector, id(toscan), len1, len2))
        # now "subtract seen from found"
        new = {}
        for key, obj in found.items():
            if key not in seen:
                new[key] = obj
        seen.update(new)
        toscan = new ##e should API permit asking us to store each toscan in an external dict? more useful than passing counter!
        counter += 1
        continue
    return seen
# ==
# private exceptions for use in private helper functions for certain recursive scanners:
class _IsMutable(Exception):
pass
class Classification: #e want _eq_id_mixin_? probably not, since no getattr.
    """
    A Classification records the policies and methods used to inspect, diff,
    copy, etc., every object of one kind; it can also serve as a dict key
    when those policies are stored externally.

    ("One kind of object" usually, but not always, means the instances of a
    single Python class.)
    """
    pass
# possible future optim: some of these could be just attrholders, not instances, so their methods wouldn't require 'self'...
# OTOH some of them do make use of self, and, we might put generic methods on the superclass.
# ... ok, leave it like this for now, and plan to turn it into C code someday; or, use class methods.
# (or just use these subclasses but define funcs outside and store them as attrs on the sole instances)
###@@@ not yet clear if these simple ones end up being used...
##class AtomicClassification(Classification):
## """Atomic (immutable, part-free) types can be scanned and copied trivially....
## """
#### def scan(self, val, func):
#### "call func on nontrivial parts of val (what this means precisely is not yet defined)"
#### pass
## def copy(self, val, func):
## "copy val, using func to copy its parts"
## return val
## pass
##
##class ListClassification(Classification):
## "Classification for Lists (or maybe any similar kind of mutable sequences?)"
#### def scan(self, val, func):
#### "call func on all parts of val"
#### for part in val:
#### func(part)
## def copy(self, val, func):
## "copy val, using func for nontrivial parts"
## return map( func, val) #e optimize by changing API to be same as map, then just using an attrholder, holding map?
## pass
def copy_list(val):
    """
    Return a new list whose elements are copy_val of val's elements.
    """
    # note: formerly map(copy_val, val); a list comprehension behaves
    # identically in Python 2 and, unlike map(), still returns a list
    # (not an iterator) under Python 3.
    return [copy_val(elt) for elt in val]
def scan_list(val, func):
    """
    Scan each element of list val, by calling scan_val on it with func.
    """
    for item in val:
        scan_val(item, func) #bruce 060315 bugfix (scan_val, not bare func)
    return
##class DictClassification(Classification):
## def copy(self, val, func):
## res = {} #e or use same type or class as val? not for now.
## for key, val1 in val.iteritems():
## # as an optim, strictly follow a convention that dict keys are immutable so don't need copying
## res[key] = func(val1)
## return res
## pass
def copy_dict(val):
    """
    Return a new dict with the same keys as val, whose values are copy_val
    of val's values.

    As an optimization, we strictly follow a convention that dict keys are
    immutable, so they don't need copying.
    """
    # note: formerly used the Python-2-only val.iteritems(); items() behaves
    # the same here and is also valid Python 3.
    res = {}
    for key, val1 in val.items():
        res[key] = copy_val(val1)
    return res
def scan_dict(dict1, func):
    """
    Scan each value (not key) of dict1, by calling scan_val on it with func.

    Reports a bug (by printing) if func modifies dict1 during the scan,
    detected by comparing dict1's length before and after.
    """
    len1 = len(dict1) #060405 new code (#####@@@@@ needs to be done in C version too!)
    # note: formerly used the Python-2-only dict1.itervalues(), and a print
    # statement; values() behaves the same here and both forms are also
    # valid Python 3.
    for elt in dict1.values(): # func must not harm dict1
        scan_val( elt, func) #bruce 060315 bugfix
    len2 = len(dict1)
    if len1 != len2:
        print("bug: scan_dict's func %r modified dict %#x (len %d -> %d) during itervalues" %
              (func, id(dict1), len1, len2))
    return
##class TupleClassification(Classification):
## def copy(self, val, func):
## """simple version should be best for now
## """
## return tuple(map(func, val))
## # not worth the runtime to save memory by not copying if all parts immutable; save that for the C version.
## pass
def copy_tuple(val):
    """
    Return a new tuple whose elements are copy_val of val's elements.

    (Simple version; not worth the runtime to save memory by skipping the
    copy when all parts are immutable -- save that for the C version.)
    """
    return tuple(copy_val(elt) for elt in val)

# tuples are scanned exactly like lists (elements in order)
scan_tuple = scan_list
# Tuple of state attr decl values used for attrs which hold "defining state",
# which means state that should (usually) be saved, compared, copied, tracked/changed by Undo, etc.
# Should not include attrs recomputable from these, even if as an optim we sometimes track or save them too (I think).
# (Used by InstanceClassification._find_attr_decls below, to recognize which
#  _s_attr_* declaration values mark an attribute as undoable state.)
STATE_ATTR_DECLS = (S_DATA, S_CHILD, S_CHILDREN, S_REF, S_REFS, S_PARENT, S_PARENTS) # but not S_CACHE, S_JUNK(?), S_CHILDREN_NOT_DATA, etc
#e refile in state_constants.py ? not sure, since it's not needed outside this module
class InstanceClassification(Classification): #k used to be called StateHolderInstanceClassification; not yet sure of scope
    # we might decide to have a helper func classify an instance and return one of several classes, or None-or-so...
    # i mean some more atomic-like classification for classes that deserve one... [060221 late]
    #k not sure if this gains anything from its superclass
    """
    A Classification for the instances of one class: scans the class's
    _s_attr_* declarations and derives from them the policies needed to
    inspect/diff/copy/scan instances of that class (mainly for Undo).

    ###doc, implem - hmm, why do we use same obj for outside and inside? because, from outside, you might add to explore-list...
    """
    def __init__(self, class1):
        """
        Become a Classification for class class1 (applicable to its instances)
        """
        self.policies = {} # maps attrname to policy for that attr #k format TBD, now a map from attrname to decl val
        self.class1 = class1
        #e now something to take class1 and look up the changedicts and their names -- see changes.register_class_changedict
        #e and let this run when we make InstanceClassification??
        # no, we need to know about new class's chanegdicts before we see any instances!
        # (who does? obj_classifier or undo_archive?)
        self.attrcodes_with_no_dflt = []
            # public list of attrcodes with no declared or evident default value (might be turned into a tuple)
            # an attrcode means a pair (attr, acode) where acode's main purpose is to let same-named attrs have different attrdicts
            # [attrcodes were added 060330]
        self.attrcode_dflt_pairs = [] # as of 060409 this is not filled with anything, but in future, we'll want it again ##e
            # public list of attrcode, dflt pairs, for attrs with a default value (has actual value, not a copy);
            # attrcode will be distinct whenever dflt value differs (and maybe more often) [as of 060330]
        self.dict_of_all_state_attrcodes = {}
        self.dict_of_all_Atom_chunk_attrcodes = {} #bruce 071104 kluge
        self.attrcodes_with_undo_setattr = {} #060404, maps the attrcodes to an arbitrary value (only the keys are currently used)
        self.categories = {} # (public) categories (e.g. 'selection', 'view') for attrs which declare them using _s_categorize_xxx
        self.attrlayers = {} # (public) similar [060404]
        self.attrcode_defaultvals = {} # (public) #doc
            ##@@ use more, also know is_mutable about them, maybe more policy about del on copy
            # as of 060330 this is only used by commented-out code not yet adapted from attr to attrcode.
        self.warn = True # from decls seen so far, do we need to warn about this class (once, when we encounter it)?
        self.debug_all_attrs = False # was env.debug(); can normally be False now that system works # DEBUG_PYREX_ATOMS?
        self._find_attr_decls(class1) # fills self.policies and some other instance variables derived from them
        self.attrcodes_with_no_dflt = tuple(self.attrcodes_with_no_dflt) # optimization, I presume; bad if we let new attrs get added later
        self.attrcode_dflt_pairs = tuple(self.attrcode_dflt_pairs)
        self.S_CHILDREN_attrs = self.attrs_declared_as(S_CHILD) + \
                                self.attrs_declared_as(S_CHILDREN) + \
                                self.attrs_declared_as(S_CHILDREN_NOT_DATA) #e sorted somehow? no need yet.
        self._objs_are_data = copiers_for_InstanceType_class_names.has_key(class1.__name__) or \
                              hasattr(class1, '_s_isPureData')
            # WARNING: this code is duplicated/optimized in _same_InstanceType_helper [as of bruce 060419, for A7]
        if self.warn and (env.debug() or DEBUG_PYREX_ATOMS):
            # note: this should not be env.debug() since anyone adding new classes needs to see it...
            # but during development, with known bugs like this, we should not print stuff so often...
            # so it's env.debug for now, ####@@@@ FIX THAT LATER [060227]
            print "InstanceClassification for %r sees no mixin or _s_attr decls; please add them or register it (nim)" \
                  % class1.__name__
        if (env.debug() or DEBUG_PYREX_ATOMS) and not self.attrcodes_with_no_dflt and self.attrcode_dflt_pairs: #060302; doesn't get printed (good)
            print "InstanceClassification for %r: all attrs have defaults, worry about bug resetting all-default objects"\
                  % class1.__name__
        return
    def __repr__(self):
        # brief debug representation, naming the class we classify
        return "<%s at %#x for %s>" % (self.__class__.__name__, id(self), self.class1.__name__)
    def _acode_should_be_classname(self, class1): #bruce 071114
        """
        Say whether the acode component of all attrcodes for
        undoable attributes in instances of class1
        should equal the classname of class1,
        rather than having its usual value
        as determined by the caller.
        """
        # VERIFY right thing it should be, right cond; understand why;
        # [prior code was equivalent to class1.__name__ in ('Atom', 'Bond')
        #  and had comment "###@@@ kluge 060404, in two places",
        #  which occurred at both code snippets that are now calls of this.
        #  One of them also says "# it matters that this can't equal id(dflt)
        #  for this attr in some other class" -- I think that was depended on
        #  to let each subclass with same attrname have its own dfltval,
        #  back when dfltvals were supported. But there might be other things
        #  that still happen that assume this, I don't know. [bruce 071114]]
        specialcase_type = getattr( class1, '_s_undo_specialcase', None)
        if specialcase_type in (UNDO_SPECIALCASE_ATOM,
                                UNDO_SPECIALCASE_BOND):
            return True
        return False
    def _find_attr_decls(self, class1):
        """
        find _s_attr_xxx decls on class1, and process/store them
        (fills self.policies, self.categories, self.attrlayers, and the
        attrcode-related instance variables; clears self.warn when any
        legitimate declaration is seen)
        """
        if self.debug_all_attrs:
            print "debug: _find_attr_decls in %s:" % (class1.__name__,)
        all_s_attr_decls = filter(lambda x: x.startswith("_s_"), dir(class1))
        for name in all_s_attr_decls:
            if name.startswith('_s_attr_'):
                ## attr_its_about = name[len('_s_attr_'):]
                attr_its_about = remove_prefix( name, '_s_attr_')
                setattr_name = '_undo_setattr_' + attr_its_about
                declval = getattr(class1, name) # the value assigned to _s_attr_<attr>
                self.policies[attr_its_about] = declval #k for class1, not in general
                if self.debug_all_attrs:
                    print "  %s = %s" % (name, declval)
                self.warn = False # enough to be legitimate state
                #e check if per-instance? if callable? if legal?
                if declval in STATE_ATTR_DECLS:
                    # figure out if this attr has a known default value... in future we'll need decls to guide/override this
                    # (REVIEW: is this really about a declared one, or a class-defined one, if both of those are supported?
                    #  This is academic now, since neither is supported. [bruce 071113 comment])
                    has_dflt, dflt = self.attr_has_default(class1, attr_its_about)
                    assert not has_dflt # see comment in attr_has_default about why current code requires this
                    if not has_dflt:
                        acode = 0 ###stub, but will work initially;
                            # later will need info about whether class is diffscanned, in layer, etc
                        if self._acode_should_be_classname(class1):
                            acode = class1.__name__
                            assert _KLUGE_acode_is_special_for_extract_layers(acode)
                        else:
                            assert not _KLUGE_acode_is_special_for_extract_layers(acode)
                        attrcode = (attr_its_about, acode)
                        self.attrcodes_with_no_dflt.append(attrcode)
                    else:
                        # NOTE: this case never runs as of long before 071114.
                        # attr has a default value.
                        # first, make sure it has no _undo_setattr_ method, since our code for those has a bug
                        # in that reset_obj_attrs_to_defaults would need to exclude those attrs, but doesn't...
                        # for more info see the comment near its call. [060404]
                        assert not hasattr(class1, setattr_name), "bug: attr with class default can't have %s too" % setattr_name
                            # this limitation could be removed when we need to, by fixing the code that calls reset_obj_attrs_to_defaults
                        acode = id(dflt) ###stub, but should eliminate issue of attrs with conflicting dflts in different classes
                            # (more principled would be to use the innermost class which changed the _s_decl in a way that matters)
                        if self._acode_should_be_classname(class1):
                            acode = class1.__name__ # it matters that this can't equal id(dflt) for this attr in some other class
                            assert _KLUGE_acode_is_special_for_extract_layers(acode)
                        else:
                            assert not _KLUGE_acode_is_special_for_extract_layers(acode)
                        attrcode = (attr_its_about, acode)
                        self.attrcode_dflt_pairs.append( (attrcode, dflt) )
                        self.attrcode_defaultvals[attrcode] = dflt
                        if (env.debug() or DEBUG_PYREX_ATOMS) and is_mutable(dflt): #env.debug() (redundant here) is just to make prerelease snapshot safer
                            if (env.debug() or DEBUG_PYREX_ATOMS):
                                print "debug warning: dflt val for %r in %r is mutable: %r" % (attr_its_about, class1, dflt)
                            pass # when we see what is warned about, we'll decide what this should do then [060302]
                                # e.g. debug warning: dflt val for 'axis' in <class jigs_motors.LinearMotor at 0x3557d50>
                                # is mutable: array([ 0.,  0.,  0.])
                        if self.debug_all_attrs:
                            print "   dflt val for %r is %r" % (attrcode, dflt,)
                        pass
                    self.dict_of_all_state_attrcodes[ attrcode ] = None
                    if hasattr(class1, setattr_name):
                        self.attrcodes_with_undo_setattr[ attrcode ] = True
                            # only True, not e.g. the unbound method, since classes
                            # with this can share attrcodes, so it's just a hint
                    specialcase_type = getattr( class1, '_s_undo_specialcase', None)
                    if specialcase_type == UNDO_SPECIALCASE_ATOM and \
                       attr_its_about == ATOM_CHUNK_ATTRIBUTE_NAME:
                        self.dict_of_all_Atom_chunk_attrcodes[ attrcode ] = None #071114
                pass
            elif name == '_s_isPureData': # note: exact name (not a prefix), and doesn't end with '_'
                self.warn = False # enough to be legitimate data
            elif name == '_s_scan_children':
                pass ## probably not: self.warn = False
            elif name == '_s_undo_specialcase':
                pass
            elif name == '_s_undo_class_alias':
                pass
            elif name.startswith('_s_categorize_'):
                #060227; #e should we rename it _s_category_ ?? do we still need it, now that we have _s_attrlayer_ ? (we do use it)
                attr_its_about = name[len('_s_categorize_'):]
                declval = getattr(class1, name)
                assert type(declval) == type('category') # not essential, just to warn of errors in initial planned uses
                self.categories[attr_its_about] = declval
            elif name.startswith('_s_attrlayer_'): #060404
                attr_its_about = name[len('_s_attrlayer_'):]
                declval = getattr(class1, name)
                assert type(declval) == type('attrlayer') # not essential, just to warn of errors in initial planned uses
                self.attrlayers[attr_its_about] = declval
            else:
                print "warning: unrecognized _s_ attribute ignored:", name ##e
        return
    def attr_has_default(self, class1, attr):
        """
        Figure out whether attr has a known default value in class1.

        @param class1: a class whose instances might be scanned for diffs by Undo.
        @param attr: an attribute of instances of class1, which contains undoable state.
        @return: (has_dflt, dflt), where has_dflt is a boolean saying
                 whether attr has a default value in class1, and (if it does)
                 dflt is that value (and is identical to it, not just equal to it,
                 if that might matter).
        @note: this currently always says "no" by returning (False, None),
               to accommodate limitations in other code. (See code comment for details.)
        @note: in future we'll need decls to guide/override this.
               When we do, the callers may need to distinguish whether there's
               a known default value, vs whether it's stored as a class attribute.
        """
        if 0:
            # This version is disabled because differential mash_attrs requires
            # no dflt vals at all, or it's too complicated.
            #
            # What would be better (and will be needed to support dfltvals
            # for binary mmp save) would be to still store attrs with dflts
            # here, but to change the undo iter loops to iterate over all
            # attrs with or without defaults, unlike now.
            #
            # An earlier kluge [060405 1138p], trying to fix some bugs,
            # was, for change-tracked classes, to pretend there's no such thing
            # as a default value, since I suspected some logic bugs in the dual
            # meaning of missing state entries as dflt or dontcare.
            # That kluge looked at class1.__name__ in ('Atom', 'Bond') to detect
            # change-tracked classes.
            try:
                return True, getattr(class1, attr)
            except AttributeError:
                # assume no default value unless one is declared (which is nim)
                return False, None
            pass
        return False, None
    def attrs_declared_as(self, S_something):
        # return the list of attrnames whose declared policy equals S_something
        #e if this is commonly called, we'll memoize it in __init__ for each S_something
        res = []
        for attr, decl in self.policies.iteritems():
            if decl == S_something:
                res.append(attr)
        return res
    def obj_is_data(self, obj):
        """
        Should obj (one of our class's instances) be considered a data object?
        """
        return self._objs_are_data
            ## or hasattr(obj, '_s_isPureData'),
            # if we want to let individual instances override this
    def copy(self, val, func): # from outside, when in vals, it might as well be atomic! WRONG, it might add self to todo list...
        """
        Copy val, a (PyObject pointer to an) instance of our class
        (i.e. return it unchanged -- instances are treated as atomic here).
        """
        return val
    def scan_children( self, obj1, func, deferred_category_collectors = {}, exclude_layers = ()):
        """
        [for #doc of deferred_category_collectors, see caller docstring]

        (note: the mutable default argument for deferred_category_collectors
        is safe here, since this method only reads it via .get and never
        mutates it)
        """
        try:
            # (we might as well test this on obj1 itself, since not a lot slower than memoizing the same test on its class)
            method = obj1._s_scan_children # bug: not yet passing deferred_category_collectors ###@@@ [not needed for A7, I think]
        except AttributeError:
            for attr in self.S_CHILDREN_attrs:
                if self.exclude(attr, exclude_layers):
                    continue
                val = getattr(obj1, attr, None)
                cat = self.categories.get(attr) #e need to optimize this (separate lists of attrs with each cat)?
                # cat is usually None; following code works then too;
                #e future optim note: often, cat is 'selection' but val contains no objects (for attr 'picked', val is boolean)
                collector = deferred_category_collectors.get(cat) # either None, or a dict we should modify (perhaps empty now)
                if collector is not None: # can't use boolean test, since if it's {} we want to use it
                    def func2(obj):
                        ## print "collecting %r into %r while scanning %r" % (obj, cat, attr) # works [060227]
                        collector[id(obj)] = obj
                    scan_val(val, func2)
                else:
                    scan_val(val, func) # don't optimize for val is None, since it's probably rare, and this is pretty quick anyway
                        #e we might optimize by inlining scan_val, though
        else:
            method(func)
        return
    def exclude(self, attr, exclude_layers):
        """
        Should you scan attr (of an obj of this class), given these exclude_layers (perhaps ())?
        """
        return self.attrlayers.get(attr) in exclude_layers # correct even though .get is often None
    pass # end of class InstanceClassification
# == helper code [##e all code in this module needs reordering]
# Registries mapping builtin types to per-type handler functions.
# (Presumably populated later in this module, e.g. with the list/dict/tuple
#  helpers defined above -- the registration code is not in this section.)
_known_type_copiers = {}
    # needs no entry for types whose instances can all be copied as themselves
_known_mutable_types = {} # used by is_mutable
_known_type_scanners = {}
    # only needs entries for types whose instances might contain (or be)
    # InstanceType or InstanceLike objects, and which might need to be entered
    # for finding "children" (declared with S_CHILD) -- for now we assume that
    # means there's no need to scan inside bound method objects, though this
    # policy might change.
# not yet needed, but let the variable exist since there's one use of it I might as well leave active (since rarely run):
copiers_for_InstanceType_class_names = {} # copier functions for InstanceTypes whose classes have certain names
    # (This is mainly for use when we can't add methods to the classes themselves.
    #  The copiers should verify the class is the expected one, and return the original object unchanged if not
    #  (perhaps with a warning), or raise an exception if they "own" the classname.)
    #
    # WARNING: some code is optimized to assume without checking that copiers_for_InstanceType_class_names is empty,
    # so search for all uses of it (in commented out code) if you ever add something to it. [bruce 060419]
# scanners_for_class_names would work the same way, but we don't need it yet.
def copy_val(val):
    """
    Efficiently copy a general Python value (so that mutable components are
    not shared with the original), returning class instances unchanged unless
    they define a _copyOfObject method, and returning unrecognized objects
    (e.g. QWidgets, bound methods) unchanged.

    (See a code comment for the reason we can't just use the standard Python
    copy module for this.)

    @note: this function is replaced by a C implementation when that is
           available. See COPYVALS_SPEEDUP in the code.
    """
    #bruce 060221 generalized semantics and rewrote for efficiency
    # note: _known_type_copiers is a fixed public dictionary whose values
    # are never None, so a .get with None as the "missing" sentinel is safe.
    copier = _known_type_copiers.get(type(val))
    if copier is None:
        # No per-type copier registered. We used to optimize by not storing
        # any copier for atomic types... but now that we call _generalCopier
        # [bruce 090206] that is no longer an optimization; since the C code
        # is used by all end-users and most developers, nevermind for now.
        return _generalCopier(val)
    return copier(val)
def is_mutable(val): #bruce 060302
    """
    Efficiently scan a potential argument to copy_val to see if it contains
    any mutable parts (including itself), with special cases suitable for
    use on state-holding attribute values for Undo, which might be
    surprising in other applications (notably, for most
    InstanceType/InstanceLike objects).

    Details:
    Lists and dicts are treated as mutable; a tuple per se is not (but its
    components are scanned) -- all this is correct.
    A Numeric.array is treated as mutable, regardless of size or type
    (dubious for size 0, though type and maybe shape could probably be
    changed, but this doesn't matter for now).
    Unknown types occurring in _known_mutable_types are treated as mutable
    (ok for now, though might need a registration scheme in future; might
    cover some of the above cases).
    An InstanceLike instance is treated as mutable if and only if it
    defines an _s_isPureData attribute. (The other ones, we're thinking of
    as immutable references or object pointers, and we don't care whether
    the objects they point to are mutable.)
    """
    # REVIEW: should this be used more?
    # As of 090206, its only actual use is a debug-only warning about
    # mutable default values of undoable attrs (which are allowed and do
    # occur in our code). (Perhaps the original intent was to optimize for
    # non-mutable ones, but this is not presently done.)
    try:
        _is_mutable_helper(val)
    except _IsMutable:
        return True
    else:
        return False
def _is_mutable_helper(val, _tupletype = type(())):
    """
    [private recursive helper for is_mutable]
    Raise _IsMutable if val (or any part of it) is mutable; otherwise
    return None.
    """
    #bruce 060303, revised 090206
    # (kluge, 090206: storing _tupletype as an optional arg's default value
    #  is just for faster comparison -- caller should never pass it)
    valtype = type(val)
    if valtype is _tupletype:
        # tuple is a special case: not itself mutable, but it has
        # components that might be -- someday, make provisions for more
        # special cases like this, which can register themselves
        for component in val:
            _is_mutable_helper(component)
        return
    if _known_mutable_types.get(valtype): # a fixed public dictionary
        raise _IsMutable
    if hasattr(val, '_s_isPureData'):
        # (note: only possible when isinstance(val, InstanceLike),
        #  but always safe to check, so no need to check for InstanceLike)
        raise _IsMutable
    return # immutable or unrecognized types
def scan_val(val, func):
    """
    Efficiently scan a general Python value, and call func on all
    InstanceType objects (old-style class instances) and/or InstanceLike
    objects (instances of subclasses of InstanceLike, whether old or new
    style) encountered.
    No need to descend inside any values unless they might contain class
    instances. Note that some classes define the _s_scan_children method,
    but we *don't* descend into their instances here using that method --
    this is only done by other code, such as whatever code func might end
    up delivering such objects to.
    Special case: we never descend into bound method objects either.
    @return: an arbitrary value which caller should not use (always None
             in the present implem)
    """
    #doc -- the docstring needs to explain why we never descend into bound
    # method objects. It used to say "see comment on _known_type_scanners
    # for why", but I removed that since I can no longer find that
    # comment. [bruce 090206]
    scanner = _known_type_scanners.get(type(val)) # a fixed public dictionary
    if scanner is not None:
        # (we used to optimize by not storing any scanner for atomic
        #  types, or a few others; as of 090206 this might be a slowdown
        #  ###OPTIM sometime)
        scanner(val, func)
        return
    if isinstance(val, InstanceLike):
        _scan_Instance(val, func) #bruce 090206, so no need to register these
    return
# register the copiers, mutability flags, and scanners for the builtin
# container types (copy_list/copy_dict/copy_tuple and
# scan_list/scan_dict/scan_tuple are defined earlier in this file)
_known_type_copiers[type([])] = copy_list
_known_type_copiers[type({})] = copy_dict
_known_type_copiers[type(())] = copy_tuple

_known_mutable_types[type([])] = True
_known_mutable_types[type({})] = True
# not tuple -- it's hardcoded in the code that uses this

_known_type_scanners[type([])] = scan_list
_known_type_scanners[type({})] = scan_dict
_known_type_scanners[type(())] = scan_tuple
# ==
def _debug_check_copyOfObject(obj, res):
    """
    [private helper for _generalCopier]

    Debug-only sanity check on the result of obj._copyOfObject():
    verify that obj != res is false and obj == res is true; if not,
    print a warning (and, when debugging is enabled, a compact stack
    and obj's own explanation of the inequality via _s_printed_diff,
    if it defines that), since an inequal copy indicates a bug in the
    copy method which will cause false positives in Undo's change
    detection.

    @param obj: the object that was copied
    @param res: the value returned by obj._copyOfObject()
    @return: res, always -- even when the check fails, since returning
             res is safer than returning obj (returning obj could cause
             Undo to completely miss changes)
    """
    if (obj != res or (not (obj == res))):
        #bruce 060311, revised 090206
        # [warning -- copy_val recursion means enabling the call of this function
        #  is exponential time in depth of copied values, I think... not sure,
        #  maybe it cuts off at instances and is only relevant for depths of
        #  the pure python part, rarely more than 2. --bruce 090206]
        # This has detected a bug in copy_method, which will cause false
        # positives in change-detection in Undo (since we'll return res anyway).
        # (It's still better to return res than obj, since returning obj could
        #  cause Undo to completely miss changes.)
        #
        # Note: we require obj == res, but not res == obj (e.g. in case a fancy
        # number turns into a plain one). Hopefully the fancy object could
        # define some sort of __req__ method, but we'll try to not force it to
        # for now; this has implications for how our diff-making archiver should
        # test for differences. ###@@@doit
        msg = "bug: obj != res or (not (obj == res)), res is _copyOfObject of obj; " \
              "obj is %r, res is %r, == is %r, != is %r: " % \
              (obj, res, obj == res, obj != res)
        if not env.debug():
            print msg
        else:
            # NOTE(review): msg already ends with ": ", so this appends a
            # second ": " -- harmless but looks like an oversight; confirm
            # before changing the message text.
            print_compact_stack( msg + ": ")
            try:
                method = obj._s_printed_diff
                    # experimental (#e rename):
                    # list of strings (or so) which explain why __eq_ returns
                    # false [060306, for bug 1616]
            except AttributeError:
                pass
            else:
                print " a reason they are not equal:\n", method(res)
        #e also print history redmsg, once per class per session?
    return res
# ==
_generalCopier_exceptions = {}
    # set of types which _generalCopier should not complain about
    # (stored as a dict mapping type -> type); extended at runtime
    # when unexpected types are first encountered

if 1:
    # add exceptions for known types we should trivially copy
    # whenever they lack a _copyOfObject method
    class _NEW_STYLE_1(object):
        # throwaway class, used only to get the type of a new-style class
        pass
    class _OLD_STYLE_1:
        # throwaway class, used only to get the type of an old-style class
        pass
    for _x in [1, # int
               None, # NoneType
               True, # bool
               "", # str
               u"", # unicode
               0.1, # float
               # not InstanceType -- warn about missing _copyOfObject method
               ## _OLD_STYLE_1(), # instance == InstanceType
               _OLD_STYLE_1, # classobj
               _NEW_STYLE_1 # type
              ]:
        _generalCopier_exceptions[type(_x)] = type(_x)
# ==
def _generalCopier(obj):
    """
    @param obj: a class instance (old or new style), or anything else whose type
                is not specifically known to copy_val, which occurs as or in the
                value of some copyable attribute in a "model object" passed to
                copy_val
    @type obj: anything
    @return: a copy of obj, for the purposes of copy_val. Note: this is *not*
             the same as a copy of a model object for purposes of user-level
             copy operations -- e.g., that usually makes a new model object,
             whereas this just returns it unchanged (treating it as a reference
             to the same mutable object as before) if it inherits StateMixin.
             The difference comes from copying obj as it's used in some other
             object's attr value (as we do here), vs copying "obj itself".
    @note: This function's main point is to honor the _copyOfObject method on
           obj (returning whatever it returns), rather than just returning obj
           (as it does anyway if that method is not defined).
    @note: this is called from both C and Python copy_val, for both old and new
           style class instances (and any other unrecognized types)
    @see: InstanceClassification (related; may share code, or maybe ought to)
    """
    #bruce 090206 (modelled after older copy_InstanceType, and superseding it)
    try:
        copy_method = obj._copyOfObject
        # note: not compatible with copy.deepcopy's __deepcopy__ method;
        # see DataMixin and IdentityCopyMixin below.
    except AttributeError:
        # This will happen once for anything whose type is not known to copy_val
        # and which doesn't inherit DataMixin or IdentityCopyMixin or StateMixin
        # (or otherwise define _copyOfObject), unless we added it to
        # _generalCopier_exceptions above. After the first complaint we record
        # the type so each unexpected type is only warned about once.
        if not _generalCopier_exceptions.get(type(obj)):
            print "\n***** adding _generalCopier exception for %r " \
                  "(bad if not a built-in type -- classes used in copied model " \
                  "attributes should inherit something like DataMixin or " \
                  "StateMixin)" % type(obj)
            _generalCopier_exceptions[type(obj)] = type(obj)
        return obj
    else:
        res = copy_method()
            #bruce 081229 no longer pass copy_val (no implem used it)
        if debug_flags.atom_debug: ## TEST: this might slow down the C version
            _debug_check_copyOfObject(obj, res)
        return res
    pass
# note: the following old code/comments came from copy_InstanceType when it
# was merged into _generalCopier; they may or may not be obsolete. [bruce 090206]
#e pass copy_val as an optional arg?
# the following code for QColor is not yet needed, since QColor instances
# are not of type InstanceType (but keep the code for when it is needed):
##copier = copiers_for_InstanceType_class_names.get(obj.__class__.__name__)
## # We do this by name so we don't need to import QColor (for example)
## # unless we encounter one. Similar code might be needed by anything
## # that looks for _copyOfObject (as a type test or to use it).
## # ###@@@ DOIT, then remove cmt
## #e There's no way around checking this every time, though we might
## # optimize by storing specific classes which copy as selves into some
## # dict; it's not too important since we'll optimize Atom and Bond
## # copying in other ways.
##if copier is not None:
## return copier(obj, copy_val) # e.g. for QColor
# ==
COPYVAL_SPEEDUP = False # changed below if possible [bruce 090305 revised]

if SAMEVALS_SPEEDUP:
    try:
        # Try to replace copy_val definition above with the extension's version.
        # (This is done for same_vals in utilities/Comparison.py,
        #  and for copy_val here in state_utils.py.)
        from samevals import copy_val as _copy_val, setInstanceCopier, setGeneralCopier, setArrayCopier
    except:
        # the C extension is present but too old to export copy_val & friends
        print "using SAMEVALS_SPEEDUP but not COPYVAL_SPEEDUP; is samevals.{dll,so} out of date?"
        pass
    else:
        copy_val = _copy_val
        setInstanceCopier(_generalCopier) # [review: still needed?]
        # note: this means _generalCopier is applied by the C version
        # of copy_val to instances of InstanceType (or of any class in the
        # list passed to setInstanceLikeClasses, but we no longer do that).
        setGeneralCopier(_generalCopier)
        # note: _generalCopier is applied to anything that lacks hardcoded copy
        # code which isn't handled by setInstanceCopier, including
        # miscellaneous extension types, and instances of any new-style classes
        # not passed to setInstanceLikeClasses. In current code and in routine
        # usage, it is probably never used, but if we introduce new-style model
        # classes (or use the experimental atombase extension), it will be used
        # to copy their instances. Soon I plan to make some model classes
        # new-style. [new feature, bruce 090206]
        setArrayCopier(lambda x: x.copy())
        COPYVAL_SPEEDUP = True
        print "COPYVAL_SPEEDUP is True"
        pass
    pass
# inlined:
## def is_mutable_Instance(obj):
##     return hasattr(obj, '_s_isPureData')

# route all old-style class instances through _generalCopier
_known_type_copiers[ InstanceType ] = _generalCopier
def _scan_Instance(obj, func):
"""
This is called by scan_vals to support old-style instances,
or new-style instances whose classes inherit InstanceLike
(or its subclasses such as StateMixin or DataMixin).
@param obj: the object (class instance) being scanned.
@param func: ###doc this
"""
func(obj)
#e future optim: could we change API so that apply could serve in place
# of _scan_Instance? Probably not, but nevermind, we'll just do all
# this in C at some point.
return None
# route all old-style class instances through _scan_Instance
_known_type_scanners[ InstanceType ] = _scan_Instance
    # (storing this is mainly just an optimization, but not entirely,
    #  if there are any old-style classes that we should scan this way
    #  but which don't inherit InstanceLike; that is probably an error
    #  but not currently detected. [bruce 090206 comment])
# ==
def copy_Numeric_array(obj):
    """
    Copy a Numeric array for copy_val.

    For arrays of typecode PyObject, copy each element with copy_val and
    build a new array (letting the result's typecode be inferred, since
    _copyOfObject methods could return anything); for Character and
    number arrays, use Numeric's own copy method.

    @param obj: a Numeric array
    @return: a copy of obj sharing no mutable components with it
    """
    if obj.typecode() == PyObject:
        if (env.debug() or DEBUG_PYREX_ATOMS):
            print "atom_debug: ran copy_Numeric_array, PyObject case" # remove when works once ###@@@
        return array( map( copy_val, obj) )
            ###e this is probably incorrect for multiple dimensions; doesn't matter for now.
            # Note: We can't assume the typecode of the copied array should also be PyObject,
            # since _copyOfObject methods could return anything, so let it be inferred.
            # In future we might decide to let this typecode be declared somehow...
    ## if debug_dont_trust_Numeric_copy: # 060302
    ##     res = array( map( copy_val, list(obj)) )
    ##     if debug_print_every_array_passed_to_Numeric_copy and env.debug():
    ##         print "copy_Numeric_array on %#x produced %#x (not using Numeric.copy); input data %s" % \
    ##               (id(obj), id(res), obj)
    ##     return res
    return obj.copy() # use Numeric's copy method for Character and number arrays
        ###@@@ verify ok from doc of this method...
def scan_Numeric_array(obj, func):
    """
    Scan a Numeric array for scan_val.

    For arrays of typecode PyObject, recursively scan each element with
    scan_val (passing func along); arrays of other typecodes contain
    only characters or numbers, so nothing needs to be done for them.

    @param obj: a Numeric array
    @param func: the callable scan_val applies to instances it finds
    """
    if obj.typecode() == PyObject:
        # note: this doesn't imply each element is an InstanceType instance,
        # just an arbitrary Python value
        if env.debug() or DEBUG_PYREX_ATOMS:
            print "atom_debug: ran scan_Numeric_array, PyObject case" # remove when works once ###@@@
        ## map( func, obj)
        for elt in obj:
            scan_val(elt, func) #bruce 060315 bugfix
            # is there a more efficient way?
        ###e this is probably correct but far too slow for multiple dimensions; doesn't matter for now.
    return
# register copy/scan support for Numeric arrays, if Numeric is available;
# if it isn't, copy_val and scan_val simply won't special-case arrays
try:
    from Numeric import array, PyObject
except:
    if env.debug() or DEBUG_PYREX_ATOMS:
        print "fyi: can't import array, PyObject from Numeric, so not registering its copy & scan functions"
else:
    # note: related code exists in utilities/Comparison.py.
    numeric_array_type = type(array(range(2)))
        # note: __name__ is 'array', but Numeric.array itself is a
        # built-in function, not a type
    assert numeric_array_type != InstanceType
    _known_type_copiers[ numeric_array_type ] = copy_Numeric_array
    _known_type_scanners[ numeric_array_type ] = scan_Numeric_array
    _known_mutable_types[ numeric_array_type ] = True
    _Numeric_array_type = numeric_array_type #bruce 060309 kluge, might be temporary
    del numeric_array_type
        # but leave array, PyObject as module globals for use by the
        # functions above, for efficiency
    pass
# ==
def copy_QColor(obj):
    """
    Copy a QColor instance for copy_val, by constructing a new QColor
    from it (QColor's copy constructor).

    @param obj: a PyQt4 QColor instance (asserted, not just assumed)
    @return: a new QColor equal to obj
    """
    from PyQt4.Qt import QColor
    assert obj.__class__ is QColor # might fail (in existing calls) if some other class has the same name
    if (env.debug() or DEBUG_PYREX_ATOMS):
        print "atom_debug: ran copy_QColor" # remove when works once; will equality work right? ###@@@
    return QColor(obj)
# register copy support for QColor, if PyQt4 is available
try:
    # this is the simplest way to handle QColor for now; if always importing qt from this module
    # becomes a problem (e.g. if this module should work in environments where qt is not available),
    # make other modules register QColor with us, or make sure it's ok if this import fails
    # (it is in theory).
    from PyQt4.Qt import QColor
except:
    if (env.debug() or DEBUG_PYREX_ATOMS):
        print "fyi: can't import QColor from qt, so not registering its copy function"
else:
    QColor_type = type(QColor())
        # note: this is the type of a QColor instance, not of the class!
        # type(QColor) is <type 'sip.wrappertype'>, which we'll just treat as a constant,
        # so we don't need to handle it specially.
    if QColor_type != InstanceType:
        ## wrong: copiers_for_InstanceType_class_names['qt.QColor'] = copy_QColor
        _known_type_copiers[ QColor_type ] = copy_QColor
        _known_mutable_types[ QColor_type ] = True # not sure if needed, but might be, and safe
    else:
        print "Warning: QColor_type is %r, id %#x,\n and InstanceType is %r, id %#x," % \
              ( QColor_type, id(QColor_type), InstanceType, id(InstanceType) )
        print " and they should be != but are not,"
        print " so Undo is not yet able to copy QColors properly; this is not known to cause bugs"
        print " but its full implications are not yet understood. So far this is only known to happen"
        print " in some systems running Mandrake Linux 10.1. [message last updated 060421]"
    # no scanner for QColor is needed, since it contains no InstanceType/InstanceLike
    # objects. no same_helper is needed, since '!=' will work correctly
    # (only possible since it contains no general Python objects).
    del QColor, QColor_type
    pass
# ==
##e Do we need a copier function for a Qt event? Probably not, since they're only safe
# to store after making copies (see comments around QMouseEvent in selectMode.py circa 060220),
# and (by convention) those copies are treated as immutable.
# The reason we can't use the standard Python copy module's deepcopy function:
# it doesn't give us enough control over what it does to instances of unrecognized classes.
# For our own classes, we could do anything, but for other classes, we need them to be copied
# as the identity (i.e. as unaggressively as possible), or perhaps signalled as errors or warnings,
# but copy.deepcopy would copy everything inside them, i.e. copy them as aggressively as possible,
# and there appears to be no way around this.
#
##>>> import copy
##>>> class c:pass
##...
##>>> c1 = c()
##>>> c2 = c()
##>>> print id(c1), id(c2)
##270288 269568
##>>> c3 = copy.deepcopy(c1)
##>>> print id(c3)
##269968
#
# And what about the copy_reg module?... it's just a way of extending the Pickle module
# (which we also can't use for this), so it's not relevant.
#
# Notes for the future: we might use copy.deepcopy in some circumstances where we had no fear of
# encountering objects we didn't define;
# or we might write our own C/Pyrex code to imitate copy_val and friends.
# ==
# state_utils-copy_val-outtake.py went here, defined:
# class copy_run, etc
# copy_val
# ==
# state_utils-scanner-outtake.py went here, defined
# class attrlayer_scanner
# class scanner
# ##class Classifier: #partly obs? superseded by known_types?? [guess, 060221]
# ##the_Classifier = Classifier()
# ==
class objkey_allocator:
    """
    Use one of these to allocate small int keys (guaranteed nonzero) for
    objects you're willing to keep forever.
    We provide public dict attrs with our tables, and useful methods for
    whether we saw an object yet, etc.
    Note: a main motivation for having these keys at all is speed and space
    when using them as dict keys in large dicts, compared to python id()
    values. Other motivations are their uniqueness, and possible use in
    out-of-session encodings, or in other non-live encodings of object
    references.
    """
    def __init__(self):
        self.obj4key = {}
            # maps key to object. this is intentionally not weak-valued.
            # It's public.
        self._key4obj = {} # maps id(obj) -> key; semiprivate
        self._lastobjkey = 0 # last key handed out; never reset (see clear)

    def clear(self):
        """
        Forget all key/object associations. Note that self._lastobjkey is
        deliberately not reset, so keys are never reused across a clear.
        """
        self.obj4key.clear()
        self._key4obj.clear()
        #e but don't change self._lastobjkey
        return

    def destroy(self):
        """
        Clear self and make it unusable (all table attrs become the string
        'destroyed', so any later use fails loudly).
        """
        self.clear()
        self.obj4key = self._key4obj = self._lastobjkey = 'destroyed'
        return

    def allocate_key(self, key = None): # maybe not yet directly called; untested
        """
        Allocate the requested key (assertfail if it's not available), or a
        new one we make up, and store None for it (in obj4key only -- no
        object is yet associated with the key, so _key4obj is untouched).

        @return: the allocated key (always true/nonzero)
        """
        if key is not None:
            # this only makes sense if we allocated it before and then
            # abandoned it (leaving a hole), which is NIM anyway, or possibly
            # if we use md5 or sha1 strings or the like for keys (though we'd
            # probably first have to test for prior decl).
            # if that starts happening, remove the assert 0.
            assert 0, "this feature should never be used in current code (though it ought to work if it was used correctly)"
            assert key not in self.obj4key
        else:
            # note: this code also occurs directly in key4obj_maybe_new, for speed
            self._lastobjkey += 1
            key = self._lastobjkey
            assert key not in self.obj4key # different line number than identical assert above (intended)
        self.obj4key[key] = None # placeholder; nothing is yet stored into self._key4obj, since we don't know obj!
        assert key, "keys are required to be true, whether or not allocated by caller; this one is false: %r" % (key,)
        return key

    def key4obj(self, obj): # maybe not yet directly called; untested; inlined by some code
        """
        What's the key for this object, if it has one? Return None if we
        didn't yet allocate one for it.
        Ok to call on objects for which allocating a key would be illegal
        (in fact, on any Python values, I think #k).
        """
        return self._key4obj.get(id(obj))
            #e future optim: store in the obj, for some objs? not sure it's
            # worth the trouble, except maybe in addition to this, for use in
            # inlined code customized to the classes. here, we don't need to know.
            # Note: We know we're not using a recycled id since we have a ref
            # to obj! (No need to test it -- having it prevents that obj's id
            # from being recycled. If it left and came back, this is not
            # valid, but then neither would the comparison be!)

    def key4obj_maybe_new(self, obj):
        """
        What's the key for this object, which we may not have ever seen
        before (in which case, make one up)?
        Only legal to call when you know it's ok for this obj to have a key
        (since this method doesn't check that).
        Optimized for when key already exists.
        """
        try:
            return self._key4obj[id(obj)]
        except KeyError:
            # this is the usual way to assign new keys to newly seen objects
            # (maybe the only way)
            # note: this is an inlined portion of self.allocate_key()
            self._lastobjkey += 1
            key = self._lastobjkey
            assert key not in self.obj4key
            self.obj4key[key] = obj
            self._key4obj[id(obj)] = key
            return key
        pass

    pass # end of class objkey_allocator
# ==
class StateSnapshot:
    """
    A big pile of saved (copies of) attribute values -- for each known attr,
    a dict from objkey to value.
    The code that stores data into one of these is the collect_state method
    in some other class. The code that applies this data to live current
    objects is... presently in assy_become_scanned_state but maybe should be
    a method in this class. ####@@@@
    As of 060302 we *might* (#k) require that even default or _UNSET_
    attrvalues be stored explicitly, since we suspect not doing so can cause
    bugs, particularly in code to apply a state back into live objects. In
    future we might like to optimize by not storing default values; this is
    hard to do correctly now since they are not always the same for all
    objects in one attrdict.
    """
    #e later we'll have one for whole state and one for differential state
    # and decide if they're different classes, etc
    def __init__(self, attrcodes = (), debugname = ""):
        """
        @param attrcodes: sequence of attrcodes ((attrname, acode) pairs)
                          for which to create empty attrdicts up front.
        @param debugname: optional label included in our repr.
        """
        self.debugname = debugname
        self.attrdicts = {}
            # maps attrcodes to their dicts; each dict maps objkeys to
            # values; public attribute for efficiency(??)
        for attrcode in attrcodes:
            self.make_attrdict(attrcode)
        return
    #e methods to apply the data and to help grab the data? see also
    # assy_become_scanned_state, SharedDiffopData (in undo_archive)
    #e future: methods to read and write the data, to diff it, etc, and
    # state-decls to let it be compared...
    #e will __eq__ just be eq on our attrdicts? or should attrdict missing
    # or {} be the same? guess: same.
    def make_attrdict(self, attrcode):
        """
        Make an attrdict for attrcode. Assume we don't already have one.
        """
        assert self.attrdicts.get(attrcode) is None
        self.attrdicts[attrcode] = {}
    def size(self): ##e should this be a __len__ and/or a __nonzero__ method? ### shares code with DiffObj; use common superclass?
        """
        Return the total number of attribute values we're storing
        (over all objects and all attrnames).
        """
        # (use .values() -- portable, unlike the Python-2-only .itervalues();
        #  iterating the values, not the dict itself, is essential: iterating
        #  the dict would count attrcodes, not stored values)
        return sum(map(len, self.attrdicts.values()))
    def __repr__(self): #060405 changed this from __str__ to __repr__
        extra = self.debugname and " (%s)" % self.debugname or ""
        return "<%s at %#x%s, %d attrdicts, %d total values>" % \
               (self.__class__.__name__, id(self), extra, len(self.attrdicts), self.size())
    def __eq__(self, other):
        """
        [this is used by undo_archive to see if the state has really changed]
        """
        # this implem assumes modtimes/change_counters are not stored in the
        # snapshots (or, not seen by diff_snapshots)!
        return self.__class__ is other.__class__ and not diff_snapshots(self, other)
    def __ne__(self, other):
        return not (self == other)
    def val_diff_func_for(self, attrcode):
        """
        Return the special val_diff_func to use for attrcode's values,
        or None to use the default diffing (same_vals / value pairs).
        """
        attr, acode_unused = attrcode
        if attr == '_posnxxx': # kluge, should use an _s_decl; remove xxx when this should be tried out ###@@@ [not used for A7]
            return val_diff_func_for__posn # stub for testing
        return None
    def extract_layers(self, layernames): #060404 first implem
        """
        Remove from self, and return as a new instance of our class, all
        attrdicts belonging to the given layers.
        """
        assert layernames == ('atoms',), "layernames can't be %r" % (layernames,) #e generalize, someday
            # bugfix: layernames is itself a tuple, so it must be wrapped in
            # a 1-tuple for %-formatting -- the old bare "% layernames" broke
            # with "not all arguments converted" for any tuple of length != 1,
            # i.e. exactly when the assert message was needed
        res = self.__class__()
        for attrcode in list(self.attrdicts.keys()):
            # (iterate over a snapshot of the keys, since we pop as we go)
            attr_unused, acode = attrcode
            if _KLUGE_acode_is_special_for_extract_layers(acode):
                res.attrdicts[attrcode] = self.attrdicts.pop(attrcode)
        return res
    def insert_layers(self, layerstuff): #060404 first implem
        """
        Steal layerstuff's attrdicts (which must not overlap ours),
        leaving layerstuff empty.
        """
        for attrcode in list(layerstuff.attrdicts.keys()):
            assert attrcode not in self.attrdicts, \
                   "attrcode %r overlaps, between %r and %r" % (attrcode, self, layerstuff)
        self.attrdicts.update(layerstuff.attrdicts)
        layerstuff.attrdicts.clear() # optional, maybe not even good...
        return
    pass # end of class StateSnapshot
def _KLUGE_acode_is_special_for_extract_layers(acode): #bruce 071114
#e rename when we see all the uses...
# guess: probably just means
# "acode is for a change-tracked attr, not a full-diff-scanned attr"
# (together with assuming that for any class, all or none of its attrs are change-tracked),
# (where by "change-tracked" we mean that changes are reported in registered changedicts --
# not directly related to the changed/usage tracked invalidatable lvalues in changes.py)
# but the code in this file also assumes those objects are all part of
# the so-called "atoms layer".
## was: return acode in ('Atom', 'Bond'): ###@@@ kluge 060404; assume acode is class name... only true for these two classes!!
return type(acode) == type("")
def val_diff_func_for__posn(posns, whatret): # purely a stub for testing, though it should work
    """
    Example val_diff_func for the '_posn' attr: given a pair (p1, p2) of
    Numeric position arrays, return their difference p2 - p1.

    @param posns: a 2-tuple (p1, p2) of Numeric arrays
    @param whatret: unused here (see diffdicts for its general meaning)
    @return: p2 - p1
    @note: rewritten to unpack the pair inside the body rather than in the
           signature, since tuple parameter unpacking was removed from the
           language by PEP 3113; the calling convention is unchanged.
    """
    p1, p2 = posns
    assert type(p1) is _Numeric_array_type
    assert type(p2) is _Numeric_array_type
    return p2 - p1
def diff_snapshots(snap1, snap2, whatret = 0): #060227
    """
    Diff two snapshots. Retval format [needs doc]. Missing attrdicts
    are like empty ones. obj/attr sorting by varid to be added later.
    """
    # collect the union of both snapshots' attrcodes, deterministically
    # ordered (a shallow copy is used only for its keys; the presence of
    # big values shouldn't slow this down)
    union = dict(snap1.attrdicts)
    union.update(snap2.attrdicts)
    attrcodes = sorted(union) # deterministic order, same as .keys() + .sort()
    del union
    dflt = _UNSET_
        # This might be correct, assuming each attrdict has been optimized
        # to not store true dflt val for attrdict, or each hasn't been
        # (i.e. same policy for both). Also it's assumed by
        # diff_snapshots_oneway and its caller. Needs review. ##k ###@@@
        # [060227-28 comment]
    res = {}
    for attrcode in attrcodes:
        d1 = snap1.attrdicts.get(attrcode, {})
        d2 = snap2.attrdicts.get(attrcode, {})
        # diff these dicts; their key sets might differ.
        # 060309 experimental [not used as of 060409]: support special diff
        # algs for certain attrs, like sets or lists, or a single attrval
        # representing all bonds
        val_diff_func = snap2.val_diff_func_for(attrcode)
            # might be None; assume snap2 knows as well as snap1 what to do
        assert val_diff_func == snap1.val_diff_func_for(attrcode)
            # make sure snap1 and snap2 agree (kluge??)
            #e better to use more global knowledge instead? we'll see how
            # it's needed on the diff-applying side... (we ought to clean up
            # the OO structure, when time permits)
        diff = diffdicts(d1, d2, dflt = dflt, whatret = whatret, val_diff_func = val_diff_func)
        if diff:
            res[attrcode] = diff #k ok not to copy its mutable state? I think so...
    return res
        # just a diff-attrdicts-dict, with no empty dict members (so boolean
        # test works ok) -- not a Snapshot itself.
def diff_snapshots_oneway(snap1, snap2):
    """
    diff them, but only return what's needed to turn snap1 into snap2,
    as an object containing attrdicts (all mutable)
    """
    ######@@@@@@ also store val_diff_func per attrcode? [060309 comment]
    forward_diffs = diff_snapshots(snap1, snap2, whatret = 2)
    return DiffObj(forward_diffs)
class DiffObj:
    """
    Holds one-way undo diffs as a dict of attrdicts (attrcode ->
    {objkey: value}), where _UNSET_ stands for missing values and each
    attrdict is independent (unlike StateSnapshot attrdicts).
    """
    attrname_valsizes = dict(_posn = 52)
        # maps attrcode (just attrname for now) to our guess about the size
        # of storing one attrval for that attrcode, in this kind of
        # undo-diff; this figure 52 for _posn is purely a guess (the 3
        # doubles themselves are 24, but they're in a Numeric array and we
        # also need to include the overhead of the entire dict item in our
        # attrdict), and the guesses ought to come from the attr decls
        # anyway, not be hardcoded here (or in future we could measure them
        # in a C-coded copy_val).
    def __init__(self, attrdicts = None):
        self.attrdicts = attrdicts or {}
    def size(self): ### shares code with StateSnapshot; use common superclass?
        """
        Return the total number of attribute value differences we're
        storing (over all objects and all attrnames).
        """
        # (.values() rather than the Python-2-only .itervalues(), for
        #  portability; same result either way)
        return sum(map(len, self.attrdicts.values()))
    def RAM_usage_guess(self): #060323
        """
        Return a rough guess of our RAM consumption, in bytes.
        """
        res = 0
        for attrcode, d in self.attrdicts.items():
            attr, acode_unused = attrcode
            valsize = self.attrname_valsizes.get(attr, 24)
                # it's a kluge to use attr rather than attrcode here;
                # 24 is a guess, and is conservative: 2 pointers in dict
                # item == 8, 2 small pyobjects (8 each??)
            res += len(d) * valsize
        return res
    def __len__(self):
        return self.size()
    def nonempty(self):
        return self.size() > 0
    def __nonzero__(self): # Python 2 truth-value hook
        return self.nonempty()
    __bool__ = __nonzero__
        # Python 3 name for the same hook (harmless in Python 2; without
        # this alias a DiffObj would always be truthy under Python 3)
    def accumulate_diffs(self, diffs): #060409
        """
        Modify self to incorporate a copy of the given diffs (which should
        be another diffobj), so that applying the new self is like applying
        the old self and then applying the given diffs.
        Don't change, or be bothered by future changes to, the given diffs.
        Return None.
        """
        assert isinstance(diffs, DiffObj)
        # What really matters is that its attrdicts use _UNSET_ for missing
        # values, and that each attrdict is independent, unlike for
        # StateSnapshot attrdicts where the meaning of a missing value
        # depends on whether a value is present at that key in any attrdict.
        # Maybe we should handle this instead by renaming 'attrdicts' in one
        # of these objects, or using differently-named get methods for
        # them. #e
        dicts2 = diffs.attrdicts
        dicts1 = self.attrdicts
        for attrcode, d2 in dicts2.items():
            d1 = dicts1.setdefault(attrcode, {})
            d1.update(d2) # even if d1 starts out {}, it's important to copy d2 here, not share it
        return
    pass
def diffdicts(d1, d2, dflt = None, whatret = 0, val_diff_func = None):
    """
    Return a new dict holding one entry for each key at which d1 and d2 have
    values which differ according to same_vals; a value missing from either
    dict is treated as equal to dflt (None unless provided, though many
    callers should pass _UNSET_).

    What gets stored for each differing key depends on whatret and val_diff_func:
    if val_diff_func is None, each stored value is the pair (v1, v2)
    (where v1 = d1.get(key, dflt), and likewise for v2), unless whatret is
    passed as 1 or 2, in which case the stored value is just v1 or just v2
    respectively.
    WARNING: v1, v2 and dflt are never copied, so the returned dict can share
    mutable state with the values of d1 and d2 and with the dflt argument.
    If val_diff_func is not None, it is called with arg1 == (v1, v2) and
    arg2 == whatret, and whatever it returns is stored; in the scheme this is
    intended for, its return value must suffice to reconstruct v2 from v1
    (for whatret == 2), or v1 from v2 (for whatret == 1), or both
    (for whatret == 0), assuming the reconstructor knows which val_diff_func
    was used.
    """
    # implem note: keys common to both dicts are unavoidably scanned twice;
    # the second loop handles only d2-only keys, testing d1.has_key first
    # since "already handled above" is the usual case.
    res = {}
    for key, v1 in d1.iteritems():
        v2 = d2.get(key, dflt)
        if same_vals(v1, v2):
            continue
        res[key] = (v1, v2)
    for key, v2 in d2.iteritems():
        if d1.has_key(key):
            continue # this key was fully handled by the loop above
        if same_vals(dflt, v2):
            continue
        res[key] = (dflt, v2)
    # Post-process the (v1, v2) pairs as requested by val_diff_func / whatret.
    # (Only values are replaced, never keys added/removed, so in-place is fine.)
    if val_diff_func is not None:
        for key in res.keys():
            res[key] = val_diff_func( res[key], whatret )
        # note: we could let callers pass a func that already knows whatret,
        # or treat whatret 0/1/2 as special cases of val_diff_func, but such
        # optims aren't needed -- this is meant for a small number of attrvals
        # each holding lots of data (e.g. one attr knowing all atom positions),
        # i.e. for attrs with their own specialcase diff algorithms.
        # [as of 060330 no real val_diff_func is ever supplied; see val_diff_func_for.]
    elif whatret:
        which = whatret - 1
        for key in res.keys():
            res[key] = res[key][which] # no copy -- see WARNING in docstring
    return res
# ==
##def priorstate_debug(priorstate, what = ""): #e call it, and do the init things
## "call me only to print debug msgs"
## ###@@@ this is to help me zap the "initial_state not being a StatePlace" kluges;
## # i'm not sure where the bad ones are made, so find out by printing the stack when they were made.
## try:
## stack = priorstate._init_stack # compact_stack(), saved by __init__, for StatePlace and StateSnapshot
## except:
## print "bug: this has no _init_stack: %r" % (priorstate,)
## else:
## if not env.seen_before( ("ghjkfdhgjfdk" , stack) ):
## print "one place we can make a %s priorstate like %r is: %s" % (what, priorstate, stack)
## return
def diff_and_copy_state(archive, assy, priorstate): #060228 (#e maybe this is really an archive method? 060408 comment & revised docstring)
    """
    Figure out how the current actual model state (of assy) differs from the last model state we archived (in archive/priorstate).
    Return a new StatePlace (representing a logically immutable snapshot of the current model state)
    which presently owns a complete copy of that state (a mutable StateSnapshot which always tracks our most recent snapshot
    of the actual state), but is willing to give that up (and redefine itself (equivalently) as a diff from a changed version of that)
    when this function is next called.

    @param archive: the undo archive which tracks assy (supplies objkeys, change-tracking, classifier)
    @param priorstate: a StatePlace which currently owns the mutable last-snapshot;
                       on return it is redefined as (diff, new StatePlace), preserving its logical immutability
    @return: a new StatePlace owning the updated snapshot of current state
    """
    # background: we keep a mutable snapshot of the last checkpointed state. right now it's inside priorstate (and defines
    # that immutable-state-object's state), but we're going to grab it out of there and modify it to equal actual current state
    # (as derived from assy using archive), and make a diffobject which records how we had to change it. Then we'll donate it
    # to a new immutable-state-object we make and return (<new>). But we don't want to make priorstate unusable
    # or violate its logical immutability, so we'll tell it to define itself (until further notice) based on <new> and <diffobj>.
    assert isinstance( priorstate, StatePlace) # remove when works, eventually ###@@@
    new = StatePlace() # will be given stewardship of our maintained copy of almost-current state, and returned
    # diffobj is not yet needed now, just returned from diff_snapshots_oneway:
    ## diffobj = DiffObj() # will record diff from new back to priorstate (one-way diff is ok, if traversing it also reverses it)
    steal_lastsnap_method = priorstate.steal_lastsnap
    lastsnap = steal_lastsnap_method( ) # and we promise to replace it with (new, diffobj) later, so priorstate is again defined
    assert isinstance(lastsnap, StateSnapshot) # remove when works, eventually ###@@@
    # now we own lastsnap, and we'll modify it to agree with actual current state, and record the changes required to undo this...
    # 060329: this (to end of function) is where we have to do things differently when we only want to scan changed objects.
    # So we do the old full scan for most kinds of things, but not for the 'atoms layer' (atoms, bonds, Chunk.atoms attr).
    import foundation.undo_archive as undo_archive #e later, we'll inline this until we reach a function in this file
    cursnap = undo_archive.current_state(archive, assy, use_060213_format = True, exclude_layers = ('atoms',)) # cur state of child objs
    lastsnap_diffscan_layers = lastsnap.extract_layers( ('atoms',) ) # prior state of atoms & bonds, leaving only childobjs in lastsnap
    diffobj = diff_snapshots_oneway( cursnap, lastsnap ) # valid for everything except the 'atoms layer' (atoms & bonds)
    ## lastsnap.become_copy_of(cursnap) -- nevermind, just use cursnap
    lastsnap = cursnap
    del cursnap
    modify_and_diff_snap_for_changed_objects( archive, lastsnap_diffscan_layers, ('atoms',), diffobj, lastsnap._childobj_dict ) #060404
    lastsnap.insert_layers(lastsnap_diffscan_layers)
    new.own_this_lastsnap(lastsnap)
    priorstate.define_by_diff_from_stateplace(diffobj, new)
    new.really_changed = not not diffobj.nonempty() # remains correct even when new's definitional content changes
    return new
def modify_and_diff_snap_for_changed_objects( archive, lastsnap_diffscan_layers, layers, diffobj, childobj_dict ): #060404
#e rename lastsnap_diffscan_layers
"""
[this might become a method of the undo_archive; it will certainly be generalized, as its API suggests]
- Get sets of changed objects from (our subs to) global changedicts, and clear those.
- Use those to modify lastsnap_diffscan_layers to cover changes tracked
in the layers specified (for now only 'atoms' is supported),
- and record the diffs from that into diffobj.
"""
assert len(layers) == 1 and layers[0] == 'atoms' # this is all that's supported for now
# Get the sets of possibly changed objects... for now, this is hardcoded as 2 dicts, for atoms and bonds,
# keyed by atom.key and id(bond), and with changes to all attrs lumped together (though they're tracked separately);
# these dicts sometimes also include atoms & bonds belonging to other assys, which we should ignore.
# [#e Someday we'll need to generalize this --
# how will we know which (or how many) dicts to look in, for changed objs/attrs?
# don't the attr decls that set up layers also have to tell us that?
# and does that get recorded somehow in this lastsnap_diffscan_layers object,
# which knows the attrs it contains? yes, that would be good... for some pseudocode
# related to this, see commented-out method xxx, just below.]
chgd_atoms, chgd_bonds = archive.get_and_clear_changed_objs()
if (env.debug() or DEBUG_PYREX_ATOMS):
print "\nchanged objects: %d atoms, %d bonds" % (len(chgd_atoms), len(chgd_bonds))
# discard wrong assy atoms... can we tell by having an objkey? ... imitate collect_s_children and (mainly) collect_state
keyknower = archive.objkey_allocator
_key4obj = keyknower._key4obj
changed_live = {} # both atoms & bonds; we'll classify, so more general (for future subclasses); not sure this is worth it
changed_dead = {}
archive._childobj_dict = childobj_dict # temporarily stored, for use by _oursQ and _liveQ methods (several calls deep) [060408]
for akey_unused, obj in chgd_atoms.iteritems():
# akey is atom.key, obj is atom
key = _key4obj.get(id(obj)) # inlined keyknower.key4obj; key is objkey, not atom.key
if key is None:
# discard obj unless it's ours; these submethods are also allowed to say 'no' for dead objs, even if they're one of ours
if archive.new_Atom_oursQ(obj):
key = 1 # kluge, only truth value matters; or we could allocate the key and use that, doesn't matter for now
if key:
if archive.trackedobj_liveQ(obj):
changed_live[id(obj)] = obj
else:
changed_dead[id(obj)] = obj
for idobj, obj in chgd_bonds.iteritems():
key = _key4obj.get(idobj)
if key is None:
if archive.new_Bond_oursQ(obj):
key = 1
if key:
if archive.trackedobj_liveQ(obj):
changed_live[id(obj)] = obj
else:
changed_dead[id(obj)] = obj
archive._childobj_dict = None
## print "changed_live = %s, changed_dead = %s" % (changed_live,changed_dead)
key4obj = keyknower.key4obj_maybe_new
diff_attrdicts = diffobj.attrdicts
for attrcode in archive.obj_classifier.dict_of_all_state_attrcodes.iterkeys():
acode = attrcode[1]
## if acode in ('Atom', 'Bond'):
if _KLUGE_acode_is_special_for_extract_layers(acode):
diff_attrdicts.setdefault(attrcode, {}) # this makes some diffs too big, but speeds up our loops ###k is it ok??
# if this turns out to cause trouble, just remove these dicts at the end if they're still empty
state_attrdicts = lastsnap_diffscan_layers.attrdicts
objclsfr = archive.obj_classifier
ci = objclsfr.classify_instance
# warning: we now have 3 similar but not identical 'for attrcode' loop bodies,
# handling live/dflt, live/no-dflt, and dead.
for idobj, obj in changed_live.iteritems(): # similar to obj_classifier.collect_state
key = key4obj(obj)
clas = ci(obj) #e could heavily optimize this if we kept all leaf classes separate; probably should
for attrcode, dflt in clas.attrcode_dflt_pairs:
attr, acode_unused = attrcode
## state_attrdict = state_attrdicts[attrcode] #k if this fails, just use setdefault with {} [it did; makes sense]
## state_attrdict = state_attrdicts.setdefault(attrcode, {})
state_attrdict = state_attrdicts[attrcode] # this should always work now that _archive_meet_class calls classify_class
##e could optim by doing this before loop (or on archive init?), if we know all changetracked classes (Atom & Bond)
val = getattr(obj, attr, dflt)
if val is dflt:
val = _UNSET_ # like collect_state; note, this is *not* equivalent to passing _UNSET_ as 3rd arg to getattr!
oldval = state_attrdict.get(key, _UNSET_) # see diffdicts and its calls, and apply_and_reverse_diff
# if we knew they'd be different (or usually different) we'd optim by using .pop here...
# but since we're lumping together all attrs when collecting changed objs, these vals are usually the same
if not same_vals(oldval, val):
# a diff! put a copy of val in state, and oldval (no need to copy) in the diffobj
if val is _UNSET_: # maybe this is not possible, it'll be dflt instead... i'm not positive dflt can't be _UNSET_, tho...
state_attrdict.pop(key, None) # like del, but works even if no item there ###k i think this is correct
else:
state_attrdict[key] = copy_val(val)
diff_attrdicts[attrcode][key] = oldval
dflt = None
del dflt
for attrcode in clas.attrcodes_with_no_dflt:
attr, acode_unused = attrcode
state_attrdict = state_attrdicts[attrcode]
val = getattr(obj, attr, _Bugval)
oldval = state_attrdict.get(key, _UNSET_) # not sure if _UNSET_ can happen, in this no-dflt case
if not same_vals(oldval, val):
state_attrdict[key] = copy_val(val)
try:
diff_attrdict = diff_attrdicts[attrcode]
except:
print "it failed, keys are", diff_attrdicts.keys() ####@@@@ should not happen anymore
print " and state ones are", state_attrdicts.keys()
## sys.exit(1)
diff_attrdict = diff_attrdicts[attrcode] = {}
diff_attrdict[key] = oldval #k if this fails, just use setdefault with {}
attrcode = None
del attrcode
for idobj, obj in changed_dead.iteritems():
#e if we assumed these all have same clas, we could invert loop order and heavily optimize
key = key4obj(obj)
clas = ci(obj)
for attrcode in clas.dict_of_all_state_attrcodes.iterkeys():
# (this covers the attrcodes in both clas.attrcode_dflt_pairs and clas.attrcodes_with_no_dflt)
attr, acode_unused = attrcode
state_attrdict = state_attrdicts[attrcode]
val = _UNSET_ # convention for dead objects
oldval = state_attrdict.pop(key, _UNSET_) # use pop because we know val is _UNSET_
if oldval is not val:
diff_attrdicts[attrcode][key] = oldval
attrcode = None
del attrcode
if 1:
from foundation.undo_archive import _undo_debug_obj, _undo_debug_message
obj = _undo_debug_obj
if id(obj) in changed_dead or id(obj) in changed_live:
# this means obj is not None, so it's ok to take time and print things
key = _key4obj.get(id(obj))
if key is not None:
for attrcode, attrdict in diff_attrdicts.iteritems():
if attrdict.has_key(key):
_undo_debug_message( "diff for %r.%s gets %r" % (obj, attrcode[0], attrdict[key]) )
#e also for old and new state, i guess, and needed in apply_and_reverse_diff as well
pass
return # from modify_and_diff_snap_for_changed_objects
#e for the future, this pseudocode is related to how to generalize the use of chgd_atoms, chgd_bonds seen above.
##def xxx( archive, layers = ('atoms',) ): #bruce 060329; is this really an undo_archive method?
## # ok, we've had a dict subscribed for awhile (necessarily), just update it one last time, then we have the candidates, not all valid.
## # it's sub'd to several things... which somehow reg'd themselves in the 'atoms' layer...
## for layer in layers: # perhaps the order matters, who knows
## #e find some sort of object which knows about that layer; do we ask our obj_classifier about this? does it last per-session?
## # what it knows is the changedict_processors, and a dict we have subscribed to them...
## # hmm, who owns this dict? the archive? i suppose.
## layer_obj = archive.layer_obj(layer)##@@ IMPLEM
## layer_obj.update_your_changedicts()##@@ IMPLEM
## layer_obj.get_your_dicts() # ....
## # ...
## return
# ==
class StatePlace:
    """
    Basically an lval holding either a StateSnapshot or a (diffobj, StatePlace)
    pair; i.e. a mutable representation of a logically immutable snapshot.
    """
    # WARNING: as of 060309 some callers use a kluge which would break if anything
    # set the attr 'attrdicts' on this object. ##@@ fix
    # [update 060407: mostly fixed earlier today, but some compatibility try/except
    #  code still probes for something like data.attrdicts, so until that's removed
    #  we'd better not have that attr here.]
    def __init__(self, lastsnap = None):
        # invariant (between public calls): we're defined either by owning a
        # mutable StateSnapshot in self.lastsnap, or by a (diff, place) pair
        # in self.diff_and_place -- never both.
        self.lastsnap = lastsnap # None, or a mutable StateSnapshot we own
        self.diff_and_place = None
        return
    def own_this_lastsnap(self, lastsnap):
        """Take ownership of lastsnap; we must not currently be defined by anything."""
        assert self.lastsnap is None
        assert self.diff_and_place is None
        assert lastsnap is not None
        self.lastsnap = lastsnap
        return
    def define_by_diff_from_stateplace(self, diff, place):
        """Redefine our (logically unchanged) state as <place>'s state with <diff> applied."""
        assert self.lastsnap is None
        assert self.diff_and_place is None
        self.diff_and_place = (diff, place)
    def steal_lastsnap(self):
        """Give up our snapshot to the caller, who promises to later redefine us (e.g. by a diff from it)."""
        assert self.lastsnap is not None
        res = self.lastsnap
        self.lastsnap = None
        res._childobj_dict = None # caller is about to modify the snap but won't keep this dict in sync [060408]
        return res
    #e methods for pulling the snap back to us, too... across the pointer self.diff_and_place, i guess
    def get_snap_back_to_self(self, accum_diffobj = None):
        """
        Ensure self directly owns the shared mutable snapshot, pulling it here
        across our chain of diffs if necessary.
        [recursive (so might not be ok in practice)]
        [if accum_diffobj is passed, accumulate the diffs we traversed into it]
        """
        # (a bug was predicted if this ran on the initial state, but it never
        #  materialized, and I don't know why not!!! [bruce 060309])
        # note: typically called twice per undoable command --
        # probably due to one recursive call rather than begin+end cp.
        if self.lastsnap is not None:
            return # the snap is already here
        diff, place = self.diff_and_place
        self.diff_and_place = None
        # recursive part: heal <place> so it owns the snap, steal the snap,
        # apply-and-reverse the diff, then redefine <place> in terms of us.
        place.get_snap_back_to_self(accum_diffobj = accum_diffobj) # permits steal_lastsnap to work on it
        lastsnap = place.steal_lastsnap()
        if accum_diffobj is not None: #060407/060409, for optimizing mash_attrs; not yet routinely called
            accum_diffobj.accumulate_diffs(diff)
        apply_and_reverse_diff(diff, lastsnap) # modifies both args in place; no need for copy_val
        place.define_by_diff_from_stateplace(diff, self) # place will now be healed as soon as we are
        self.lastsnap = lastsnap # inlined self.own_this_lastsnap(lastsnap)
        return
    #e and for access to it, for storing it back into assy using archive
    def get_attrdicts_for_immediate_use_only(self): # [renamed, 060309]
        """
        WARNING: only for immediate use without modification!
        The returned dicts are shared with mutable state which *will* be modified
        the next time any other stateplace has this method (or any caller of
        get_snap_back_to_self) called on it, if not sooner!
        """
        self.get_snap_back_to_self()
        return self.lastsnap.attrdicts
    def get_attrdicts_relative_to_lastsnap(self): #060407 late, done & always used as of 060409
        """
        WARNING: the return value depends on which stateplace most recently had
        this method (or get_attrdicts_for_immediate_use_only, i.e. any caller of
        get_snap_back_to_self) run on it!!
        [I think that's all, but whether more needs to be said ought to be analyzed sometime. ##k]
        """
        accum_diffobj = DiffObj()
        self.get_snap_back_to_self(accum_diffobj = accum_diffobj)
        return accum_diffobj.attrdicts #e might be better to add methods to diffobj and return it here
    def _relative_RAM(self, priorplace): #060323
        """
        Guess the RAM needed to retain the diff data letting this state be
        converted (by Undo) into the state represented by priorplace (also a StatePlace).
        Temporary kluge: only call at certain times, soon after self finalized; details unclear.
        """
        return 18 # stub
    pass # end of class StatePlace
def apply_and_reverse_diff(diff, snap):
    """
    Given a DiffObj <diff> and a StateSnapshot <snap> (both mutable), modify <snap>
    by applying <diff> to it, simultaneously recording each value displaced from
    <snap> back into <diff>, thereby turning <diff> into the reverse diff.
    Missing values in <snap> are represented as _UNSET_ in <diff>, in both
    directions (whether found in or stored into <snap>).
    Return None, to remind caller we modify our argument objects.
    (Note: calling this again on the reversed diff and the now-modified snap
    should undo its effect entirely.)
    """
    # [note, 060407: a feature of per-attrcode specialized diff-restoring funcs
    #  was planned here (WHERE I AM 060309) but turned out not to be needed for A7
    #  and was never finished; if revived, it raises the problem that this code
    #  produces values ready for setattr, whereas a specialized restorer would
    #  need to do the setattr/modify itself. See also the scanning side, and
    #  (for A8) binary mmp saving of snaps.]
    for attrcode, diffdict in diff.attrdicts.items():
        snapdict = snap.attrdicts.setdefault(attrcode, {})
        for key, newval in diffdict.iteritems():
            # iteritems is safe even though we assign into diffdict, since we
            # never add or remove its items (though we do in snapdict)
            displaced = snapdict.get(key, _UNSET_)
            if newval is _UNSET_:
                del snapdict[key] # always works, or there was no difference in val at this key!
                    # (if snapdict becomes empty, nevermind; leaving it that way is ok)
            else:
                snapdict[key] = newval
            diffdict[key] = displaced
                # even when displaced is _UNSET_ it indicates a diff, so the item
                # must be retained in diffdict or we'd think there was no diff at this key!
    return
# ==
# Terminology/spelling note: in comments, we use "class" for python classes, "clas" for Classification objects.
# In code, we can't use "class" as a variable name (since it's a Python keyword),
# so we might use "clas" (but that's deprecated since we use it for Classification objects too),
# or "class1", or something else.
class obj_classifier:
"""
Classify objects seen, and save the results, and provide basic uses of the results for scanning.
Probably can't yet handle "new-style" classes [this is being worked on as of 090206, see InstanceLike in the code].
Doesn't handle extension types (presuming they're not InstanceTypes) [not sure].
Note: the amount of data this stores is proportional to the number of classes and state-holding attribute declarations;
it doesn't (and shouldn't) store any per-object info. I.e. it soon reaches a small fixed size, regardless of number of objects
it's used to classify.
"""
    def __init__(self):
        """
        Set up empty per-session caches. They grow as classes are declared to us,
        so their size is proportional to the number of classes and attr decls,
        never to the number of objects classified.
        """
        self._clas_for_class = {}
            # maps Python classes (values of obj.__class__ for obj an
            # InstanceType/InstanceLike, for now) to Classifications
        self.dict_of_all_state_attrcodes = {} # maps attrcodes to arbitrary values, for all state-holding attrs ever declared to us
        self.dict_of_all_Atom_chunk_attrcodes = {} # same, only for attrcodes for .molecule attribute of UNDO_SPECIALCASE_ATOM classes
        self.attrcodes_with_undo_setattr = {} # see doc in clas (i.e. in the InstanceClassification)
        return
def classify_instance(self, obj):
"""
Obj is known to be InstanceType or InstanceLike.
Classify it (memoizing classifications per class when possible).
It might be a StateHolder, Data object, or neither.
"""
if DEBUG_PYREX_ATOMS:
if not (type(obj) is InstanceType or isinstance(obj, InstanceLike)):
print "bug: type(%r) is not InstanceType or InstanceLike" % (obj,)
### too verbose if fails!! btw why is it a bug?
# [bruce 080221 re DEBUG_PYREX_ATOMS; comment might be obs
# since it's from before InstanceLike existed]
class1 = obj.__class__
try:
# easy & usual case: we recognize that __class__ -- just return the memoized answer.
# (Only correct if that class doesn't declare to us that we need to redo this per-object, but such a decl is nim.)
# (This is redundant with the same optimization in classify_class, which needs it for direct calls, but that doesn't matter.)
# (This is probably fast enough that we don't need to bother storing a map from id(obj) or objkey directly to clas,
# which might slow us down anyway by using more memory.)
# (#e future optim, though: perhaps store clas inside the object, and also objkey, as special attrs)
return self._clas_for_class[class1]
except KeyError:
return self.classify_class(class1)
pass
    def classify_class(self, class1):
        """
        Find or make (and return) an InstanceClassification for this class;
        if you make it, memoize it and record info about its attr decls.
        """
        try:
            return self._clas_for_class[class1] # redundant when called from classify_instance, but needed for direct calls
        except KeyError:
            class_alias = getattr(class1, '_s_undo_class_alias', None)
            if class_alias and class_alias is not class1:
                # class1 asked to be treated like class_alias for undo purposes;
                # classify the alias (recursively) and memoize its clas for class1 too
                clas = self._clas_for_class[class1] = self.classify_class(class_alias)
                return clas
            clas = self._clas_for_class[class1] = InstanceClassification(class1)
            # merge this class's declared attr info into our session-wide records
            self.dict_of_all_state_attrcodes.update( clas.dict_of_all_state_attrcodes )
            self.dict_of_all_Atom_chunk_attrcodes.update( clas.dict_of_all_Atom_chunk_attrcodes )
            self.attrcodes_with_undo_setattr.update( clas.attrcodes_with_undo_setattr )
            #bruce 060330 not sure if the following can be fully zapped, though most of it can. Not sure how "cats" are used yet...
            # wondering if acode should be classname. ###@@@
            # ... ok, most of it can be zapped; here's enough to say what it was about:
            ## # Store per-attrdict metainfo, which in principle could vary per-class but should be constant for one attrdict.
            ## ....
            ## self.kluge_attr2metainfo[attr] = attr_metainfo
            ## self.kluge_attr2metainfo_from_class[attr] = clas # only for debugging
            if DEBUG_PYREX_ATOMS:
                if not env.seen_before("DEBUG_PYREX_ATOMS"):
                    from utilities.GlobalPreferences import usePyrexAtomsAndBonds
                    on = usePyrexAtomsAndBonds()
                    print "\nDEBUG_PYREX_ATOMS: Pyrex atoms is", on and "ON" or "OFF"
                    print
                print "DEBUG_PYREX_ATOMS: classify_class made InstanceClassification for %s" % (clas.class1.__name__,)
            return clas
        pass
    def collect_s_children(self, val, deferred_category_collectors = {}, exclude_layers = ()): #060329/060404 added exclude_layers
        # NOTE(review): mutable default arg for deferred_category_collectors is shared
        # across calls; it appears to be only read here (passed down to scan_children),
        # never mutated directly -- confirm before relying on that.
        """
        Collect all objects in val, and their s_children, defined as state-holding objects
        found (recursively, on these same objects) in their attributes which were
        declared S_CHILD or S_CHILDREN or S_CHILDREN_NOT_DATA using the state attribute decl system... [#doc that more precisely]
        return them as the values of a dictionary whose keys are their python id()s.
        Note: this scans through "data objects" (defined as those which define an '_s_isPureData' attribute on their class)
        only once, but doesn't include them in the return value. This is necessary (I think) because
        copy_val copies such objects. (Whether it's optimal is not yet clear.)
        If deferred_category_collectors is provided, it should be a dict from attr-category names
        (e.g. 'selection', 'view') to usually-empty dicts, into which we'll store id/obj items
        which we reach through categorized attrs whose category names it lists, rather than scanning them
        recursively as usual. (Note that we still scan the attr values, just not the objects found only inside them.)
        If we reach one object along multiple attr-paths with different categories,
        we decide what to do independently each time (thus perhaps recursively scanning the same object
        we store in a dict in deferred_category_collectors, or storing it in more than one of those dicts).
        Caller should ignore such extra object listings as it sees fit.
        """
        #e optimize for hitting some children lots of times, by first storing on id(obj), only later looking up key (if ever).
        saw = {}
        def func(obj):
            saw[id(obj)] = obj
        scan_val(val, func)
        # now we have some objects to classify and delve into.
        # for each one, we call this (more or less) on val of each child attribute.
        # but we need to do this in waves so we know when we're done. and not do any obj twice.
        # (should we detect cycles of children, which is presumably an error? not trivial to detect, so no for now.)
        # ... this is just transitive closure in two steps, obj to some vals, and those vals scanned (all together).
        # So write the obj to "add more objs to a dict" func. then pass it to a transclose utility, which takes care
        # of knowing which objs are seen for first time.
        data_objs = {}
        # optimized attr accesses: [060315]
        env_debug = env.debug()
        classify_instance = self.classify_instance
        def obj_and_dict(obj1, dict1): #e rename
            """
            pass me to transclose; I'll store objs into dict1 when I reach them from a child attribute of obj; all objs are
            assumed to be instances of the kind acceptable to classify_instance.
            """
            # note, obj1 might be (what we consider) either a StateHolder or a Data object (or neither).
            # Its clas will know what to do.
            if 1: #bruce 090206 revised ## env_debug or DEBUG_PYREX_ATOMS:
                #bruce 060314: realized there was a bug in scan_val -- it stops at all elements of lists, tuples, and dicts,
                # rather than recursing as intended and only stopping at InstanceType/InstanceLike objects.
                # (copy_val and same_vals (py implems anyway) don't have this bug.)
                # Does this happen in practice in Undo, or do we so far only store child objs 1 level deep in lists or dicts?
                # (Apparently it never happens, since following debug code doesn't print anything.)
                # What would the following code do if it happened?
                # Would it be most efficient/flexible/useful to decide this is a good feature of scan_val,
                # and make this code tolerate it?
                #bruce 060315: decided to fix scan_val.
                ##k Once this is tested, should this check depend on atom_debug?
                # Maybe in classify_instance? (Maybe already there?) ###@@@
                if not isinstance(obj1, InstanceLike): #bruce 080325, 090206 revised
                    print "debug: bug: scan_children hit obj at %#x of type %r" % (id(obj1), type(obj1))
            clas = classify_instance(obj1)
            if clas.obj_is_data(obj1):
                data_objs[id(obj1)] = obj1
            def func(obj):
                dict1[id(obj)] = obj
            clas.scan_children( obj1, func, #k is scan_children correct for obj1 being data?
                                deferred_category_collectors = deferred_category_collectors,
                                exclude_layers = exclude_layers )
        allobjs = transclose( saw, obj_and_dict) #e rename both args
        if DEBUG_PYREX_ATOMS: ## 0 and env.debug():
            print "atom_debug: collect_s_children had %d roots, from which it reached %d objs, of which %d were data" % \
                  (len(saw), len(allobjs), len(data_objs))
        # allobjs includes both state-holding and data-holding objects. Remove the latter.
        for key in data_objs.iterkeys():
            del allobjs[key]
        return allobjs # from collect_s_children
    def collect_state(self, objdict, keyknower, exclude_layers = ()): #060329/060404 added exclude_layers
        """
        Given a dict from id(obj) to obj, which is already transclosed to include all objects of interest,
        ensure all these objs have objkeys (allocating them from keyknower (an objkey_allocator instance) as needed),
        and grab the values of all their state-holding attrs,
        and return this in the form of a StateSnapshot object.
        #e In future we'll provide a differential version too.
        """
        key4obj = keyknower.key4obj_maybe_new # or our arg could just be this method
        attrcodes = self.dict_of_all_state_attrcodes.keys()
        if exclude_layers:
            assert exclude_layers == ('atoms',) # the only one we support right here
            attrcodes = filter( lambda (attr, acode):
                                ## acode not in ('Atom', 'Bond'),
                                not _KLUGE_acode_is_special_for_extract_layers(acode),
                                attrcodes )
                # this is required, otherwise insert_layers (into this) will complain about these layers already being there
        snapshot = StateSnapshot(attrcodes)
            # make a place to keep all the values we're about to grab
        attrdicts = snapshot.attrdicts
        len1 = len(objdict) # recorded so we can detect mutation of objdict during the scan (see check at end)
        if DEBUG_PYREX_ATOMS:
            print "\nDEBUG_PYREX_ATOMS: collect_state len(objdict) = %d" % len1
        for obj in objdict.itervalues():
            key = key4obj(obj)
            clas = self.classify_instance(obj)
            if DEBUG_PYREX_ATOMS: ## if 0 and 'ENABLE SLOW TEST CODE': # @@@@@@@ 080221
                if exclude_layers:
                    assert exclude_layers == ('atoms',) # the only one we support right here
                    ## print "remove when works, once this code is debugged -- too slow!" ### bruce 071114
                    ## if not ( clas.class1.__name__ not in ('Atom', 'Bond') ):
                    if getattr(obj, '_s_undo_specialcase', None) in (UNDO_SPECIALCASE_ATOM,
                                                                     UNDO_SPECIALCASE_BOND):
                        print "bug: exclude_layers didn't stop us from seeing", obj
            # hmm, use attrs in clas or use __dict__? Either one might be way too big... start with smaller one? nah. guess.
            # also we might as well use getattr and be more flexible (not depending on __dict__ to exist). Ok, use getattr.
            # Do we optim dflt values of attrs? We ought to... even when we're differential, we're not *always* differential.
            ###e need to teach clas to know those, then.
            for attrcode, dflt in clas.attrcode_dflt_pairs: # for attrs holding state (S_DATA, S_CHILD*, S_PARENT*, S_REF*) with dflts
                attr, acode_unused = attrcode
                if clas.exclude(attr, exclude_layers):
                    if env.debug() or DEBUG_PYREX_ATOMS:###@@@ rm when works
                        print "debug: collect_state exclude_layers1 excludes", attr, "of", obj
                    continue
                val = getattr(obj, attr, dflt)
                # note: this dflt can depend on key -- no need for it to be the same within one attrdict,
                # provided we have no objects whose attrs all have default values and all equal them at once [060302]
                if val is not dflt: # it's important in general to use 'is' rather than '==' (I think), e.g. for different copies of {}
                    # We might need to store a copy of val, or we might not if val == dflt and it's not mutable.
                    # There's no efficient perfect test for this, and it's not worth the runtime to even guess it,
                    # since for typical cases where val needn't be stored, val is dflt since instance didn't copy it.
                    # (Not true if Undo stored the val back into the object, but it won't if it doesn't copy it out!)
                    attrdicts[attrcode][key] = copy_val(val)
            for attrcode in clas.attrcodes_with_no_dflt:
                # (This kind of attr might be useful when you want full dicts for turning into Numeric arrays later. Not sure.)
                # Does that mean the attr must always exist on obj? Or that we should "store its nonexistence"?
                # For simplicity, I hope latter case can always be thought of as the attr having a default.
                # I might need a third category of attrs to pull out of __dict__.get so we don't run __getattr__ for them... ##e
                #val = getattr(obj, attr)
                #valcopy = copy_val(val)
                #attrdict = attrdicts[attr]
                #attrdict[key] = valcopy
                attr, acode_unused = attrcode
                if clas.exclude(attr, exclude_layers):
                    if env.debug() or DEBUG_PYREX_ATOMS:###@@@ rm when works
                        print "debug: collect_state exclude_layers2 excludes", attr, "of", obj
                    continue
                attrdicts[attrcode][key] = copy_val(getattr(obj, attr, _Bugval))
                # We do it all in one statement, for efficiency in case compiler is not smart enough to see that local vars
                # would not be used again; it might even save time due to lack of bytecodes to update linenumber
                # to report in exceptions! (Though that will make bugs harder to track down, if exceptions occur.)
                #
                #bruce 060311 adding default of Bugval to protect us from bugs (nonexistence of attr) without entirely hiding
                # them. In theory, if this ever happens in correct code, then this attrval (or whether it's set) shouldn't matter.
                # I want to use something recognizable, but not _UNSET_ since that would (I think) conflict with its meaning
                # in diffs (not sure). If it turns out this is not always a bug, I can make this act similarly to _UNSET_
                # in that, when storing it back, I can unset the attr (this would make Undo least likely to introduce a bug).
                # I'm not yet doing that, but I added a comment mentioning _Bugval next to the relevant setattr in undo_archive.py.
        len2 = len(objdict)
        if len1 != len2:
            # this should be impossible
            print "bug: collect_state in %r sees objdict (id %#x) modified during itervalues (len %d -> %d)" % \
                  (self, id(objdict), len1, len2)
        if 0 and env.debug():
            print "atom_debug: collect_state got this snapshot:", snapshot
        ## if 1: #####@@@@@
        ##     snapshot.print_value_stats() # NIM
        return snapshot # from collect_state
def reset_obj_attrs_to_defaults(self, obj):
    """
    Reset each state attribute of obj which has a declared default value
    (i.e. each attr in our classification's attrcode_dflt_pairs) back to
    that default. Attrs with no declared default are left untouched.
    [#e someday we might also reset S_CACHE attrs, but not for now.]
    """
    from foundation.undo_archive import _undo_debug_obj, _undo_debug_message
    clas = self.classify_instance(obj)
    debugging_this_obj = (obj is _undo_debug_obj)
    for attrcode, dflt in clas.attrcode_dflt_pairs:
        attr = attrcode[0] # attrcode is (attr, acode); acode not needed here
        if debugging_this_obj:
            _undo_debug_message("undo/redo: reset dflt %r.%s = %r" % (obj, attr, dflt))
        # No copy_val on dflt: values are only left unstored when they 'are'
        # the default object, so sharing it back is safe -- unless a class
        # depends on unset attrs being a mutable shared class attr, which is
        # bad enough style that we declare it unsupported (though detecting
        # the danger would be nice someday #e). [060302/060311]
        setattr(obj, attr, dflt)
        #e maybe someday: delattr(obj, attr) to save RAM, once we're sure
        # that's safe (the only way we get dflts is when it would work?)
    # note: clas.attrcodes_with_no_dflt needs no pass here -- nothing to reset
    return
pass # end of class obj_classifier
# ==
class InstanceLike(object):
    """
    Shared baseclass for every class whose instances ought to be treated
    as "instancelike" by same_vals, copy_val, scan_vals, is_mutable, and
    Undo -- i.e. allowed to participate in those APIs by defining the
    special methods/attrs that customize their behavior there.

    (Code which once tested type(obj) == InstanceType can instead test
    isinstance(obj, InstanceLike), which also covers new-style classes.)
    """
    # note: deriving from 'object' makes Atom, Bond, and all Nodes
    # new-style classes; that seems to work, and newer code now depends
    # on it, so it must be retained. [bruce 090206]
    pass
class IdentityCopyMixin(InstanceLike): # by EricM
    """
    Mixin for objects which care about their identity: copy_val "copies"
    such an object by simply returning another reference to it.
    """
    def _copyOfObject(self):
        """
        copy_val support. An identity-caring object is copied as a new
        reference to the very same object, so just return self.
        Subclasses have no need to override this.
        Compare with the contrasting behavior of DataMixin._copyOfObject().
        """
        return self
    def _isIdentityCopyMixin(self):
        """
        Flag method which lets callers distinguish objects inheriting
        DataMixin from those inheriting IdentityCopyMixin. A class should
        inherit only one of those two mixin interfaces (StateMixin counts,
        since it inherits IdentityCopyMixin), so _isIdentityCopyMixin and
        _s_isPureData should never both be defined on one object. This can
        be used to check coverage of types in _generalCopier().
        """
        pass
    pass
# Review: The only known classes which need IdentityCopyMixin but not StateMixin
# are Elem, AtomType, and Movie (as of 080321). In the case of Elem and AtomType,
# this is because they are immutable (EricM suggested an Immutable class
# to document that). In the case of Movie, it's not immutable, but it's free of
# any state known to Undo. If more examples of this arise, it will make sense
# to classify them and figure out if they should inherit from any new declarative
# classes. [bruce 080321 comment]
class StateMixin( _eq_id_mixin_, IdentityCopyMixin ):
    """
    Convenience mixin for classes carrying state-attribute (_s_attr)
    declarations: helps them follow the rules for __eq__, avoids debug
    warnings when they contain no attr decls yet, and may someday supply
    convenience methods (none are defined so far).
    Only useful for classes which contain undoable state, as of 071009.
    """
    # experiment: leave out the fake private attr decl below
    ## _s_attr__StateMixin__fake = S_IGNORE
    # (that was a decl for a fake attr __fake, name-mangled to
    #  _StateMixin__fake so it stays private to this mixin, whose sole
    #  purpose was to suppress warnings about classes with no declared
    #  state attrs without requiring them to be registered (which might
    #  be nim) -- acceptable, since adding this mixin implies you thought
    #  about whether such decls were needed)
    def _undo_update(self):
        """
        #doc [see docstring in chunk]
        """
        return
    pass
class DataMixin(InstanceLike):
"""
Convenience mixin for classes that act as 'data' when present in
values of declared state-holding attributes. Provides method stubs
to remind you when you haven't defined a necessary method. Makes
sure state system treats this object as data (and doesn't warn
about it). All such data-like classes which may be handled by
copy_val must inherit DataMixin.
"""
_s_isPureData = None # value is arbitrary; only presence of attr matters
# TODO: rename _s_isPureData -- current name is misleading (used to
# indicate mutability, but not all data is mutable; maybe this means
# we need to let it be overridden or introduce a third subclass?
# [bruce 090206 comment])
# note: presence of this attribute makes sure this object is treated as data.
# (this is a kluge, and an isinstance test might make more sense,
# but at the moment that might be an import cycle issue.)
# [by EricM, revised by Bruce 090206]
def _copyOfObject(self):
"""
This method must be defined in subclasses to implement
the copying of an object for copy_val. For data
objects (which inherit from DataMixin, or define
_s_isPureData), this should return a newly allocated object
which will be __eq__ to the original, but which will have a
different id(). Implementation of this method must be
compatible with the implementation of __eq__ for this class.
This method has a name which appears private, solely for performance
reasons. In particular, InvalMixin.__getattr__() has a fast
return for attributes which start with an underscore. Many
objects (like atoms) inherit from InvalMixin, and looking up
non-existent attributes on them takes significantly longer if
the attribute name does not start with underscore. In
general, such objects should inherit from IdentityCopyMixin as
well, and thus have _copyOfObject defined in order to avoid
exception processing overhead in _generalCopier(), so it
doesn't really matter. Should something slip through the
cracks, at least we're only imposing one slowdown on the copy,
and not two.
"""
print "_copyOfObject needs to be overridden in", self
print " (implem must be compatible with __eq__)"
return self
def __eq__(self, other):
print "__eq__ needs to be overridden in", self
print " (implem must be compatible with _copyOfObject; " \
"don't forget to avoid '==' when comparing Numeric arrays)"
return self is other
def __ne__(self, other):
return not (self == other)
# this uses the __eq__ above, or one which the specific class defined
pass
# ===
# test code
def _test():
    """
    Minimal smoke test: run copy_val over a handful of simple values and
    report (via _test1) any whose copy doesn't compare equal.
    """
    print("testing some simple cases of copy_val")
    from Numeric import array
    cases = [2,
             3,
             "string",
             [4, 5],
             (6.0,),
             {7: 8, 9: 10},
             array([2, 3]),
             None]
    for case in cases:
        _test1(case)
    print("done")
def _test1(obj): #e perhaps only works for non-pyobj types for now
    """
    Check that copy_val(obj) compares equal to obj; print a failure
    message naming obj if it doesn't.
    """
    if obj != copy_val(obj):
        print("failed for %r" % (obj,))
if __name__ == '__main__':
    # run the copy_val self-test when this module is executed as a script
    _test()
# end
| NanoCAD-master | cad/src/foundation/state_utils.py |
# Copyright 2007-2008 Nanorex, Inc. See LICENSE file for details.
"""
FeatureDescriptor.py - descriptor objects for program features
@author: Bruce
@version: $Id$
@copyright: 2007-2008 Nanorex, Inc. See LICENSE file for details.
"""
# == constants

_KLUGE_PERMIT_INHERITED_CLASSNAMES = (
    # short class names which are allowed to inherit their featurename
    # from a superclass without triggering the developer warning (and
    # auto-uniquified featurename) in _determine_FeatureDescriptor
    "Command",
    "basicMode",
    "Select_basicCommand", # abstract, but has to be in this list
        # since it doesn't have its own featurename
    "Select_Command",
    "selectMode",
    "TemporaryCommand_preMixin", # (should this count as abstract?)
    "_minimalCommand", # (ditto)
    ## "_Example_TemporaryCommand_useParentPM", # abstract, but doesn't have its own featurename
 )
# == global state

_descriptor_for_feature_class = {}
    # maps feature class to FeatureDescriptor
    # (cache filled lazily by find_or_make_FeatureDescriptor;
    #  a None value means "not a feature / abstract")

_descriptor_for_featurename = {}
    # maps featurename (canonicalized) to FeatureDescriptor;
    # (used to warn about duplicate featurenames,
    #  other than by subclasses not overriding one from a superclass)

_feature_classes = {}
    # maps feature class (abstract class) to descriptor constructor for it;
    # only contains the classes directly passed to register_abstract_feature_class
    # (with descriptor_constructors), not their subclasses.

_feature_class_tuple = ()
    # tuple of feature classes, suitable for passing to issubclass;
    # recreated automatically after each call of register_abstract_feature_class

_feature_kind_subclasses = []
    # list of subclasses corresponding to specific
    # kinds of features to report in special ways
    # (todo: permit passing different more specific descriptor_constructors
    #  for those -- that doesn't work now, since _feature_class_tuple doesn't
    #  preserve registration order)

# TODO: use this in describing kind of feature, in export command table
# TODO: register EditCommand
# [bruce 080905]
# ==
def short_class_name( clas):
    """
    Return the last dot-separated component of clas.__name__.
    """
    # todo: refile into utilities.constants; has lots of inlined uses
    # todo: generalize to non-class things
    # note: used for name-mangling too, so don't change what it returns
    full = clas.__name__
    return full[full.rfind('.') + 1:] # rfind == -1 (no dot) yields full name
def canonicalize_featurename( featurename, warn = False):
    """
    Canonicalize a featurename: strip surrounding whitespace and turn
    each underscore into a space. If warn is true and this changed
    anything, print a developer warning.
    """
    # refile? does part of this function already exist elsewhere?
    original = featurename
    # Underscores become blanks [bruce 080717] to work around erroneous
    # underscores used in some featurename constants. Bad effect: this also
    # removes at least one correct one, in "Test Command: PM_Widgets Demo";
    # nonetheless it's necessary so wiki help URLs (which use '_' in place
    # of ' ') can't coincide.
    featurename = featurename.strip().replace('_', ' ')
    if warn and featurename != original:
        msg = "developer warning: featurename %r was canonicalized to %r" % \
              ( original, featurename )
        print(msg)
    return featurename
# ==
def register_abstract_feature_class( feature_class, descriptor_constructor = None ):
    """
    Register feature_class as an abstract feature class. Given a
    descriptor_constructor, it becomes a new top-level feature class
    (recorded in _feature_classes, and _feature_class_tuple is rebuilt);
    without one, it must subclass an already-registered feature class,
    and is recorded in _feature_kind_subclasses for reporting purposes.
    """
    global _feature_class_tuple
    if descriptor_constructor is not None:
        _feature_classes[ feature_class ] = descriptor_constructor
        _feature_class_tuple = tuple( _feature_classes.keys() )
    else:
        # must be a subclass of an existing feature class
        assert issubclass( feature_class, _feature_class_tuple )
        # 080905: record this class (assume that more general classes get
        # recorded first, since this function is called immediately after
        # they're defined -- first verify that assumption)
        for prior in _feature_kind_subclasses:
            assert not issubclass(prior, feature_class), \
                   "subclass %r must be registered after superclass %r" % \
                   ( prior, feature_class)
        _feature_kind_subclasses.append( feature_class)
    return
# ==
def find_or_make_descriptor_for_possible_feature_object( thing):
    """
    @param thing: anything which might be found as a global value in some module
    @return: descriptor (found or made) for program feature represented
             by thing, or None if thing doesn't represent a program feature.
    @rtype: FeatureDescriptor or None
    """
    # so far, all features are represented by subclasses of
    # registered abstract feature classes.
    # (someday there might be other kinds of features,
    #  e.g. plugins discovered at runtime and represented
    #  by instance objects or separately created descriptors.)
    try:
        foundone = issubclass( thing, _feature_class_tuple )
    except TypeError:
        # thing is not a class.
        # (bugfix: catch only TypeError -- which is what issubclass raises
        #  when its first argument is not a class -- rather than a bare
        #  'except:' which could also swallow unrelated errors such as
        #  KeyboardInterrupt or bugs in exotic metaclasses.)
        return None
    if not foundone:
        # not a subclass of a registered feature class
        return None
    # thing is a subclass of some class in _feature_class_tuple
    return find_or_make_FeatureDescriptor( thing)
# ==
def find_or_make_FeatureDescriptor( thing):
    """
    @param thing: the program object or class corresponding internally to a
                  specific program feature, or any object of the "same kind"
                  (presently, any subclass of an element of _feature_class_tuple)
    @return: FeatureDescriptor for thing (found or made),
             or None if thing does not describe a program feature.
    @rtype: FeatureDescriptor or None
    @note: fast, if descriptor is already known for thing
    @see: find_or_make_descriptor_for_possible_feature_object, for when
          you want to call something like this on an arbitrary Python object.
    """
    # thing is hashable (it's a class), so it can be used as a dict key
    if thing in _descriptor_for_feature_class:
        return _descriptor_for_feature_class[thing]
    res = _determine_FeatureDescriptor( thing) # might be None
    # However we got the description (even if we're reusing one),
    # cache it, to optimize future calls and prevent redundant warnings.
    _descriptor_for_feature_class[thing] = res
    return res
def _determine_FeatureDescriptor( thing):
    """
    Determine (find or make) and return the FeatureDescriptor
    to use with thing. (If thing is abstract, return None.)

    @param thing: an object or class corresponding internally
                  to a specific program feature, and for which
                  no descriptor is already cached
                  (though we might return one which was already
                  cached for a different thing, e.g. a superclass).

    @note: for now, this can only handle classes,
           and only if they have a superclass that has been registered
           with register_abstract_feature_class.
    """
    assert issubclass( thing, _feature_class_tuple), \
           "wrong kind of thing: %r" % (thing,)
    # note: this fails with some kind of exception for non-classes,
    # and with AssertionError for classes that aren't a subclass of
    # a registered class
    clas = thing
    del thing
    featurename = clas.featurename # note: not yet canonicalized
    # See if class is declared abstract.
    # It declares that using a name-mangled attribute, __abstract_command_class,
    # so its subclasses don't accidentally inherit that declaration.
    # (This is better than using command_level == CL_ABSTRACT,
    #  since that can be inherited mistakenly. REVIEW whether that
    #  attr value should survive at all.)
    # [bruce 080905 new feature]
    short_name = short_class_name( clas)
    # reproduce the name-mangling Python applies to a class-private attr
    mangled_abstract_attrname = "_%s__abstract_command_class" % short_name
    abstract = getattr(clas, mangled_abstract_attrname, False)
    if abstract:
        # abstract classes are not features; no descriptor
        return None
    if clas in _feature_class_tuple:
        # a directly-registered feature class reached here, i.e. it was not
        # declared abstract -- probably an omission in that class
        print "\n*** possible bug: %r probably ought to define __abstract_command_class = True" % short_name
            # if not true, after review, revise this print [bruce 080905]
        return None
    # see if featurename is inherited (i.e. clas didn't override it)
    inherited_from = None # might be changed below
    for base in clas.__bases__: # review: use mro?
        inherited_featurename = getattr( base, 'featurename', None)
        if inherited_featurename == featurename:
            inherited_from = base
            break
    if inherited_from is not None:
        # decide whether this is legitimate (use inherited description),
        # or not (warn, and make up a new description).
        legitimate = short_name in _KLUGE_PERMIT_INHERITED_CLASSNAMES
        # maybe: also add ways to register such classes,
        # and/or to mark them using __abstract_command_class = True
        # (made unique to that class by name-mangling).
        # maybe: point out in warning if it ends with Mode or Command
        # (but not with basicCommand) (probably not worth the trouble)
        if legitimate:
            return find_or_make_FeatureDescriptor( inherited_from)
            # return that even if it's None (meaning we're an abstract class)
            # (todo: review that comment -- abstract classes are now detected
            #  earlier; can this still be None at this point? Doesn't matter
            #  for now.)
        # not legitimate: make the featurename unique by appending class name
        featurename = canonicalize_featurename( featurename, warn = True)
        featurename = featurename + " " + short_name
        print
        print "developer warning: auto-extending inherited featurename to make it unique:", featurename
        print " involved classes: %r and its subclass %r" % \
              ( short_class_name( inherited_from),
                short_class_name( clas) )
        print " likely fixes: either add %r to _KLUGE_PERMIT_INHERITED_CLASSNAMES," % ( short_name, )
        print " or define a featurename class constant for it,"
        print " or declare it as abstract by defining __abstract_command_class = True in it."
        pass # use new featurename to make a new description, below
    else:
        # not inherited
        featurename = canonicalize_featurename( featurename, warn = True)
        assert not short_name in _KLUGE_PERMIT_INHERITED_CLASSNAMES, short_name
        pass
    # use featurename (perhaps modified above) to make a new description
    descriptor_constructor = _choose_descriptor_constructor( clas)
    descriptor = descriptor_constructor( clas, featurename )
    # warn if featurename is duplicated (but return the descriptor anyway)
    if _descriptor_for_featurename.has_key( featurename):
        print "developer warning: duplicate featurename %r for %r and %r" % \
              ( featurename,
                _descriptor_for_featurename[ featurename ].thing,
                descriptor.thing
              )
    else:
        _descriptor_for_featurename[ featurename] = descriptor
    return descriptor # from _determine_FeatureDescriptor
# ==
def _choose_descriptor_constructor( subclass):
    """
    subclass is a subclass of something in global _feature_classes
    (but not identical to anything in it);
    find the corresponding descriptor_constructor;
    if more than one matches, return the most specific (or error if we can't).
    """
    candidates = [(feature_class, descriptor_constructor)
                  for feature_class, descriptor_constructor in _feature_classes.iteritems()
                  if issubclass( subclass, feature_class )
                 ]
    assert candidates
    if len(candidates) == 1:
        return candidates[0][1]
    # Multiple feature classes match (previously 'nim': this always hit
    # 'assert 0', even when all constructors were the same).
    # If every candidate specifies the same constructor, the choice
    # doesn't matter -- just use it.
    constructors = [constructor for fc_unused, constructor in candidates]
    if len(set(constructors)) == 1:
        return constructors[0]
    # Otherwise return the constructor of the most specific feature class,
    # i.e. one which is a subclass of every other candidate class.
    for feature_class, constructor in candidates:
        if all(issubclass(feature_class, other)
               for other, c_unused in candidates):
            return constructor
    # no single most-specific class exists -- ambiguous; still an error
    assert 0, "can't choose most specific descriptor constructor from: %r" % (candidates,)
    return candidates[0][1]
# ===
class FeatureDescriptor(object):
    """
    Abstract superclass for the various kinds of feature descriptors.
    Records the described object and its (canonicalized) featurename.
    """
    def __init__(self, thing, featurename):
        # note: 'thing' may deserve a better name
        self.thing = thing
        self.featurename = featurename
    pass
# ===
def command_package_part_of_module_name(name):
"""
Given a module name like "dna.commands.InsertDna.InsertDna_EditCommand",
return the command package part, "dna.commands.InsertDna".
Return a command package name itself unchanged.
For module names, not inside a command package (or equalling one),
return None.
"""
command_package = None # default return value
name_parts = name.split('.')
if 'commands' in name_parts:
where = name_parts.index('commands')
if where not in (0, 1):
print "unusual location for 'commands' in module name:", name
if where < len(name_parts) - 1:
command_package = '.'.join(name_parts[0:where+2])
return command_package
class CommandDescriptor(FeatureDescriptor):
    """
    Abstract superclass for descriptors of the various kinds of commands.
    """
    command_package = None # default; subclasses set a real value per instance
    pass
class otherCommandPackage_Descriptor( CommandDescriptor):
"""
Descriptor for a command presumed to exist in a given command_package
in which no actual command was found.
"""
def __init__(self, command_package):
CommandDescriptor.__init__( self, None, None )
self.command_package = command_package
return
def sort_key(self):
return ( 0, self.command_package )
def print_plain(self):
print "command_package:", self.command_package
print "type: command package (no command found)"
pass
class basicCommand_Descriptor( CommandDescriptor): # refile with basicCommand?
    """
    Descriptor for a command feature defined by any basicCommand subclass.
    """
    def __init__(self, command_class, featurename):
        CommandDescriptor.__init__( self, command_class, featurename )
        # initialize various metainfo
        modulename = command_class.__module__
        self.command_package = command_package_part_of_module_name( modulename)
        self.feature_type = self._get_feature_type()
        # todo: more
        return
    def _get_feature_type(self):
        # describe the kind of feature by the most specific registered
        # feature class this command subclasses
        for fc in (list(_feature_class_tuple) + _feature_kind_subclasses)[::-1]:
            # list is most general first, so we scan it backwards
            if issubclass( self.command_class, fc):
                return short_class_name(fc) + " subclass"
        return "unknown" # bug
    def _get_command_class(self):
        # self.thing (stored by FeatureDescriptor) is the command class
        return self.thing
    command_class = property( _get_command_class)
        # read-only alias for self.thing, for clarity at call sites
    def _get_porting_status(self):
        # temporary code during the port to USE_COMMAND_STACK;
        # returns "" when fully ported
        porting_status = getattr(self.command_class,
                                 'command_porting_status',
                                 "bug: command_porting_status not defined" )
        return porting_status or ""
    def sort_key(self):
        # revise to group dna commands together, etc? or subcommands of one main command?
        # yes, when we have the metainfo to support that.
        return ( 1, self.featurename, short_class_name( self.command_class) ) #e more?
    def print_plain(self):
        # print a plain-text (with a bit of html) report of this command's metainfo
        porting_status = self._get_porting_status()
        # porting_status is temporary code during the port to USE_COMMAND_STACK
        fully_ported = not porting_status
        if fully_ported:
            print "featurename: <b>%s</b>" % self.featurename
        else:
            print "featurename:", self.featurename
        print "classname:", short_class_name( self.command_class)
        print "command_package:", self.command_package
        print "type:", self.feature_type
        if self.command_class.is_fixed_parent_command():
            # note: it works to call this classmethod directly on the class
            print "level:", self.command_class.command_level, \
                  "(%s)" % (self.command_class.command_parent or "no command_parent")
        else:
            print "level:", self.command_class.command_level
        if not fully_ported:
            print "porting status:", porting_status
        # todo: more
        return
    pass # end of class basicCommand_Descriptor
# end
| NanoCAD-master | cad/src/foundation/FeatureDescriptor.py |
# Copyright 2005-2008 Nanorex, Inc. See LICENSE file for details.
"""
node_indices.py
@author: Bruce
@version: $Id$
@copyright: 2005-2008 Nanorex, Inc. See LICENSE file for details.
Utilities for finding node indices in a tree,
and using them to help move the nodes around.
(And other things useful for fixing bug 296 by moving jigs
after the chunks they connect to, or for related operations
on the node tree.)
Some of this should perhaps become Node and Group methods,
but most of it probably makes sense to leave here in its own module,
so those classes can stay small and to the point.
Module classification: in foundation, since deals with
Node trees in a sufficiently general way. [bruce 071214]
"""
from utilities.constants import common_prefix
from foundation.Group import Group # for isinstance in assertion
##from jigs import Jig # for isinstance [bruce 071214 removed this]
# quick try at fixing bug 296 for josh emergency use!
# doing it all in this file, though ideally some small part of this code
# (the part that says "isinstance", mainly) belongs in utility.py and/or
# jigs.py (but I have local mods in both those files which i am not
# ready to commit yet, thus it's easier for me to not change them now).
# [bruce 050111]
def node_position( node, root, inner_indices = None):
    """
    Given a node which is somewhere in the node-subtree rooted at root,
    return its "extended index", i.e. a list of 0 or more indices of
    subtrees in their dads, from outer to inner (like a URL with
    components being individual indices).

    If node is root, this extended index will be [].
    If node is *not* found anywhere under root, raise a ValueError whose
    detail is node's highest existing dad (which might be node but will
    not be root; it might *contain* root if root was not toplevel).
    If node is already None, that's an error (don't call us for that).
    Extend the retval by (a copy of) the given inner_indices, if supplied.

    Note that these extended indices can be used by Python functions like
    <, >, sort, max, etc; positions that come later in the tree are
    larger, except that a child node position is larger than a parent
    node position, according to these Python functions.
    #doc better

    @param node: the node to locate (must not be None)
    @param root: root of the subtree to locate node in
    @param inner_indices: optional list of indices appended to the result
    @raise ValueError: if node is not under root (detail is node's highest
                       existing ancestor)
    """
    # bugfix (best practice): default is now None rather than a shared
    # mutable [] (the old default was never mutated in practice, but the
    # sentinel form is safe by construction and keeps the same behavior).
    if inner_indices is None:
        inner_indices = []
    if node == root:
        return list(inner_indices) # copy, so caller's list isn't shared
    if not node.dad:
        # node (or some ancestor) fell outside root's subtree
        raise ValueError(node)
    ourindex = node.dad.members.index(node)
        # error if we're not in our dad's members! should never happen
    return node_position( node.dad, root, [ourindex] + inner_indices)
def node_new_index(node, root, after_these):
    """
    If the given node is not already after all the nodes in after_these (a list of nodes),
    return the new position it should have (expressed in terms of current indices --
    the extended index might be different after the original node is moved,
    leaving a hole where the node used to be!). Note that this could be, but is not,
    the next possible index... the best choice is subjective, and this design decision
    of where to move it to is encoded in this function.
    If it does not need to move, return None.
    If it can't be properly moved (because it must come after things which
    are not even in the tree rooted at root), raise ValueError, with detail
    being an error message string explaining the problem.
    """
    if not after_these:
        # this test is needed, so max doesn't get a 0-length sequence
        return None
    try:
        ourpos = node_position(node, root)
    except ValueError:
        raise ValueError, node ###stub; need a better detail
        # (or make subr itself help with that? not sure it can)
    try:
        afterposns = map( lambda node1: node_position(node1, root), after_these)
    except ValueError:
        raise ValueError, node ###stub; need a better detail
    last_after = max(afterposns) # last chunk of the ones node must come after
    if ourpos > last_after:
        # already after everything it must follow -- no move needed
        res = None
    else:
        ## res = just_after(last_after)
        # instead let's put it at a higher level in the tree...
        # as low as we can so that to get to its chunks, you go back
        # and then down (not up first) (where down = more indices,
        #  up = fewer indices, back = last index smaller);
        # and as far back as we can.
        first_after = min(afterposns)
        # As pointed out by Huaicai, diagnosing a bug found by Ninad
        # (see Ninad's comment which reopened bug 296), the following
        # is wrong when first_after == last_after:
        ## grouppos = common_prefix( first_after, last_after )
        # Instead, we should find the innermost group equal to or containing
        # the *groups containing* those leaf node positions:
        grouppos = common_prefix( first_after[:-1], last_after[:-1] )
        # put it in that group, just after the element containing
        # the last of these two posns
        ind = last_after[len(grouppos)] + 1
        res = grouppos + [ind]
        # (res is None on the other branch, so only sanity-check it here)
        assert is_list_of_ints(res)
    return res
def just_after(extended_index): # not presently used
    """
    Return the extended index of the position (which may not presently
    exist) immediately following the given one, at the same level.
    """
    assert is_list_of_ints(extended_index)
    assert extended_index, "just_after([]) is not defined"
    bumped_last = extended_index[-1] + 1
    return extended_index[:-1] + [bumped_last]
def fix_one_node(node, root):
    """
    Ask node whether it needs moving (to be after some other nodes under
    root, per node.node_must_follow_what_nodes()), and if so, move it.
    Return 1 if we moved it, 0 if we did not (numbers, not booleans!).

    Error if it needs moving after some nodes *not* under root; in that
    case, raise ValueError with a suitable error message (this comes from
    node_new_index, which implements that part of our contract).

    @param node: a Node which might say it must follow certain other
                 nodes in the internal model tree
    @type: Node
    @param root: top of a Node tree which caller says node is in and
                 belongs in
    @type: Node (usually or always a Group)
    @note: moving this node changes the indices in the tree of many
           other nodes.
    """
    required_predecessors = node.node_must_follow_what_nodes()
    target = node_new_index( node, root, required_predecessors)
        # may raise ValueError; we deliberately let that propagate
    if target is None:
        # node is already positioned correctly
        return 0
    move_one_node(node, root, target)
    return 1
def move_one_node(node, root, newpos):
    """
    Move node to newpos as measured under root;
    error if node or newpos is not under root
    (but we don't promise to check whether node is);
    newpos is the old position, but it might not be the new one,
    since removing the node will change indices in the tree.
    """
    assert is_list_of_ints(newpos)
    # First find a node to move it just before,
    # or a group to make it the last child of
    # (one of these is always possible
    #  unless the newpos is not valid),
    # so we can use the node or group
    # as a fixed marker, so when we remove node, this marker
    # doesn't move even though indices move.
    try:
        marker = node_at(root, newpos)
        where = 'before'
        if marker == node:
            # we are moving it to where it already is -- the one
            # (known) correct case in which the following code
            # can't work... fortunately it's also easy!
            # It's also never supposed to happen in the initial
            # use of this code (though it's legal for this func in general),
            # so (for now) print an unobtrusive warning.
            # (Later make that atom_debug only. #e)
            print "(fyi: moving node %r to where it already is in model tree)" % (node,)
            return
    except IndexError: # nothing now at that pos (or pos itself doesn't exist)
        marker = node_at(root, newpos[0:-1]) # use group as marker
        where = 'end' # works for a full or empty group
    # now remove node from where it is now
    if node.dad: # guess: this is always true (not sure; doesn't matter)
        node.dad.delmember(node)
    # now put it where it belongs, relative to marker
    if where == 'end':
        assert isinstance(marker, Group)
        marker.addchild(node) # adds at end by default
    else:
        # Warning: marker might or might not be a Group; current addmember can't handle that!
        # (That is, we want what Node.addmember (addsibling) does, never what
        #  Group.addmember (addchild) does, whether or not marker is a Group.)
        # We should (and will [and did]) fix addmember, by splitting it into new methods
        # addchild and addsibling; but until we fix it, use this kluge here:
        ## Node.addmember(marker, node, before=True)
        marker.addsibling( node, before = True) #bruce 050128 fix of reopened bug 296
            # it was reopened by my commit to Utility a few days ago
            # which changed the name of the addmember optional argument. Oops. [bruce 050128]
    return
def is_list_of_ints(thing):
    """
    Return whether thing is a (possibly empty) list whose smallest and
    largest elements are both plain ints.
    """
    if type(thing) != type([]):
        return False
    if not thing:
        return True # an empty list vacuously qualifies
    return type(min(thing)) == type(1) == type(max(thing))
def node_at(root, pos):
    """
    Return the node at extended index pos, relative to root,
    or raise IndexError if nothing is there but the group
    containing that position exists,
    or if the index goes too deep.
    """
    assert is_list_of_ints(pos)
    if not pos:
        return root # empty index names root itself
    first, deeper = pos[0], pos[1:]
    try:
        children = root.members
    except AttributeError: # no .members -- root is a leaf here
        raise IndexError("tried to index into a leaf node")
    try:
        child = children[first]
    except IndexError: # first out of range of the group's members
        raise IndexError("nothing at that position in group")
    return node_at(child, deeper)
def fix_one_or_complain(node, root, errmsgfunc): # TODO: rename
    """
    [public]
    Move node to a different position under root, if it says it needs
    to come after certain other nodes under root. See fix_one_node
    for details. Return 1 if we moved it, 0 if we did not.
    Unlike fix_one_node (which we call to do most of our work),
    report errors by passing an error message string to errmsgfunc
    rather than by raising ValueError -- and delete the node entirely
    when such an error occurs.
    @param node: a Node which might say it must follow certain other
                 nodes in the internal model tree (by returning them from
                 node.node_must_follow_what_nodes())
    @type: Node
    @param root: top of a Node tree which caller says node is in and belongs in
    @type: Node (usually or always a Group)
    @param errmsgfunc: function for error message output; will be passed an
                       error message string if an error occurs
    @note: external code calls this to help place a new Jig, including a Plane
           (a Jig subclass with no atoms). The Plane call is not needed (as long
           as planes have no atoms) but is harmless. [info as of 071214]
    """
    try:
        return fix_one_node(node, root)
    except ValueError, msg:
        # removing this assert, since no longer needed, and has import issues
        # re package classification: [bruce 071214]
        ## # redundant check to avoid disaster from bugs in this new code:
        ## assert isinstance(node, Jig), "bug in new code for move_jigs_if_needed -- it wants to delete non-Jig %r!" % node
        if type(msg) != type(""):
            # the ValueError detail was an object (a Node), not a message string
            msg = "error moving %r, deleting it instead" % msg #e improve this, or include error msg string in the exception
            # REVIEW: if msg is not a string, why does this code think it is what we were moving?
            # Is it using ValueError for what ought to be a custom exception class? [bruce 071214 Q]
        errmsgfunc(msg)
        node.kill() # delete it
        return 0
    pass
#bruce 051115 removed the following since it's no longer ever called (since workaround_for_bug_296 became a noop)
##def move_jigs_if_needed(root, errmsgfunc): # (this was the entry point for workaround_for_bug_296)
## """move all necessary jigs under root later in the tree under root;
## emit error messages for ones needing to go out of tree
## (by calling errmsgfunc on error msg strings),
## and delete them entirely;
## return count of how many were moved (without error).
## """
## count = 0
## for jig in find_all_jigs_under( root):
## count += fix_one_or_complain(jig, root, errmsgfunc)
## return count
##
##def find_all_jigs_under( root):
## res = []
## def grab_if_jig(node):
## if isinstance(node, Jig):
## #e logically we'd test node_must_follow_what_nodes here instead,
## # but that's slower and not yet needed
## res.append(node)
## root.apply2all( grab_if_jig)
## return res
# end
| NanoCAD-master | cad/src/foundation/node_indices.py |
# Copyright 2005-2008 Nanorex, Inc. See LICENSE file for details.
"""
whatsthis_utilities.py

Utilities for postprocessing tooltip and WhatsThis text across the UI:
on Macintosh, 'Ctrl' is replaced by 'Cmd'; feature names in WhatsThis
text are turned into wiki-help hyperlinks; and relative img tag
pathnames are made absolute.

@author: Bruce
@version: $Id$
@copyright: 2005-2008 Nanorex, Inc. See LICENSE file for details.
"""
from PyQt4.Qt import QAction
from PyQt4.Qt import QWidget
from PyQt4.Qt import QMenu
from PyQt4.Qt import QMenuBar

import os

from utilities.icon_utilities import image_directory

import foundation.env as env

from platform_dependent.PlatformDependent import is_macintosh

_IS_MACINTOSH = is_macintosh()
    # cached once at import time; controls the Ctrl -> Cmd text replacement

#bruce 051227-29 code for putting hyperlinks into most WhatsThis texts
# (now finished enough for release, though needs testing and perhaps cleanup
# and documentation)

ENABLE_WHATSTHIS_LINKS = True # also used in an external file
    # note: if this is False, it now disables not only whatsthis links,
    # but the fixing of img tag pathnames in whatsthis text.
    # Probably we should just remove the flag, inlining it as True.
    # [bruce 081209 comment]

_DEBUG_WHATSTHIS_LINKS = False # DO NOT COMMIT with True # only used in this file

# ===

map_from_id_QAction_to_featurename = {}
    # map from id(QAction) to the featurenames in their whatsthis text
    # [bruce 060121 to help with Undo; renamed, bruce 080509]
    # note: also used in undo_internals.py
def fix_whatsthis_text_and_links(parent):
    #bruce 080509 removed old debug code for bugs 1421 and 1721; other cleanup
    #bruce 060319 renamed this from fix_whatsthis_text_for_mac
    #bruce 060120 revised this as part of fixing bug 1295
    #bruce 051227-29 revised this
    """
    [public]
    Fix tooltips and whatsthis text and objects (for all OSes, not just macs
    as this function once did).

    This function does two things:

    1. If the system is a Mac, this replaces all occurrences of 'Ctrl'
    with 'Cmd' in all the tooltip and whatsthis text for all QAction or
    QWidget objects that are children of parent.

    2. For all systems, it replaces certain whatsthis text patterns with
    hyperlinks (see turn_featurenames_into_links).

    This should be called after all widgets (and their whatsthis text)
    in the UI have been created. It's ok, but slow (up to 0.2 seconds per
    call or more), to call it more than once on the main window; it is in
    fact called again on smaller widgets after the main call in
    ne1_ui/Ui_MainWindow.py.

    Calling this on a single QAction works, but doesn't do enough to fix the
    text again for toolbuttons (and probably menu items) made by Qt from that
    action (re bug 1421).

    @param parent: the QObject whose children (QActions and QWidgets)
                   should be processed.

    @see: refix_whatsthis_text_and_links, which restores the text Qt
          clobbers when tooltips/menutext are set for Undo and Redo
          actions (bug 1421).
    """
    if _DEBUG_WHATSTHIS_LINKS:
        print("running fix_whatsthis_text_and_links")
    if _IS_MACINTOSH or ENABLE_WHATSTHIS_LINKS:
        # fix text in 1 or 2 ways for all QAction objects
        # (which are not widgets)
        # ATTENTION:
        # this only includes QActions that appear in the main menu bar;
        # some widgets appear only in toolbars on the main window, not in
        # the main menu bar. --Mark and Tom 2007-12-19
        actions = [child for child in parent.children()
                   if isinstance(child, QAction)]
        for action in actions:
            fix_QAction_whatsthis(action)
    if ENABLE_WHATSTHIS_LINKS:
        # Process all QWidget children. This includes QMenuBar and the QMenu
        # for each main menu and cmenu, but not menu items themselves
        # (Qt has no menu-item class; items are QActions, which don't show
        # up in children() here). Dynamic cmenu items can't be covered.
        widgets = [child for child in parent.children()
                   if isinstance(child, QWidget)]
        if isinstance(parent, QWidget):
            widgets.append(parent) #bruce 081209
        for widget in widgets:
            text = whatsthis_text_for_widget(widget) # could be either "" or None
            if text:
                # In case the text doesn't come from a QAction, modify it the
                # same ways as fix_QAction_whatsthis does; both modifications
                # are harmless if applied twice.
                if _IS_MACINTOSH:
                    text = replace_ctrl_with_cmd(text)
                text = turn_featurenames_into_links(text)
                assert text
                # BUG (longstanding): for widgets that are not menus or
                # menubars, this modified text is simply discarded below.
                # The only known symptom is for the GLPane's whatsthis text
                # (see whats_this_text_for_glpane), but there might be others.
                # [bruce 081209 comment]
            else:
                text = None # turn "" into None
            ismenu = isinstance(widget, QMenu)
            try:
                ismenubar = isinstance(widget, QMenuBar)
            except:
                # usual for non-Macs, I presume
                ismenubar = False
            if text is not None and (ismenu or ismenubar):
                # bugfix [this change]: this debug print used to be
                # unconditional, spamming the console with whatsthis HTML on
                # every call; guard it with the module debug flag.
                if _DEBUG_WHATSTHIS_LINKS:
                    print(text)
                widget.setWhatsThis(text)
    return # from fix_whatsthis_text_and_links
def fix_QAction_whatsthis(obj):
    """
    [public, though external calls are rare]
    Modify the Qt whatsthis and tooltip text assigned to obj
    (which should be a QAction; not sure if that's required):
    on Macintosh, replace Ctrl with Cmd in both texts; and if
    ENABLE_WHATSTHIS_LINKS, turn featurenames in the whatsthis text
    into hyperlinks, recording the featurename (keyed by id(obj))
    in the global map_from_id_QAction_to_featurename.

    @see: our main caller, fix_whatsthis_text_and_links
    """
    whatsthis = str(obj.whatsThis())
    tip = str(obj.toolTip())
    if _IS_MACINTOSH:
        whatsthis = replace_ctrl_with_cmd(whatsthis)
        tip = replace_ctrl_with_cmd(tip)
    if ENABLE_WHATSTHIS_LINKS:
        whatsthis = turn_featurenames_into_links(
            whatsthis,
            savekey = id(obj),
            saveplace = map_from_id_QAction_to_featurename )
    obj.setWhatsThis(whatsthis)
    obj.setToolTip(tip)
    return
def refix_whatsthis_text_and_links( ): #bruce 060319 part of fixing bug 1421
    """
    [public]
    Re-apply the whatsthis/tooltip fixes to the actions whose text Qt
    is known to clobber: currently just the Undo and Redo actions,
    whose menu text and tooltips get rewritten at runtime (bug 1421).
    Note that the set of actions needing this is hardcoded here.
    """
    window = env.mainwindow()
    for action in (window.editUndoAction, window.editRedoAction):
        fix_QAction_whatsthis(action)
    return
def replace_ctrl_with_cmd(text):
    # by Mark; might modify too much for text which uses Ctrl in unexpected
    # ways (e.g. as part of longer words, or in any non-modifier-key usage)
    """
    Return text with every occurrence of 'Ctrl' replaced by 'Cmd'
    and every 'ctrl' replaced by 'cmd'.
    """
    return text.replace('Ctrl', 'Cmd').replace('ctrl', 'cmd')
def whatsthis_text_for_widget(widget): #bruce 060120 split this out of other code
    """
    Return a Python string containing the WhatsThis text for
    widget (perhaps ""), or None if we can't find that.
    """
    try:
        # Note: whatsThis() works on QWidgets here; it may return a QString,
        # so convert immediately. (The older code tried widget.whatsThis()
        # without str() and that never worked for widgets.)
        found = str(widget.whatsThis())
    except:
        # This happens for a lot of QObjects which don't support whatsThis,
        # e.g. for <constants.qt.QObject object at 0xb96b750>.
        return None
    # The 'or ""' guards against None (probably never needed, but safe);
    # the outer str() guards against a lingering QString.
    return str(found or "")
def debracket(text, left, right): #bruce 051229 ##e refile this?
    """
    If text contains (literal substrings) left followed eventually by
    right (without another occurrence of left in between), return the
    triple (before, between, after) where
    before + left + between + right + after == text.
    Otherwise return None.
    """
    before, found_left, remainder = text.partition(left)
    if not found_left:
        return None # left never occurs in text
    between, found_right, after = remainder.partition(right)
    if not found_right:
        return None # no right after the first left
    # sanity check: the pieces must reassemble into the original text
    assert before + left + between + right + after == text
    if left in between:
        return None # not sure we found the correct 'right' in this case
    return (before, between, after)
def turn_featurenames_into_links(text, savekey = None, saveplace = None):
    #bruce 051229; revised/renamed 060120; save args 060121; img tags 081205
    """
    [public]
    Given some nonempty whatsthis text, return identical or modified text
    (e.g. containing a web help URL).

    If savekey and saveplace are passed, and the text contains a
    featurename, set saveplace[savekey] to that featurename.
    """
    # Step 1: make all img source pathnames absolute, if they are not already.
    # [bruce 081205 per mark]
    #
    # TODO: if this was improved to call get_image_path on the pathnames,
    # it would catch errors in the pathnames at load time rather than
    # only when they are first used to display images in whatsthis text.
    #
    # POSSIBLE BUGS: this will fail if the pathnames contain characters
    # that don't encode themselves when parsed (by Qt) in this
    # HTML-tag-attribute context -- at least true for '"' and r'\'
    # (fortunately rare in pathnames), perhaps for other chars; unicode
    # support is also unknown. If these are real bugs, they'll prevent NE1
    # from starting when it's installed under certain pathnames.
    # [bruce 081206 comments]
    IMG_PREFIX = "<img source=\"ui/"
    if IMG_PREFIX in text:
        ui_dir = os.path.join( os.path.normpath( image_directory() ), "ui" )
            ### TODO: use the named constant for "ui" here
            # (but not elsewhere in this function)
        # replace "ui" with ui_dir in all occurrences of IMG_PREFIX in text
        absolute_prefix = IMG_PREFIX.replace("ui/", ui_dir + '/')
        text = text.replace(IMG_PREFIX, absolute_prefix)
    # Step 2: look for words between <u><b> and </b></u> (required at the
    # very start of the text, not just anywhere as debracket would allow)
    # to replace with a web help link.
    if not text.startswith("<u><b>"):
        return text
    split1 = debracket(text, "<u><b>", "</b></u>")
    if not split1:
        return text
    junk, name, rest = split1
    featurename = name # might be changed below
    if "[[Feature:" in rest: # it's an optim to test this first, since usually false
        # Extract the feature name to use in the link, when this differs
        # from the name shown in the WhatsThis text; this name is usually
        # given in an HTML comment but we use it w/o modifying the text
        # whether or not it's in one. We use it in the link but not in the
        # displayed WhatsThis text.
        split2 = debracket(rest, "[[Feature:", "]]")
        if not split2:
            print("syntax error in Feature: link for WhatsThis text for %r" % name)
            return text
        junk, featurename, junk2 = split2
        #e should verify featurename is one or more capitalized words
        # separated by ' '; could use split, isalpha (or so) ###@@@
    if _DEBUG_WHATSTHIS_LINKS:
        if featurename != name:
            print("web help name for %r: %r" % (name, featurename,))
        else:
            print("web help name: %r" % (featurename,))
    if saveplace is not None:
        saveplace[savekey] = featurename
    link = "Feature:" + featurename.replace(' ', '_')
        # note: partly duplicates code in def wiki_help_url in wiki_help.py
        # note: this link will only work if it's interpreted by
        # class QToolBar_WikiHelp, which prepends wiki_prefix().
        # (Before we used Qt 4, the prefix was prepended right here,
        # working with the long-gone class MyWhatsThis. [bruce 081209 guess])
    return "<a href=\"%s\">%s</a>" % (link, name) + rest
# end
| NanoCAD-master | cad/src/foundation/whatsthis_utilities.py |
# Copyright 2005-2007 Nanorex, Inc. See LICENSE file for details.
"""
undo_internals.py - wrap our Qt slot methods with Undo checkpoints.
See also: undo_archive.py, undo_manager.py, undo_UI.py,
def wrap_callable_for_undo, and perhaps some undo-related
code in env.py, changes.py, HistoryWidget.py.
@author: Bruce
@version: $Id$
@copyright: 2005-2007 Nanorex, Inc. See LICENSE file for details.
Module classification: foundation.
"""
import foundation.env as env
from utilities.debug import register_debug_menu_command
from PyQt4.Qt import QObject ## , QWidget, SIGNAL
from utilities import debug_flags # for atom_debug
import utilities.EndUser as EndUser
# debug print options

DEBUG_PRINT_UNDO = False # DO NOT COMMIT with True -- causes lots of debug prints regardless of atom_debug

DEBUG_FEWER_ARGS_RETRY = True # debug prints for fewer-args retries; now True since these are deprecated [bruce 071004]

DEBUG_GETARGSPEC = False # DO NOT COMMIT with True -- debug prints related to USE_GETARGSPEC (only matters when that's true)

DEBUG_USE_GETARGSPEC_TypeError = False # print the few cases which args_info can't yet handle (### TODO: fix it for these ASAP)

# options that change behavior

USE_GETARGSPEC = True # bruce 071004
    # when True, analyze each slot's signature at connect time to predict
    # signal/slot argcount mismatches (see class wrappedslot)

_use_hcmi_hack = True # enable signal->slot call intercepting code, to check for bugs that mess up other things [bruce 050922]
    # i suspect this now has to be true (undo won't work without it) -- if true, remove this [bruce 071003 comment]

if not EndUser.enableDeveloperFeatures():
    # end user case
    DISABLE_SLOT_ARGCOUNT_RETRY = False
        # be looser for end-users until we're sure we fixed all the bugs
        # this would expose and turn into exceptions, as explained in the
        # long comment in the other case.
        # [bruce 071004, per team call]
    NONERROR_STDERR_OK = False # in case we're on a Windows install for which prints to sys.stderr cause exceptions
        # (that issue ought to be fixed more generally than in this file)
else:
    # developer case
    DISABLE_SLOT_ARGCOUNT_RETRY = True # bruce 071004 -- WHEN True, THIS WILL EXPOSE SOME BUGS as new TypeError exceptions.
        #
        # (see also a change to this for endusers, below.)
        #
        # To fix the bugs this exposes, add the proper argument declarations to slot
        # methods which are raising TypeError due to being passed too many args by a
        # signal connection inside fbmethod_0args.
        #
        # Or to temporarily work around them, set this flag to False in your local
        # sources, in this case or below it (but be sure not to commit that change).
        #
        # Details:
        #
        # When True, this simulates a proposed simplification
        # in which we only try calling slot methods with all the available args
        # passed to them by PyQt.
        #
        # When False, as has always been effectively the case as of 071004, we
        # retry them with fewer arguments if they raise TypeError to complain about
        # too many (or, unfortunately, if they raise it for some other reason), in
        # order to imitate a similar (but probably safer) behavior documented by
        # PyQt3.
    NONERROR_STDERR_OK = True
    ## DISABLE_SLOT_ARGCOUNT_RETRY = False # DO NOT COMMIT with this line enabled -- for testing of end user case code

if EndUser.enableDeveloperFeatures():
    print "DISABLE_SLOT_ARGCOUNT_RETRY =", DISABLE_SLOT_ARGCOUNT_RETRY
# ==
def reload_undo(target = None):
    # does this work at all, now that undo_UI was split out of undo_manager? [bruce 071217 Q]
    """
    Debug-menu command: reload three of the four undo_*.py modules in
    place. The reloaded code only takes effect for files opened after
    this runs.

    @param target: ignored (present for the debug-menu-command signature)
    """
    # reload each module immediately after importing it, in dependency order
    import foundation.undo_archive as undo_archive
    reload(undo_archive)
    import foundation.undo_manager as undo_manager
    reload(undo_manager)
    import foundation.undo_internals as undo_internals
    reload(undo_internals)
    print("\nreloaded 3 out of 4 undo_*.py files; open a new file and we'll use them\n") #e (works, but should make reopen automatic)
    return

register_debug_menu_command("reload undo", reload_undo)
# ==
def keep_under_key(thing, key, obj, attr):
    """
    Store thing as obj.attr[key], creating the obj.attr dict if necessary.
    (The point is to keep a Python reference to thing alive for as long as
    obj lives.)
    """
    if 0 and DEBUG_PRINT_UNDO:
        # disabled debug print [kept for reference]:
        # this showed unique keys, but just barely (name is deleg for lots of
        # QActions), so we'll have to worry about it, and maybe force all keys
        # unique during init. If some keys are not unique, result might be that
        # some user actions (or for worse bugs, internal signals) silently stop
        # working. [bruce 050921]
        print("keepkey:", key, "keepon_obj:", obj)
    try:
        mapping = getattr(obj, attr)
    except AttributeError:
        mapping = {}
        setattr(obj, attr, mapping)
    mapping[key] = thing
    return
class wrappedslot:
    """
    WARNING: the details in this docstring are obsolete as of sometime before 071004.

    Hold a boundmethod for a slot, and return callables (for various arglists)
    which call it with our own code wrapping the call.
    We don't just return a callable which accepts arbitrary args, and pass them on,
    because we use this with PyQt which we suspect counts the accepted args
    in order to decide how many args to pass,
    and if we accepted all it had, some of our wrapped slots would receive more args
    than they can handle.

    Come to think of it, this probably won't be enough, because it *still* might
    pass us too many based on the ones listed in the signal name. We might have to
    revise this to count the args accepted by our saved slotboundmethod in __init__. ###k
    """
    # default values of instance variables
    args_info_result = None # cached return value from args_info (a 4-tuple:
        # (success, minargs, maxargs, any_kws_ok))
    need_runtime_test = True # whether we need to test whether our signal passed too many args to our slot, at runtime
    def __init__(self, slotboundmethod, sender = None, signal = ""):
        # slotboundmethod: the real slot we wrap; sender: the QObject whose
        # signal is connected to it (used in begin() to guess a command name);
        # signal: the signal string, used for argcount analysis and debugging.
        self.slotboundmethod = slotboundmethod
        if USE_GETARGSPEC:
            # Print a warning if it looks like the signal and slot argument counts don't match;
            # if DEBUG_GETARGSPEC then always print something about the analysis result.
            # Also try to save info on self, so args_info needn't be called at runtime.
            from utilities.debug import print_compact_traceback
            try:
                self.args_info_result = args_info(slotboundmethod)
                success, minargs, maxargs, any_kws_ok = self.args_info_result
                if success:
                    # if DEBUG_GETARGSPEC, args_info already printed basic info
                    if any_kws_ok and DEBUG_GETARGSPEC:
                        print "DEBUG_GETARGSPEC: surprised to see **kws in a slot method; ignoring this issue: %r" % (slotboundmethod,)
                    del any_kws_ok
                    signal_args = guess_signal_argcount(signal)
                    strict = True # whether our test is not certain (i.e. too loose); might be set to false below
                    if minargs is None:
                        # maybe this can never happen as of 071004
                        minargs = 0
                        strict = False
                    if maxargs is None:
                        if DEBUG_GETARGSPEC:
                            print "DEBUG_GETARGSPEC: note: %r accepts **args, unusual for a slot method" % (slotboundmethod,)
                        maxargs = max(999999, signal_args + 1) # i.e. effectively unlimited
                        strict = False
                    assert type(minargs) == type(1)
                    assert type(maxargs) == type(1)
                    ok = (minargs <= signal_args <= maxargs)
                    if not ok:
                        # print the warning which is the point of USE_GETARGSPEC
                        if minargs != maxargs:
                            print "\n * * * WARNING: we guess %r wants from %d to %d args, but signal %r passes %d args" % \
                                  (slotboundmethod, minargs, maxargs, signal, signal_args)
                        else:
                            print "\n * * * WARNING: we guess %r wants %d args, but signal %r passes %d args" % \
                                  (slotboundmethod, maxargs, signal, signal_args)
                    elif DEBUG_GETARGSPEC:
                        if minargs != maxargs:
                            print "DEBUG_GETARGSPEC: %r and %r agree about argcount (since %d <= %d <= %d)" % \
                                  (slotboundmethod, signal, minargs, signal_args, maxargs)
                        else:
                            assert signal_args == minargs
                            print "DEBUG_GETARGSPEC: %r and %r agree about argcount %d" % \
                                  (slotboundmethod, signal, minargs)
                    self.need_runtime_test = (not ok) or (not strict)
                        # REVIEW: also say "or any_kws_ok", or if kws passed at runtime?
                else:
                    # args_info failed; it already printed something if DEBUG_GETARGSPEC
                    pass
            except:
                print_compact_traceback("USE_GETARGSPEC code failed for %r: " % slotboundmethod)
                pass
        self.__sender = sender #060121
        self.__signal = signal #060320 for debugging
        return # from __init__
    def fbmethod_0args(self, *args, **kws):
        """
        fake bound method with any number of args (misnamed)
        """
        slotboundmethod = self.slotboundmethod
        #e we'll replace these prints with our own begin/end code that's standard for slots;
        # or we might call methods passed to us, or of known names on an obj passed to us;
        # or we might call a func passed to us, passing it a callback to us which does the slot call.
        if kws:
            print "unexpected but maybe ok: some keywords were passed to a slot method:", slotboundmethod, kws # maybe never seen
        if DEBUG_PRINT_UNDO:
            print "(#e begin) calling wrapped version (with %d args) of" % len(args), slotboundmethod
        mc = self.begin()
        try:
            if DISABLE_SLOT_ARGCOUNT_RETRY:
                # call slotmethod with exactly the same args we were passed
                # (this is known to often fail as of 071004, but we hope to fix that)
                res = slotboundmethod(*args, **kws)
            else:
                # deprecated case to be removed soon, but in the meantime, rewritten to be more
                # reliable [bruce 071004]
                # try calling slotmethod with exactly the args we were passed,
                # but if we get a TypeError, assume this probably means the slot method accepts
                # fewer args than the signal passes, so try again with fewer and fewer args until
                # we get no TypeError.
                # If USE_GETARGSPEC and not self.need_runtime_test, assume self.args_info_result
                # can be trusted to specify the possible numbers of args to pass;
                # otherwise assume it could be any number (as the code before 071004 always did).
                try:
                    res = slotboundmethod(*args, **kws)
                except TypeError:
                    # it might be that we're passing too many args. Try to find out and fix. First, for debugging, print more info.
                    if DEBUG_FEWER_ARGS_RETRY:
                        print "args for %r from typeerror: args %r, kws %r" % (slotboundmethod, args, kws)
                    if USE_GETARGSPEC and not self.need_runtime_test:
                        if self.args_info_result is None:
                            self.args_info_result = args_info(slotboundmethod)
                        success, minargs, maxargs, any_kws_ok = self.args_info_result
                        del any_kws_ok # ignored for now
                        assert success
                        del success
                        assert type(minargs) == type(maxargs) == type(1)
                        # use minargs and maxargs to limit the calls we'll try
                    else:
                        # try any reduced number of args
                        minargs = 0
                        maxargs = len(args)
                    # Construct arglists to use for retrying this call.
                    # Note that there is no guarantee that the original TypeError was caused by an excessive arglist;
                    # if it was caused by some other bug, these repeated calls could worsen that bug.
                    arglists_to_try = [] # will hold pairs of (args, kws) to try calling it with.
                    if kws:
                        # first try zapping all the keywords (note: as far as I know, none are ever passed in the first place)
                        arglists_to_try.append(( args, {} ))
                    while args:
                        # then zap the args, one at a time, from the end (but consider minargs and maxargs)
                        args = args[:-1]
                        if minargs <= len(args) <= maxargs:
                            arglists_to_try.append(( args, kws ))
                    # Retry it with those arglists (zero or more of them to try)
                    worked = False
                    from utilities.debug import print_compact_traceback
                    for args, kws in arglists_to_try:
                        try:
                            res = slotboundmethod(*args, **kws)
                            worked = True
                            if DEBUG_FEWER_ARGS_RETRY:
                                print " retry with fewer args (%d) worked" % len(args)
                            break # if no exceptions
                        except TypeError:
                            # guessing it's still an arg problem
                            if DEBUG_FEWER_ARGS_RETRY:
                                if NONERROR_STDERR_OK:
                                    print_compact_traceback("assuming this is a slot argcount problem: ")
                                print "args for %r from typeerror, RETRY: args %r, kws %r" % (slotboundmethod, args, kws)
                            continue
                        # other exceptions are treated as errors, below
                    if not worked:
                        # TODO (maybe): retry with first arglist? more likely to be the real error...
                        print "will try to reraise the last TypeError" # always print this, since we're about to print a traceback
                        raise
                        assert 0, "tried to reraise the last TypeError"
                    pass
                pass
            pass
        except:
            self.error()
            self.end(mc)
            if DEBUG_PRINT_UNDO:
                print "(#e end) it had an exception"
            print "bug: exception in %r%r (noticed in its undo wrapper); reraising it:" % (slotboundmethod, args)
            raise #k ok? optimal??
        else:
            self.end(mc)
            if DEBUG_PRINT_UNDO:
                print "(#e end) it worked" ## it returned", res
                # Note that slot retvals are probably ignored, except when they're called directly
                # (not via connections), but we don't intercept direct calls anyway.
                # So don't bother printing them for now.
        return res
    pass
    def begin(self):
        """
        Begin one wrapped slot call: open a changes op (undo-checkpoint
        context) via env.begin_op, and -- if we were called directly from
        the event loop -- try to guess and record a command name for the
        undo system. Returns the value which must be passed to self.end().
        """
        ## if 1: # 060121 debug code
        ##     try:
        ##         se = self.sender() # this can only be tried when we inherit from QObject, but it always had this exception.
        ##     except RuntimeError: # underlying C/C++ object has been deleted [common, don't yet know why, but have a guess]
        ##         print "no sender"
        ##         pass
        ##     else:
        ##         print "sender",se
        ## cp_fn = None # None, or a true thing enabling us to call undo_checkpoint_after_command
        if 1: #060127
            in_event_loop = env._in_event_loop
            mc = env.begin_op("(wr)") # should always change env._in_event_loop to False (or leave it False)
            assert not env._in_event_loop
            if in_event_loop: #060121, revised 060127 and cond changed from 1 to in_event_loop
                #e if necessary we could find out whether innermost op_run in changes.py's stack still *wants* a cmdname to be guessed...
                # this would be especially important if it turns out this runs in inner calls and guesses it wrong,
                # overwriting a correct guess from somewhere else...
                # also don't we need to make sure that the cmd_seg we're guessing for is the right one, somehow???
                # doesn't that mean the same as, this begin_op is the one that changed the boundary? (ie call came from event loop?)
                sender = self.__sender
                ##print "sender",sender # or could grab its icon for insertion into history
                from foundation.whatsthis_utilities import map_from_id_QAction_to_featurename
                fn = map_from_id_QAction_to_featurename.get(id(sender))
                    # When we used sender rather than id(sender), the UI seemed noticably slower!!
                    # Possible problem with using id() is for temporary items -- when they're gone,
                    # newly allocated ones with same id might seem to have those featurenames.
                    # Perhaps we need to verify the name is still present in the whatsthis text?
                    # But we don't have the item itself here! We could keep it in the value, and then
                    # it would stick around forever anyway so its id wouldn't be reused,
                    # but we'd have a memory leak for dynamic menus. Hmm... maybe we could add our own
                    # key attribute to these items? And also somehow remove temporary ones from this dict
                    # soon after they go away, or when new temp items are created for same featurename?
                    # ... Decision [nim]: use our own key attr, don't bother removing old items from dict,
                    # the leak per-cmenu is smaller than others we have per-user-command. ####@@@@ DOIT
                if fn:
                    if 1: #experiment 060121
                        from utilities.debug import print_compact_traceback
                        try:
                            win = env.mainwindow()
                            assert win.initialised # make sure it's not too early
                            assy = win.assy
                        except:
                            if debug_flags.atom_debug:
                                print_compact_traceback("atom_debug: fyi: normal exception: ")
                            pass # this is normal during init... or at least I thought it would be -- I never actually saw it yet.
                        else:
                            ## begin_retval = assy.undo_checkpoint_before_command(fn)
                            ## cp_fn = fn, begin_retval #e this should include a retval from that method, but must also always be true
                            if 1: #060127
                                # note, ideally this assy and the one that subscribes to command_segment changes
                                # should be found in the same way (ie that one should sub to this too) -- could this just iterate over
                                # same list and call it differently, with a different flag?? ##e
                                assy.current_command_info(cmdname = fn) #e cmdname might be set more precisely by the slot we're wrapping
                    if 0:
                        print " featurename =", fn
                        # This works! prints correct names for toolbuttons and main menu items.
                        # Doesn't work for glpane cmenu items, but I bet it will when we fix them to have proper WhatsThis text.
                        # Hmm, how will we do that? There is presently no formal connection between them and the usual qactions
                        # or toolbuttons or whatsthis features for the main UI for the same method. We might have to detect the
                        # identity of the bound method they call as a slot! Not sure if this is possible. If not, we have to set
                        # command names from inside the methods that implement them (not the end of the world), or grab them from
                        # history text (doable).
                else:
                    #060320 debug code; note, this shows signals that might not need undo cp's, but for almost all signals,
                    # they might in theory need them in the future for some recipients, so it's not usually safe to exclude them.
                    # Instead, someday we'll optimize this more when no changes actually occurred (e.g. detect that above).
                    if 0 and env.debug():
                        print "debug: wrappedslot found no featurename, signal = %r, sender = %r" % (self.__signal, sender)
        ## return cp_fn, mc #060123 revised retval
        return mc
    def error(self):
        """
        called when an exception occurs during our slot method call
        """
        ###e mark the op_run as having an error, or at least print something
        if debug_flags.atom_debug:
            print "atom_debug: unmatched begin_op??"
        return
    def end(self, mc):
        """
        End one wrapped slot call begun by self.begin();
        mc must be the value begin() returned.
        """
        ## cp_fn, mc = fn_mc
        env.end_op(mc)
        ## if 1: #060123
        ##     if cp_fn:
        ##         fn, begin_retval = cp_fn
        ##         win = env.mainwindow()
        ##         assy = win.assy
        ##         assy.undo_checkpoint_after_command( begin_retval)
        return
    pass
class hacked_connect_method_installer: #e could be refactored into hacked-method-installer and hacked-method-code to call origmethod
    """
    Provide methods which can hack the connect and disconnect methods of some class (assumed to be QWidget or QObject)
    by replacing them with our own version, which calls original version but perhaps with modified args.
    Other methods or public attrs let the client control what we do
    or see stats about how many times we intercepted a connect-method call.
    """
    def __init__(self):
        self.conns = {} # place to keep stats for debug
            # (maps arg-count of each intercepted connect call -> number of calls seen)
    def hack_connect_method(self, qclass):
        """
        Call this on QWidget or QObject class -- ONLY ONCE -- to hack its connect method.
        """
        #e in __init__?
        self.qclass = qclass #k not yet used in subsequent methods, only in this one
        replace_static_method_in_class( qclass, 'connect', self.fake_connect_method )
        replace_static_method_in_class( qclass, 'disconnect', self.fake_disconnect_method )
        return
    def fake_connect_method(self, origmethod, *args):
        """
        This gets called on all QWidgets instead of the static method QObject.connect,
        with the original implem of that method followed by the args from the call
        (not including the instance it was called on, since it replaces a static method),
        and must pretend to do the same thing, but it actually modifies some of the args
        before calling the origmethod.
        """
        # keep stats on len(args)
        self.conns.setdefault(len(args),0)
        self.conns[len(args)] += 1
        # call origmethod, perhaps wrapped with our own code
        if len(args) != 3:
            # The last two args are an object and a slotname. We might *like* to wrap that slot,
            # but we can't, unless we figure out how to turn the object and slotname into an equivalent bound method
            # which we could use instead of those last two args.
            # So for now, let's just print the args and hope we didn't need to wrap them.
            if DEBUG_PRINT_UNDO:
                print "not wrapping connect-slot since args not len 3:",args###@@@
            newargs = args
        else:
            # figure out what connection is being made, and whether we want to wrap its slot
            sender, signal, slotboundmethod = args
            signal = normalize_signal(signal) # important for slotmap, below; better than not, for the other uses
            newmethod = self.maybe_wrapslot(sender, signal, slotboundmethod)
            # newmethod is either slotboundmethod, or wraps it and is already kept (no need for us to preserve a ref to it)
            newargs = sender, signal, newmethod
            # record mapping from old to new slot methods (counting them in case redundant conns), for use by disconnect;
            # keep this map on the sender object itself
            try:
                slotmap = sender.__slotmap
                    # note: Python name-mangles this attribute to
                    # sender._hacked_connect_method_installer__slotmap,
                    # so it can't clash with the sender's own attributes
            except AttributeError:
                slotmap = sender.__slotmap = {}
            key = (signal, slotboundmethod) # slotboundmethod has different id each time, but is equal when it needs to be
            slotmap.setdefault(key, []).append( newmethod ) # ok if newmethod is slotboundmethod (in fact, better to add it than not)
            # redundant connections result in slotmap values of len > 1, presumably with functionally identical but unequal objects
        res = origmethod(*newargs) # pass on any exceptions
        if res is not True:
            print "likely bug: connect retval is not True:", res
            print " connect args (perhaps modified) were:", newargs
        return res
    def fake_disconnect_method(self, origmethod, *args):
        """
        Like fake_connect_method, but for QObject.disconnect: pop the
        wrapped slot (recorded by fake_connect_method in the sender's
        slotmap) and disconnect that instead of the original bound method.
        """
        if len(args) != 3:
            if DEBUG_PRINT_UNDO:
                print "not wrapping DISconnect-slot since args not len 3:",args###@@@ let's hope this happens only when it did for connect
            newargs = args
        else:
            sender, signal, slotboundmethod = args
            signal = normalize_signal(signal)
            try:
                slotmap = sender.__slotmap
            except AttributeError:
                # should never happen unless there's a disconnect with no prior connect
                slotmap = sender.__slotmap = {}
            key = (signal, slotboundmethod)
            try:
                lis = slotmap[key] # fails only if there's a disconnect with no prior connect
            except KeyError:
                # this case added by bruce 070615
                print "likely bug: disconnect with no prior connect", key #e need better info?
                newargs = args # still call disconnect -- ok?? I guess so -- it returns False, but no other apparent problem.
            else:
                newmethod = lis.pop() # should never fail, due to deleting empty lists (below)
                if not lis:
                    del slotmap[key] # not really needed but seems better for avoiding memory leaks
                newargs = sender, signal, newmethod
        res = origmethod(*newargs) # pass on any exceptions
        if res is not True: ##k
            print "likely bug: disconnect retval is not True:", res
            print " disconnect args (perhaps modified) were:", newargs
        return res
    def debug_print_stats(self, msg = '?'):
        """
        Record msg as the current startup stage, and (if DEBUG_PRINT_UNDO)
        print the connect-call stats accumulated so far in self.conns.
        """
        self.stage = msg
        if DEBUG_PRINT_UNDO:
            print "hcmi %r: %r" % (self.stage, self.conns)
    def maybe_wrapslot(self, sender, signal, slotboundmethod, keepcache_object = None):
        """
        Caller is about to make a connection from sender's signal to slotboundmethod.
        Based on sender and signal, decide whether we want to wrap slotboundmethod with our own code.
        If so, return the wrapped slot (a python callable taking same args as slotboundmethod),
        but first make sure it won't be dereferenced too early, by storing it in a dict
        at keepcache_object._keep_wrapslots (keepcache_object defaults to sender)
        using a key formed from the names(??) of signal and slotboundmethod.
        If not, just return slotboundmethod unchanged.
        """
        ## nice, but not needed, for keepkey; no longer needed for decide: signal = normalize_signal(signal)
        # want to wrap it?
        shouldwrap = self.decide(sender, signal) # always True, for now [clean up this code ###@@@]
        if not shouldwrap:
            if DEBUG_PRINT_UNDO:
                print "not wrapping %s from %s to %s" % (signal,sender,slotboundmethod) ###@@@
            return slotboundmethod
        # make object which can wrap it
        wr = wrappedslot(slotboundmethod, sender = sender, signal = signal) #060121 added sender arg #060320 added signal arg
        # decide which api to call it with (#e this might be done inside the wrapper class)
        if 1: ## or signal == SIGNAL("activated()"):
            method = wr.fbmethod_0args
        else:
            assert 0 # use other methods
        # keep things that PyQt might need but not hold its own refs to
        keepkey = (signal, slotboundmethod.__name__) #k
        keepon = keepcache_object or sender
        # We keep wr, in case method's ref to it is not enough at some future time (eg if method dies and wr is still wanted).
        # And we keep method, since wr does not contain a ref to it, since bound methods are remade each time they're asked for.
        # For now, the code would work even if we didn't keep wr, but keeping method is essential.
        keepwhat = (wr, method)
        keep_under_key(keepwhat, keepkey, keepon, '_keep_wrapslots')
        # return the wrapped slotboundmethod
        return method
    def decide(self, sender, signal):
        """
        should we wrap the slot for this signal when it's sent from this sender?
        """
        if 'treeChanged' in str(signal): ###@@@ kluge: knowing this [bruce 060320 quick hack to test-optimize Undo checkpointing]
            if env.debug():
                ###@@@ kluge: assuming what we're used for, in this message text
                print "debug: note: not wrapping this signal for undo checkpointing:", signal
            return False
        return True # try wrapping them all, for simplicity
    pass # end of class hacked_connect_method_installer
_hcmi = None # singleton hacked_connect_method_installer; set (once) by hack_qwidget_pre_win_init
def hack_qwidget_pre_win_init():
    """
    Install our connect/disconnect wrappers on QObject.

    Must be called before the main window is initialized. Extra calls
    are harmless and are ignored with a console message.
    """
    global _hcmi
    if _hcmi:
        print("redundant call of hack_qwidget_pre_win_init ignored")
        return
    _hcmi = hacked_connect_method_installer()
    # hacking QObject (rather than QWidget) works too, and probably
    # intercepts more connect calls
    _hcmi.hack_connect_method(QObject)
    return
# ==
# app startup code must call these at the right times:
def call_asap_after_QWidget_and_platform_imports_are_ok():
    """
    App-startup hook: install the connect-method hack (when enabled)
    as soon as the Qt imports are usable, and record initial stats.
    """
    if not _use_hcmi_hack:
        return
    hack_qwidget_pre_win_init()
    _hcmi.debug_print_stats('first call')
    return
def just_before_mainwindow_super_init():
    """
    App-startup hook: record connect-call stats just before the main
    window's superclass __init__ runs (when the hack is enabled).
    """
    if not _use_hcmi_hack:
        return
    _hcmi.debug_print_stats('just before mwsem super init')
    return
def just_after_mainwindow_super_init():
    """
    App-startup hook: record connect-call stats just after the main
    window's superclass __init__ runs (when the hack is enabled).
    """
    if not _use_hcmi_hack:
        return
    _hcmi.debug_print_stats('just after mwsem super init')
    return
def just_before_mainwindow_init_returns():
    """
    App-startup hook, called when the main window's "init in spirit"
    returns -- so the name is slightly misleading [060223 comment].

    Tells the assembly it's safe to make its initial undo checkpoint
    and start recording changes, then records connect-call stats.
    """
    # Logically this belongs in MWsemantics itself, but doing it here
    # avoided modifying that file at the time [bruce 060223].
    import foundation.env as env
    win = env.mainwindow()
    # necessary to tell assy it's safe to make its initial checkpoint
    # and start recording more
    win.assy.clear_undo_stack()
    #k is the following still active? [060223 question]
    if not _use_hcmi_hack:
        return
    _hcmi.debug_print_stats('mwsem init is returning')
    return
# ==
# Code for replacing QWidget.connect or QObject.connect
# (in either case, the same builtin static method of QObject, not a regular instance method)
# with our own function, which calls the original implem inside our own wrapping code.
# Note that the origmethod is just a builtin function, and our wrapping code has no knowledge
# of which QWidget instance it was called on, since this is not easy to get and not needed
# (or even permitted) to pass to the origmethod.
class fake_static_method_supplier:
    """
    Private helper for replace_static_method_in_class.

    Holds a pair (origmethod, insertedfunc) and supplies a single
    callable which poses as another class's static method; calling it
    runs insertedfunc(origmethod, *args, **kws).
    """
    def __init__(self, origmethod, insertedfunc):
        # stash both callables for _fake_static_method_implem to unpack
        self.args = (origmethod, insertedfunc)
        # Memoize one copy of this self-bound method; it will pretend
        # to be an unbound static method of some other class.
        self.fsm = self._fake_static_method_implem

    def fake_static_method_and_keep_these(self):
        """
        Return the pair (fake_static_method, keep_these).

        fake_static_method need not be wrapped in staticmethod since it's
        not a user-defined function or any other object which would be
        turned into a bound method when retrieved from the class it will
        be installed in. (If that ever changes, the staticmethod wrapping
        should be added here, so callers still needn't do it.)

        keep_these is self: self holds its own reference to self.fsm,
        so keeping self alive is enough for the caller. (If we remade
        self.fsm on each call, both would have to be returned.)
        """
        return self.fsm, self

    def _fake_static_method_implem(self, *args, **kws):
        """
        Stand-in for origmethod(*args, **kws): delegates to insertedfunc
        with origmethod prepended. Any exceptions propagate unchanged.
        """
        orig, inserted = self.args
        return inserted(orig, *args, **kws)
def replace_static_method_in_class(clas, methodname, insertedfunc):
    """
    Replace the static method clas.methodname with a new callable which
    intercepts every call meant for the original.

    Calls of the form instance.methodname(*args, **kws) -- which, for a
    static method, would have run origmethod(*args, **kws), ignoring the
    instance -- will instead run insertedfunc(origmethod, *args, **kws),
    returning its result and propagating its exceptions.

    Return (origmethod, keepthese), where keepthese is an object the
    caller might need to hold a reference to, in case clas itself won't
    keep alive the method we install.
    """
    origmethod = getattr(clas, methodname)
    # origmethod is a static method, i.e. an ordinary function whose
    # arglist does not include the instance
    supplier = fake_static_method_supplier(origmethod, insertedfunc)
    fakemethod, keepthese = supplier.fake_static_method_and_keep_these()
    setattr(clas, methodname, fakemethod)
    return origmethod, keepthese
# ==
def normalize_signal(signal):
    """
    Normalize whitespace in a Qt signal string, which should be
    SIGNAL(xx) or (untested) PYSIGNAL(xx).
    """
    try:
        # Preferred: Qt's own static canonicalizer on QObject.
        # Historically this raised AttributeError ("normalizeSignalSlot"
        # not found) [bruce 050921], triggering the fallback below.
        return QObject.normalizeSignalSlot(signal)
    except AttributeError:
        # Hacked-up kluge until the lost Qt method is found. It needn't
        # match Qt's output exactly -- any canonical form with the same
        # equivalence classes is fine, since it's only used to build
        # keys for our own dicts.
        def _wordchar(ch):
            return ch == "_" or ch.isalnum() #k guess
        words = signal[1:].split() # ignores whitespace at either end of signal[1:]
        parts = [signal[0], words[0]]
        prev = words[0]
        for word in words[1:]:
            # keep a single space only where gluing the words together
            # would merge two identifiers
            if _wordchar(prev[-1]) and _wordchar(word[0]):
                parts.append(' ')
            parts.append(word)
            prev = word
        return ''.join(parts)
# ==
def _count(substring, signal):
"""
return the number of times substring occurs in signal
"""
return len(signal.split(substring)) - 1
def guess_signal_argcount(signal):
    """
    Guess the number of arguments in a Qt signal string such as
    "func()" or "func(val)" or "func(val, val)".

    @param signal: a non-empty signal string
    @return: the guessed argument count (an int >= 0)
    """
    # isinstance replaces the original `type(signal) == type("")` check
    assert signal and isinstance(signal, str)
    commas = signal.count(',')
    if commas:
        # n commas separate n+1 arguments
        return commas + 1
    # no commas: distinguish "func()" (0 args) from "func(val)" (1 arg);
    # quick kluge to ignore whitespace, so "func( )" counts as empty parens
    compact = ''.join(signal.split())
    if '()' in compact:
        return 0
    return 1
def args_info(func1): #bruce 071004 revised implem and return value format
    """
    Given a function or method object, try to find out what argument lists
    it can accept (ignoring the issue of the names of arguments that could
    be passed either positionally or by name).
    Return value is a tuple (success, minargs, maxargs, any_kws_ok),
    where success is a boolean saying whether we succeeded in finding out anything for sure
    (if not, the other tuple elements are "loose guesses" or are None);
    minargs is the minimum number of positional args the function can be called with;
    maxargs is the maximum number of positional args the function can be called with
    (assuming no keyword arguments fill those positions);
    and any_kws_ok says whether the function can accept arbitrary keyword arguments
    due to use of **kws in its argument declaration.
    If we can't determine any of those values, they are None; as of 071004 this can only
    happen when success is True for maxargs (due to use of *args in the declaration).
    """
    # "complete uncertainty": not successful, unknown min/max, assume kws ok
    DEFAULT_RETVAL = False, None, None, True
    if USE_GETARGSPEC:
        # try using inspect.getargspec.
        # TODO: there might be a smarter wrapper
        # for that function (which knows about bound methods specifically)
        # in some other introspection module.
        # NOTE(review): inspect.getargspec is deprecated and was removed
        # in Python 3.11 -- if this code is ever ported, switch to
        # inspect.signature / getfullargspec.
        from utilities.debug import print_compact_traceback
        try:
            import inspect
            try:
                res = inspect.getargspec(func1)
            except TypeError:
                # failed for <built-in method setEnabled of QGroupBox object at 0x7e476f0>:
                # exceptions.TypeError: arg is not a Python function
                # (happens for close, quit, setEnabled)
                if DEBUG_USE_GETARGSPEC_TypeError:
                    print "USE_GETARGSPEC TypeError for %r: " % (func1,)
                return DEFAULT_RETVAL
            else:
                if DEBUG_GETARGSPEC:
                    print "inspect.getargspec(%r) = %r" % (func1, res)
            # now analyze the results to produce our return value
            args, varargs, varkw, defaults = res
            # Python 2.1 documentation:
            # args is a list of the argument names (it may contain nested lists).
            # varargs and varkw are the names of the * and ** arguments or None.
            # defaults is a tuple of default argument values or None if there are no default arguments;
            # if this tuple has n elements, they correspond to the last n elements listed in args.
            if args and args[0] == 'self':
                # kluge: assume it's a bound method in this case
                ### TODO: verify this by type check,
                # or better, use type check to test this in the first place
                args0 = args
                args = args[1:]
                if defaults is not None and len(defaults) == len(args0):
                    # a default value for self in a bound method would be too weird to believe
                    print "USE_GETARGSPEC sees default value for self in %r argspec %r" % (func1, res)
                    # but handle it anyway
                    defaults = defaults[1:]
                if DEBUG_GETARGSPEC:
                    print "removed self, leaving %r" % ((args, varargs, varkw, defaults),) # remove when works
                pass
            else:
                assert type(args) == type([])
                # first arg is missing (no args) or is not 'self'
                if DEBUG_GETARGSPEC:
                    print "USE_GETARGSPEC sees first arg not self:", args # other info was already printed
            # now use args, varargs, varkw, defaults to construct return values
            success = True
            maxargs = minargs = len(args)
            if defaults:
                # the last len(defaults) positional args are optional
                minargs -= len(defaults)
            if varargs:
                maxargs = None # means infinity or "don't know" (infinity in this case)
            any_kws_ok = not not varkw
            return success, minargs, maxargs, any_kws_ok
        except:
            # deliberately broad: any introspection failure falls back
            # to the "complete uncertainty" value below
            print_compact_traceback("USE_GETARGSPEC failed for %r: " % (func1,) )
            pass
    # if we didn't return something above, just return the stub value
    # which represents complete uncertainty
    return DEFAULT_RETVAL
# end
| NanoCAD-master | cad/src/foundation/undo_internals.py |
# Copyright 2005-2008 Nanorex, Inc. See LICENSE file for details.
"""
undo_archive.py - Collect and organize a set
of checkpoints of model state and diffs between them,
providing undo/redo ops which apply those diffs to the model state.
@author: Bruce
@version: $Id$
@copyright: 2005-2008 Nanorex, Inc. See LICENSE file for details.
[060223: out of date status info from this docstring was
mostly moved to undo_archive-doc.text,
not in cvs; can clean up and commit later #e]
"""
import time
from utilities import debug_flags
from utilities.debug import print_compact_traceback, print_compact_stack, safe_repr
from utilities.debug_prefs import debug_pref, Choice_boolean_False, Choice_boolean_True
import foundation.env as env
import foundation.state_utils as state_utils
from foundation.state_utils import objkey_allocator, obj_classifier, diff_and_copy_state
from foundation.state_utils import transclose, StatePlace, StateSnapshot
from foundation.state_constants import _UNSET_
from foundation.state_constants import UNDO_SPECIALCASE_ATOM, UNDO_SPECIALCASE_BOND
from foundation.state_constants import ATOM_CHUNK_ATTRIBUTE_NAME
from utilities.prefs_constants import historyMsgSerialNumber_prefs_key
from foundation.changes import register_postinit_object
import foundation.changedicts as changedicts # warning: very similar to some local variable names
destroy_bypassed_redos = True # whether to destroy the Redo stack to save RAM,
    # when redos become inaccessible since a new operation is done after an Undo.
    # (The reason you might *not* want to is if you wanted to give UI access
    # to "abandoned alternate futures". We have no plans to do that,
    # but the low-level Undo code would support it just fine.)
    # [As of 060301 this was not yet implemented, but the flag shows where to
    # implement it. As of 060309 this is partly implemented.]
# developer-only debug flags -- must remain False in committed code:
debug_undo2 = False # do not commit with true
debug_change_indicators = False # do not commit with true
# ==
_undo_debug_obj = None
    # a developer can set this to an undoable object
    # (in a debugger or perhaps using a debug menu op (nim))
    # to print history messages about what we do to this
    # specific object
_nullMol = None # set exactly once via set_undo_nullMol (presumably the app's "null molecule" placeholder -- TODO confirm)
def set_undo_nullMol(null_mol):
    """
    Record the given null-molecule object in this module's _nullMol.

    May only be called once per session (enforced by the assert).
    """
    global _nullMol
    assert _nullMol is None
    _nullMol = null_mol
def _undo_debug_message( msg):
    """
    Emit an undo-debugging message to the history, gray and HTML-escaped.
    """
    from utilities.Log import _graymsg, quote_html
    # Note: env.history.message_no_html would mess up _graymsg,
    # so we escape the text ourselves and use the ordinary message call.
    env.history.message( _graymsg( quote_html( msg)))
# ==
def mmp_state_from_assy(archive, assy,
                        initial = False,
                        use_060213_format = False,
                        **options): #bruce 060117 prototype-kluge
    """
    Return a data-like python object encoding all the undoable state in assy
    (or in nE-1 while it's using assy)
    (it might contain refs to permanent objects like elements or atomtypes,
    and/or contain Numeric arrays)
    """
    if use_060213_format:
        # 'initial' doesn't matter for the scanning method (guess as of
        # 060329) -- we never scan before the assy is fully initialized.
        return mmp_state_by_scan(archive, assy, **options)
    # the older non-scan alternative was removed 060301/060314
    assert 0
def mmp_state_by_scan(archive, assy, exclude_layers = ()):
    #060329/060404 added exclude_layers option
    """
    Return a big StateSnapshot object listing all the undoable data
    reachable from assy, except the data deemed to be in layers listed in
    exclude_layers (e.g. atoms and bonds and certain sets of those),
    in a data-like form (but including live objrefs), mostly equivalent
    to a list of objkey/attr/value triples, suitable for mashing back
    into assy (and whatever undoable objs it contains) at a later time.

    Pass exclude_layers = ('atoms') to skip the atom-layer attrs of
    Chunks (though not yet of Jigs), and the entire contents of Atoms
    and Bonds.

    (Somewhat misnamed: other code refers to this as the non-mmp option.)
    """
    scanner = archive.obj_classifier
    # kluge: view-related objects are collected into a side dict and then
    # discarded; selection-related objects are *not* deferred, since for
    # now there aren't any (and looking for them would slow us down until
    # atoms are processed separately in a faster way).
    viewdict = {}
    childobj_dict = scanner.collect_s_children(
        [assy], #k start objects -- need any others?
        deferred_category_collectors = {'view': viewdict},
        exclude_layers = exclude_layers
    )
    ## debug (worked as of 060227; finds LastView objects):
    ## print "debug: didn't bother scanning %d view-related objects: %r" % (len(viewdict), viewdict.values())
    #e figure out which view objects are new or gone? not needed until
    # we're differential, unless this is a good place to make sure the
    # gone ones are properly killed (and don't use up much RAM); if that
    # changed anything we might have to rescan until nothing is killed,
    # since subobjects might die due to this.
    state = scanner.collect_state( childobj_dict,
                                   archive.objkey_allocator,
                                   exclude_layers = exclude_layers )
    # 060408: this replaces the _lastscan kluge from ~060407 (still a
    # kluge, but less bad); it's used (via a reference stored temporarily
    # in the archive by a method-like helper function) by
    # archive.childobj_liveQ, discarded from the archive before it
    # becomes invalid, and dropped from this state by steal_lastsnap when
    # the state becomes mutable (which would invalidate it anyway).
    state._childobj_dict = childobj_dict
    return state # a StateSnapshot
# ==
# assy methods, here so reload works
# [but as of 060407, some or all of them should not be assy methods anyway,
# it turns out]
def assy_become_state(self, stateplace, archive):
    #e should revise args, but see also the last section
    # which uses 'self' a lot [060407 comment]
    """
    [self is an assy]
    replace our state with some new state (in an undo-private format)
    saved earlier by an undo checkpoint,
    using archive to interpret it if necessary
    """
    #bruce 060117 kluge for non-modular undo;
    # should be redesigned to be more sensible
    assert isinstance(stateplace, StatePlace), \
           "should be a StatePlace: %r" % (stateplace,)
    if debug_change_indicators:
        print "assy_become_state begin, chg ctrs =", \
              archive.assy.all_change_indicators()
    try:
        assy_become_scanned_state(archive, self, stateplace)
        # that either does self.update_parts()
        # or doesn't need it done (or both)
    except:
        # deliberately broad except clause:
        #060410 protect against breaking the session (though exceptions in
        # there can end up breaking it anyway, a bit more slowly)
        ###e bring in redmsg code for "see traceback in console",
        # in undo_manager.py, do_main_menu_op
        ###e and generalize that to a helper function to use for
        # most of our debug prints
        msg = "bug: exception while restoring state after Undo or Redo: "
        print_compact_traceback( msg)
    self.changed() #k needed? #e not always correct! (if we undo or redo to
        # where we saved the file)
        #####@@@@@ review after scan_whole 060213
    # the following stuff looks like it belongs in some sort of
    # _undo_update method for class assy,
    # or one of the related kinds of methods,
    # like registered undo updaters ####@@@@ [060407 comment]
    assert self.part # update_parts was done already
    # NOTE(review): self.o is presumably the glpane and self.w the main
    # window -- confirm against assy's definitions
    self.o.set_part( self.part) # try to prevent exception in GLPane.py:1637
    self.w.win_update() # precaution; the mt_update might be needed
    if debug_change_indicators:
        print "assy_become_state end, chg ctrs =", \
              archive.assy.all_change_indicators()
    return
def assy_clear(self): #bruce 060117 draft
    """
    [self is an assy]
    become empty of undoable state (as if just initialized)

    (Destroys self.tree and self.shelf and drops our references to them;
    see the 060322 comments below for caveats about those semantics.)
    """
    # [note: might be called as assy.clear() or self.clear() in this file.]
    self.tree.destroy() # not sure if these work very well yet;
        # maybe tolerable until we modularize our state-holding objects
    #bruce 060322 comments [MOSTLY WRONG but useful anyway, as explained in next lines]:
    # - as I wrote the following, I was thinking that this was an old method of assy, so they might be partly misguided.
    #   I'm not even sure it's ever called, and if it is, it might not be tantamount to assy.destroy as I think I was assuming.
    # - it's unclear whether general semantics of .destroy would condone this destroying their children (as it now does);
    #   guess: desirable to destroy solely-owned children or (more generally) children all of whose parents are destroyed,
    #   but not others (e.g. not Node.part unless all its Nodes are -- note, btw, Part.destroy is not a true destroy method in
    #   semantics).
    # - current implem is not enough (re refs from undo archive). Furthermore, letting the archive handle it might be best,
    #   but OTOH we don't want to depend on their being one, so some more general conventions for destroy (of objs with children)
    #   might actually be best.
    # - ###@@@ why doesn't this destroy self.um if it exists??? guess: oversight; might not matter a lot
    #   since i think the next one destroys it (need to verify). [MISGUIDED, see above]
    self.shelf.destroy()
    self.root = None # memory leak?
    self.tree = self.shelf = None
    self._last_current_selgroup = None # guess, to avoid traceback
    # remaining state this draft does NOT yet clear:
    #e viewdata?
    #e selection?
    #e parts?
    #e glpane state?
    #e effect on MT?
    #e selatom?
    #e mode?
    #e current_movie?
    #e filename?
    #e modified?
    self.changed()
    return
# ==
def assy_become_scanned_state(archive, assy, stateplace):
    """
    Mash the model state recorded in stateplace back into the live
    objects of assy, then run the various post-undo update passes.

    [obs docstring?: stateplace is returned by mmp_state_by_scan,
    and is therefore presumably a StateSnapshot or StatePlace object]
    """
    # TODO: guess as of 060407: this should someday be an archive method,
    # or more specifically a method of AssyUndoArchive,
    # with some of the helpers being stateplace or state methods.
    assert assy is archive.assy
    # in future, maybe an archive can support more than one assy
    # at a time (so this will need to be more general); who knows
    # note [060407] (obs now): the following mashes *all* attrs, changed or not.
    # If we want to make Undo itself faster (as opposed to making checkpoints
    # faster), we have to only mash the changed ones (and maybe do only the
    # needed updates as well).
    # update 060409: The debug_pref else clause tries to do this...
    # and is now the official code for this.
    if not debug_pref("use differential mash_attrs?", Choice_boolean_True):
        # i think it's ok to decide this independently each time...
        # NOTE: this case is deprecated; it ought to work, and might be useful
        # for debugging, but is no longer tested or supported [060409]
        # (it might have problems if the atom.mol = None in differential
        # case is bad in this case which needs _nullMol for some reason)
        attrdicts = stateplace.get_attrdicts_for_immediate_use_only()
        modified = {} # key->obj for objects we modified
        # these steps are in separate functions for clarity,
        # and so they can be profiled individually
        mash_attrs( archive, attrdicts, modified, 'fake invalmols, bug if used')
        fix_all_chunk_atomsets( attrdicts, modified)
        _call_undo_update( modified)
        call_registered_undo_updaters( archive)
        final_post_undo_updates( archive)
    else:
        # NOTE: This is the only tested and supported case as of 060409.
        #
        # We ask stateplace to pull lastsnap to itself
        # (like the above does now) (??)
        # but to tell us a smaller set of attrdicts
        # containing only objects we need to change.
        #
        # I think that this revision in the algorithm (i.e. changing it
        # to use differential mash_attrs) means that some implicit assumptions
        # which are true now, but are only required by some of the code,
        # have become required by more of the code -- namely, an assumption
        # about the stateplace which has lastsnap being the same one
        # that matches the current model state (as it was last mashed
        # into or checkpointed out of).
        # WARNING: the following has an implicit argument which is
        # "which stateplace has the lastsnap". (More info above.)
        attrdicts = stateplace.get_attrdicts_relative_to_lastsnap()
        # for this new differential scheme to be correct, we had to
        # ditch the prior code's support for dflt vals for attrs...
        modified = {}
        # key->obj for objects we modified and which are still alive
        # (the only objs we modify that are not still alive are newly
        # dead atoms, whose .molecule we set to None,
        # and perhaps newly dead chunks, removing atoms and invalidating
        # (see comments in _fix_all_chunk_atomsets_differential))
        # these steps are in separate functions for clarity, and so they can be
        # profiled individually
        ## oldmols = {}
        # kluge [later: I'm not sure if 'kluge' refers to this assignment,
        # or to its being commented out!],
        # described elsewhere -- search for 'oldmols' and this date 060409
        invalmols = {}
        mash_attrs( archive, attrdicts, modified, invalmols,
                    differential = True )
        _fix_all_chunk_atomsets_differential( invalmols)
        _call_undo_update( modified)
        # it needs to only call it for live objects! so we only pass them.
        call_registered_undo_updaters( archive)
        final_post_undo_updates( archive)
    return # from assy_become_scanned_state
def mash_attrs( archive, attrdicts, modified, invalmols, differential = False ):
    #060409 added differential = True support
    """
    [private helper for assy_become_scanned_state:]
    Mash new (or old) attrvals into live objects, using setattr or
    _undo_setattr_;
    record which objects are modified in the given dict, using objkey->obj
    (this will cover all reachable objects in the entire state,
    even if only a few *needed* to be modified,
    in the present implem [still true even with differential atom/bond scan,
    as of 060406] [NOT TRUE ANYMORE if differential passed; 060409])
    @param archive: the undo archive being restored from; we use its
                    obj4key table, its obj_classifier, and its
                    attrcode_is_Atom_chunk method.
    @param attrdicts: maps attrcode (an (attrname, acode) pair) to a dict
                      from objkey to the attrval to store.
    @param modified: a mutable dict mapping objkey -> obj, to which we add
                     each object we store into (see the comments below for
                     the exact rules in the differential case).
    @param differential: if True, only store the attrvals actually present
                         in attrdicts (treating _UNSET_ specially), rather
                         than first resetting every object's attrs to their
                         default values.
    @param invalmols: a dictionary mapping id(mol) -> mol, to which we should
                      add any mol (i.e. any object we find or store in
                      atom.molecule for some atom we're modifying) from whose
                      .atoms dict we successfully remove atom, or to whose
                      .atoms dict we add atom. We assume mol.atoms maps atom.key
                      to atom. By "atom" we mean an instance of a class whose
                      _s_undo_specialcase == UNDO_SPECIALCASE_ATOM.
    @type invalmols: a mutable dictionary which maps id(mol) -> mol, where
                     mol can be any object found or stored as atom.molecule,
                     which has an .atoms dict (i.e. a Chunk).
    """
    # use undotted localvars, for faster access in inner loop:
    modified_get = modified.get
    obj4key = archive.obj4key
    classifier = archive.obj_classifier
    reset_obj_attrs_to_defaults = classifier.reset_obj_attrs_to_defaults
    attrcodes_with_undo_setattr = classifier.attrcodes_with_undo_setattr
    from foundation.state_utils import copy_val as copy
    # OPTIM: ideally, copy_val might depend on attr (and for some attrs
    # is not needed at all), i.e. we should grab one just for each attr
    # farther inside this loop, and have a loop variant without it
    # (and with the various kinds of setattr/inval replacements too --
    # maybe even let each attr have its own loop if it wants,
    # with atoms & bonds an extreme case)
    for attrcode, dict1 in attrdicts.items():
        ##e review: items might need to be processed in a specific order
        attr, acode = attrcode
        might_have_undo_setattr = attrcodes_with_undo_setattr.has_key(attrcode)
            #060404; this matters now (for hotspot)
        for key, val in dict1.iteritems(): # maps objkey to attrval
            obj = modified_get(key)
            if obj is None:
                # first time we're touching this object -- if differential,
                # we might have seen it before but we never got a val other
                # than _UNSET_ for it
                obj = obj4key[key]
                    # TODO: optim: for differential it'd be faster to always
                    # do it this way here; for non-diff this helps us know
                    # when to reset attrs
                if not differential:
                    modified[key] = obj
                        ##@@ unclear whether we sometimes need this in
                        # differential case, but probably we do
                        # [... now i think we don't]
                    reset_obj_attrs_to_defaults(obj)
                    # Note: it's important that that didn't mess with attrs
                    # whose _undo_setattr_xxx we might call, in case that
                    # wants to see the old value. As an initial kluge,
                    # we assert this combination never occurs (in other
                    # code, once per class). When necessary, we'll fix
                    # that by sophisticating this code. [bruce 060404]
                pass
            if differential and archive.attrcode_is_Atom_chunk(attrcode):
                ## was: differential and attrcode == ('molecule', 'Atom'):
                attrname = attrcode[0] # this is 'molecule' as of 071114,
                    # or more generally it's ATOM_CHUNK_ATTRIBUTE_NAME,
                    # but it's correct to grab it from attrcode this way
                _mash_attrs_Atom_chunk(key, obj, attrname, val, modified, invalmols)
                continue
            if differential and val is _UNSET_:
                # this differential case can't support attrs with dflts at all,
                # so they're turned off now [060409]
                ## dflts = classifier.classify_instance(obj).attrcode_defaultvals
                continue # because no need to setattr.
                    # However, we *do* need to do the Atom.molecule kluge
                    # (above) first (which skips this code via 'continue'
                    # if it happens). And we probably also need to update
                    # modified first (as we do before that).
            modified[key] = obj # redundant if not differential (nevermind)
            val = copy(val)
                # TODO: possible future optim: let some attrs declare that this
                # copy is not needed for them [MIGHT BE A BIG OPTIM]
            if might_have_undo_setattr: # optim: this flag depends on attr name
                # note: '_undo_setattr_' is hardcoded in several places
                setattr_name = '_undo_setattr_' + attr
                    # only support attr-specific setattrs for now,
                    # for speed and subclass simplicity
                try:
                    method = getattr(obj, setattr_name)
                        # OPTIM: possible future optim:
                        # store unbound method for this class and attr
                except AttributeError:
                    pass # fall thru, use normal setattr
                else:
                    if obj is _undo_debug_obj:
                        msg = "undo/redo: %r.%s = %r, using _undo_setattr_" % \
                            (obj, attr, val)
                        _undo_debug_message( msg)
                    try:
                        method(val, archive) # note: val might be _Bugval
                    except:
                        # catch exceptions (so single-object bugs don't break
                        # Undo for the rest of the session);
                        # continue with restoring other state,
                        # or at least other attrs
                        msg = "exception in %s for %s; continuing: " % \
                            (setattr_name, safe_repr(obj))
                        print_compact_traceback( msg) #060410
                        # possible TODO items:
                        # - also emit redmsg if not seen_before for this attr
                        #   (or attrcode?) and this undo op;
                        # - revise all these debug prints to use safe_repr
                        # - have an outer exception catcher which still protects
                        #   the session, since if this one is uncaught, we get a
                        #   redmsg about bug in undo, and after that, we keep
                        #   getting checkpoint tracebacks;
                        #   see other comments of this date, or referring to
                        #   redmsg, print_compact_traceback, seen_before
                    continue
            # else, or if we fell through (no _undo_setattr_ method found):
            if obj is _undo_debug_obj:
                _undo_debug_message("undo/redo: %r.%s = %r" % (obj, attr, val))
            setattr(obj, attr, val) ##k might need revision in case:
                # dflt val should be converted to missing val, either as optim
                # or as requirement for some attrs (atomtype?) #####@@@@@
                # dflt vals were not stored, to save space, so missing keys
                # should be detected and something done about them ...
                # this is done for objects we modify, but what about reachable
                # objs with all default values?
                # either look for those on every value we copy and store,
                # or make sure every obj has one attr with no default value
                # or whose actual value is never its default value.
                # This seems true of all our current objs,
                # so I'll ignore this issue for now!
                # ###k (also this remains unreviewed for issues of
                # which objs "exist" and which don't... maybe it only matters
                # as space optim...)
                # SOMEDAY: check if val is _Bugval, maybe print error message
                # or delete it or both (can share code with deleting it if it's
                # attr-specific dflt, once we clean up decls so different dflts
                # mean different attrdicts) [bruce 060311 comment]
            continue
        continue
    return # from mash_attrs
# ==
def _mash_attrs_Atom_chunk(key, obj, attrname, val, modified, invalmols):
    """
    Special case for differential mash_attrs when changing an
    Atom.molecule attribute. (Private helper function for mash_attrs.)
    @param key: the index of obj in modified. (For more info, see calling code.)
        Warning: not the same as obj.key.
    @param obj: an atom (i.e. an instance of a class whose _s_undo_specialcase
                attribute equals UNDO_SPECIALCASE_ATOM).
    @param attrname: 'molecule', or whatever attribute name we change it to.
    @param val: the new value we should store into obj.molecule.
    @param modified: same as in mash_attrs.
    @param invalmols: same as in mash_attrs.
    @return: None (side effects only: updates obj.molecule, the .atoms dicts
             of the old and new mols, and the modified and invalmols dicts).
    """
    #bruce 071114 split this out of mash_attrs,
    # with slight changes to comments and attrname
    # (which used to be hardcoded as 'molecule',
    # and is still hardcoded as obj.molecule gets and sets,
    # and as method names containing 'molecule' in comments).
    assert attrname == 'molecule', "oops, you better revise " \
           "obj.molecule gets/sets in this function"
    assert attrname == ATOM_CHUNK_ATTRIBUTE_NAME # that should match too
    # obj is an Atom and we're changing its .molecule.
    # This is a kluge to help update mol.atoms for its old and
    # new mol.
    #
    # We might be able to do this with Atom._undo_setattr_molecule
    # (though I recall rejecting that approach in the past,
    # for non-differential mash_attrs, but I forget why),
    # but for efficiency and simplicity we do it with this
    # specialcase kluge, which comes in two parts -- this one
    # for removal [of atom from old mol, i guess as of 071113],
    # and the other one below, findable by searching for the
    # ' "add to new mol.atoms" part of the kluge '.
    # [comment revised, bruce 071114]
    mol = obj.molecule
    # mol might be _UNSET_, None, or _nullMol, in which case,
    # we should do nothing, or perhaps it might be a real mol
    # but with obj not in mol.atoms, which might deserve a
    # debug print, but we should still do nothing --
    # simplest implem of all this is just to not be bothered
    # by not finding .atoms, or obj.key inside it.
    try:
        del mol.atoms[obj.key]
    except:
        if mol is not None and mol is not _nullMol and env.debug():
            # remove when works!
            print_compact_traceback(
                "debug fyi: expected exception in del " \
                "mol.atoms[obj.key] for mol %r obj %r" % \
                (mol, obj) )
        pass
    else:
        # this is only needed if the del succeeds (even if it fails
        # for a real mol, due to a bug), since only then did we
        # actually change mol
        invalmols[id(mol)] = mol
    if val is _UNSET_:
        # case 1: obj is becoming dead (it gets no new chunk).
        # I'm not fully comfortable with leaving invalid mol refs in
        # dead atoms, so I make this exception to the general rule
        # in this case. (If we later add back
        # Atom._undo_setattr_molecule, this would mess it up, but
        # hopefully it would take over this kluge's job entirely.)
        ## val = None -- this caused e.g.
        ##   exception in _undo_update for X21; skipping it:
        ##   exceptions.AttributeError:
        ##   'NoneType' object has no attribute 'havelist'
        ## which calls into question calling _undo_update on dead
        ## objs, or more likely, this one's implem,
        ## but maybe it's fine (not reviewed now) and this is why
        ## we have _nullMol, so store that instead of None.
        ## val = _nullMol
        ##   (Let's hope this can only happen when
        ##   _nullMol is not None!!! Hmm, not true! Fix it here,
        ##   or let _undo_update init _nullMol and store it??
        ##   Neither: store None, and let _undo_update do no
        ##   invals at all in that case. Come to think of it,
        ##   the non-differential mash_attrs probably only called
        ##   _undo_update on live objects!! Should we?
        ##   #####@@@@@ issue on bonds too
        val = None
        obj.molecule = val
        # but we *don't* do modified[key] = obj, since
        # we only want that for live objects
        if obj is _undo_debug_obj:
            msg = "undo/redo: %r.%s = %r, and removed it " \
                  "from %r.atoms" % \
                  (obj, attrname, val, mol)
            _undo_debug_message( msg)
        pass
    else:
        # case 2: obj is (presumably) live and moving into the chunk <val>.
        # this is the "add to new mol.atoms" part of the kluge.
        # obj is always a live atom (I think), so val should
        # always be a real Chunk with .atoms.
        mol = val
        obj.molecule = mol
        mol.atoms[obj.key] = obj
        invalmols[id(mol)] = mol
        modified[key] = obj
        if obj is _undo_debug_obj:
            msg = "undo/redo: %r.%s = %r, and added it " \
                  "to .atoms" % \
                  (obj, attrname, mol)
            _undo_debug_message( msg)
        pass
    # this was part of try1 of fixing mol.atoms,
    # but it had an unfixable flaw of being non-incremental,
    # as well as minor flaws with _UNSET_:
    ## # kluge to help fix_all_chunk_atomsets in differential case
    ## oldmols = differential['oldmols'] # part of the kluge
    ## mol = obj.molecule
    ## oldmols[id(mol)] = mol
    return
# ==
def fix_all_chunk_atomsets( attrdicts, modified):
    #060409 NOT called for differential mash_attrs;
    # for that see _fix_all_chunk_atomsets_differential
    """
    [private helper for assy_become_scanned_state:]
    Given the set of live atoms (deduced from attrdicts)
    and their .molecule attributes (pointing from each atom to its
    owning chunk, which should be a live chunk),
    replace each live chunk's .atoms attr (dict from atom.key to atom,
    for all its atoms) with a recomputed value, and do all(??)
    necessary invals.
    WARNING: CURRENT IMPLEM MIGHT ONLY BE CORRECT IF ALL LIVE CHUNKS
    HAVE ATOMS. [which might be a bug, not sure ##@@@@]
    NOTE: this function is effectively disabled -- it always fails at the
    unconditional 'assert 0' partway through, since the hardcoded attrcode
    it computes is no longer valid (see the comments at that assert).
    """
    modified_get = modified.get
    #bruce 060314 this turned out not to be useful; leave it as an example
    # until analogous stuff gets created (since the principle of organization
    # is correct, except for being nonmodular)
    ## if 1:
    ##     ####@@@@ KLUGE: somehow we happen to know that we need to process mols_with_invalid_atomsets here,
    ##     # before running any obj._undo_update functions (at least for certain classes).
    ##     # We can know this since we're really (supposed to be) a method on AssyUndoArchive (so we know all about
    ##     # the kinds of data in assys). [bruce 060313]
    ##     for chunk in archive.mols_with_invalid_atomsets.values():
    ##         # chunk had one or more atoms leave it or join it, during the setattrs above
    ##         # (actually, during some _undo_setattr_molecule on those atoms; they kept its .atoms up to date,
    ##         # but saved invals for here, for efficiency)
    ##         chunk.invalidate_atom_lists()
    ##         # this effectively inlines that part of chunk.addatom and delatom not done by atom._undo_setattr_molecule
    ##         # (we could just add chunk into modified (localvar), but this is more efficient (maybe))
    # that has replaced .molecule for lots of atoms; need to fix the .atoms dicts of those mols [060314]
    ## mols = oldmols or {} #060409 kluge [; note, if caller passes {} this uses a copy instead, but that's ok.]
    #### if oldmols and env.debug():
    ####     print "got oldmols = %r" % (oldmols,) #### contains _UNSET_, not sure how that's possible ... i guess from new atoms?? no...
    ####     # anyway, nevermind, just zap it.
    ## if oldmols:
    ##     from model.chunk import _nullMol
    ##     for badmol in (None, _nullMol, _UNSET_):
    ##         # I don't know why these can be in there, but at least _nullMol can be; for now just work around it. ###@@@
    ##         if env.debug():
    ##             if mols.has_key(id(badmol)):
    ##                 print "debug: zapping %r from oldmols" % (badmol,)
    ##         oldmols.pop(id(badmol),None)
    ##     if env.debug():
    ##         print "debug: repaired oldmols is", oldmols
    ##     pass
    ## ## mols = {}
    mols = {}
    molcode = (ATOM_CHUNK_ATTRIBUTE_NAME, 'Atom') # KLUGE; we want attrcode for Atom.molecule;
        # instead we should just get the clas for Atom and ask it!
        ### how will I know if this breaks? debug code for it below
        # can't be left in...
    assert 0, "this code is no longer valid, since Atom might have subclasses soon (with different names)"
        # not worth the effort to port it re those changes,
        # since it never runs anymore [bruce 071114]
    moldict = attrdicts.get(molcode, {})
        # Note: the value {} can happen, when no Chunks (i.e. no live atoms)
        # were in the state!
        # (this is a dict from atoms' objkeys to their mols,
        # so len is number of atoms in state;
        # i think they are all *found* atoms but as of 060330
        # they might not be all *live* atoms,
        # since dead mol._hotspot can be found and preserved.
        # [no longer true, 060404])
        # warning: moldict is not a dict of mols, it's a dict from
        # atom-representatives to those atoms' mols;
        # it contains entries for every atom represented in the state
        # (i.e. only for our, live atoms, if no bugs)
    if 0 and env.debug():
        ###@@@ disable when works; reenable whenever acode scheme changes
        print "debug: len(moldict) == %d; if this is always 0 we have a bug, " \
              "but 0 is ok when no atoms in state" % (len(moldict))
    for atom_objkey, mol in moldict.iteritems():
        mols[id(mol)] = mol
    ######@@@@@@ 060409 BUG in differential case: mols doesn't include
    # chunks that lost atoms but didn't gain any!
    # (though moldict does include the atoms which they lost.)
    # (could we resolve it by recording diffs in number of atoms per
    #  chunk, so we'd know when to recompute set of atoms??
    #  or is it better just to somehow record the old vals of atom.molecule
    #  that we change in mash_attrs? caller could pass that in.)
    # Ok, we're having caller pass in oldmols, for now.
    for badmol in (None, _nullMol):
        if mols.has_key(id(badmol)):
            # should only happen if undoable state contains killed or
            # still-being-born atoms; I don't know if it can
            # (as of 060317 tentative fix to bug 1633 comment #1, it can --
            # _hotspot is S_CHILD but can be killed [wrong now...])
            # 060404: this is no longer true, so zap the
            # ' and badmol is None:' from the following:
            if env.debug():
                print "debug: why does some atom in undoable state " \
                      "have .molecule = %r?" % (badmol,)
            del mols[id(badmol)]
            # but we also worry below about the atoms that had it
            # (might not be needed for _nullMol; very needed for None)
    for mol in mols.itervalues():
        mol.atoms = {}
    for atom_objkey, mol in moldict.iteritems():
        if mol not in (None, _nullMol):
            atom = modified_get(atom_objkey)
            mol.atoms[ atom.key ] = atom # WARNING: this inlines some of
                # Chunk.addatom; note: atom.key != atom_objkey
    for mol in mols.itervalues():
        try: # try/except is just for debugging
            mol.invalidate_atom_lists() # inlines some more of Chunk.addatom
        except:
            msg = "bug: why does this happen, for mol %r atoms %r? " % \
                  (mol, mol.atoms)
            print_compact_traceback( msg)
    return # from fix_all_chunk_atomsets
def _fix_all_chunk_atomsets_differential(invalmols):
"""
Fix or properly invalidate the passed chunks, whose .atoms dicts
the caller has directly modified (by adding or removing atoms which entered
or left those chunks) without doing any invalidations or other changes.
@param invalmols: a dict mapping id(mol) -> mol
where mol can be anything found or stored in atom.molecule
which has an .atoms dict of the same kind as Chunk.
(As of 071114, such a mol is always a Chunk.)
"""
for mol in invalmols.itervalues():
if mol is not _nullMol:
# we exclude _nullMol in case it has an invalidate_atom_lists method
# that we shouldn't call
# [implem revised, bruce 071105; should be equivalent to prior code]
try:
method = \
mol._f_invalidate_atom_lists_and_maybe_deallocate_displist
#bruce 071105 changing this to call this new method
# _f_invalidate_atom_lists_and_maybe_deallocate_displist
# rather than just invalidate_atom_lists.
# Unfortunately it has no clear specification in the API
# so we can't give it a general name. Ideally we'd have a
# separate scan to call something like undo_update_dead
# on all newly dead objects, but for now, that's not easy to
# add (since I'd have to analyze the code to figure out
# where), and I only need it for chunks for one purpose.
except:
## if mol is None or mol is _UNSET_:
# not sure which of these can happen;
# not worth finding out right now
if mol is _UNSET_:
# actually i think that's the only one that can happen
print "fyi: mol is _UNSET_ in _fix_all_chunk_atomsets_differential"
#bruce 071114 - caller looks like it prevents this, let's find out
# (if not, fix the docstrings I just added which imply this!)
continue
msg = "bug (or fyi if this is None or _nullMol), " \
"for mol %r: " % (mol,)
print_compact_traceback( msg)
else:
method() # WARNING: inlines some of Chunk.addatom
# I think we can call this on newly dead Chunks;
# I'm not 100% sure that's ok, but I can't see a
# problem in the method and I didn't find a bug in
# testing. [060409]
return
def _call_undo_update(modified):
    #060409 for differential mash_attrs, it's safe, but are the objs it's
    # called on enough? #####@@@@@
    """
    [private helper for assy_become_scanned_state:]
    Invoke the _undo_update method of every object in <modified> (i.e.
    every object we might have modified and which is still alive) which
    defines that method. Exceptions from individual objects are printed
    and swallowed, so one buggy object can't break Undo for the session.
    [Note: caller (in differential mash_attrs case)
    presently only modifies dead objects in one special case
    (atom.molecule = None), but even if that changes, _undo_update should
    only be called on live objects; it might be useful to have other
    _undo_xxx methods called on newly dead objects, or on dead ones
    being brought back to life (before mash_attrs happens on them). #e]
    """
    # OPTIM: two big missing optims:
    # - realizing we only modified a few objects, not all of them in the state
    #   (up to the caller) [this is now implemented as of 060409]
    # - knowing that some classes don't define this method,
    #   and not scanning their instances looking for it
    ###e zap S_CACHE attrs? or depend on update funcs to
    # zap/inval/update them? for now, the latter. Review this later. ###@@@
    for live_obj in modified.itervalues():
        try:
            updater = live_obj._undo_update
            ##e btw, for chunk, it'll be efficient to know which attrs need
            # which updating -- which means, track attrs used, now! In fact,
            # it means doing attrs in layers and updating objs per layer
            # (fine if inval-style; objs could also re-register themselves
            # for later layers). How to declare attrlayers is not yet clear;
            # maybe test-driven devel will show a simple way.
        except AttributeError:
            # most objects define no _undo_update; nothing to do for them
            continue
        try:
            updater()
        except:
            msg = "exception in _undo_update for %s; skipping it: " % \
                  safe_repr(live_obj)
            print_compact_traceback( msg)
            #e should also print once-per-undo-op history message;
            # maybe env.seen_before plus a user-op counter
            # can be packaged into a once-per-user-op-message
            # helper function? [060410 suggestion]
        continue
    return # from _call_undo_update
# registry of "whole-model" undo updater functions, kept in the order they
# should run; each is called as func(archive, assy) by
# call_registered_undo_updaters, below
updaters_in_order = []
def call_registered_undo_updaters(archive):
    #060409 seems likely to be safe/sufficient for differential mash_attrs,
    # but too slow ###@@@
    """
    [private helper for assy_become_scanned_state:]
    Run every updater registered in updaters_in_order, in registration
    order, passing each one (archive, archive.assy). (These updaters act
    on the overall model state -- the per-object _undo_update methods on
    individual changed objects should already have been called.)
    An exception raised by any one updater is printed and swallowed,
    and the remaining updaters still run.
    [#doc more?]
    """
    assy = archive.assy
    for updater in updaters_in_order:
        try:
            updater(archive, assy)
        except:
            msg = "exception in some registered updater %s; skipping it: " % \
                  safe_repr(updater)
            print_compact_traceback( msg)
        continue
    return # from call_registered_undo_updaters
def final_post_undo_updates(archive):
    #060409 seems likely to be safe/sufficient for differential mash_attrs ##k
    """
    [private helper for assy_become_scanned_state:]
    Do the last round of whole-assembly updates after an Undo or Redo:
    run _undo_update_always on every Part, update the assy's part
    structure, and clear hover-highlighting state in the glpane.
    """
    assy = archive.assy
    #e now inval things in every Part, especially selatoms, selmols,
    # molecules, maybe everything it recomputes [###k is this nim or not?]
    for toplevel_node in assy.topnodes_with_own_parts():
        # KLUGE: _undo_update_always is supposed to be called on every
        # object that defines it, not only on Parts
        toplevel_node.part._undo_update_always()
    assy.update_parts()
        # I thought "we don't need this, since [true] we're about to do it
        # as a pre-checkpoint update", but although we are, we need this
        # one anyway, since it calls assy.changed() and we have to get that
        # over with before restoring the change counters after Undo/Redo.
        # [060406]
    # Clear hover-highlighting state [060302 4:45pm], fixing some unreported
    # bugs about undo when highlighting is active -> inappropriate
    # highlighting.
    mainwin = env.mainwindow()
    current_glpane = mainwin.glpane
    current_glpane.selatom = current_glpane.selobj = None
        # this works; but is there a better way (like some GLPane method)?
        # if there is, not sure it's fully safe!
        # [there is set_selobj, but its updates might be unsafe for
        # this -- not reviewed, but seems likely. -- bruce 060726]
        #e Also, ideally glpane should do this itself in
        # _undo_update_always, which we should call.
    return # from final_post_undo_updates
# ==
# [comment during development:]
# We'll still try to fit into the varid/vers scheme for multiple
# out of order undo/redo, since we think this is highly desirable
# for A7 at least for per-part Undo. But varids are only needed
# for highlevel things with separate ops offered.
# So a diff is just a varid-ver list and a separate operation...
# which can ref shared checkpoints if useful. It can ref both
# cps and do the diff lazily if you want. It's an object,
# which can reveal these things... it has facets... all the
# same needs come up again... maybe it's easier to let the facets
# be flyweight and ref shared larger pieces and just remake themselves?
# serial number for Checkpoint objects; incremented by Checkpoint.__init__
# and recorded as cp.cp_counter (see the comments there for intended uses)
_cp_counter = 0
class Checkpoint:
"""
Represents a slot to be filled (usually long after object is first created)
with a snapshot of the model's undoable state. API permits snapshot data (self.state) to be
filled in later, or (if implem supports) defined by a diff from the state of another checkpoint.
Some things done in the calling code (esp. fill_checkpoint) will probably be moved into methods of this class eventually.
Self.state will not exist until the state-data is fully defined (unless a future revision supports its being
some sort of lazily-valued expr which indicates a lazily filled diff and a prior checkpoint).
Self.complete is a boolean which is True once the snapshot contents are fully defined. As of 060118 this is
the same as hasattr(self, 'state'), but if self.state can be lazy then it might exist with self.complete still being false.
The main varid_ver might exist right away, but the ones that depend on the difference with prior state won't exist
until the set of differences is known.
Note: it is good to avoid letting a checkpoint contain a reference to its assy or archive or actual model objects,
since those would often be cyclic refs or refs to garbage, and prevent Python from freeing things when it should.
"""
def __init__(self, assy_debug_name = None):
# in the future there might be a larger set of varid_ver pairs based on the data changes, but for now there's
# one main one used for the entire undoable state, with .ver also usable for ordering checkpoints; this will be retained;
# cp.varid will be the same for any two checkpoints that need to be compared (AFAIK).
self.varid = make_assy_varid(assy_debug_name)
global _cp_counter
_cp_counter += 1
self.cp_counter = _cp_counter ###k not yet used; might help sort Redos, but more likely we'll use some metainfo
## self.ver = 'ver-' + `_cp_counter` # this also helps sort Redos
self.ver = None # not yet known (??)
self.complete = False # public for get and set
if debug_undo2:
print "debug_undo2: made cp:", self
return
def store_complete_state(self, state):
self.state = state # public for get and set and hasattr; won't exist at first
self.complete = True
return
def __repr__(self):
self.update_ver_kluge()
if self.complete:
return "<Checkpoint %r varid=%r, ver=%r, state id is %#x>" % (self.cp_counter, self.varid, self.ver, id(self.state))
else:
#e no point yet in hasattr(self, 'state') distinction, since always false as of 060118
assert not hasattr(self, 'state')
return "<Checkpoint %r varid=%r, ver=%r, incomplete state>" % (self.cp_counter, self.varid, self.ver)
pass
def varid_ver(self): ###@@@ this and the attrs it returns will be WRONG when there's >1 varid_ver pair
"""Assuming there is one varid for entire checkpoint, return its varid_ver pair.
Hopefully this API and implem will need revision for A7 since assumption will no longer be true.
"""
self.update_ver_kluge()
if self.ver is None:
if debug_flags.atom_debug:
print "atom_debug: warning, bug?? self.ver is None in varid_ver() for", self ###@@@
return self.varid, self.ver
def update_ver_kluge(self):
try:
mi = self.metainfo # stored by outside code (destined to become a method) when we're finalized
except:
pass
else:
self.ver = mi.assy_change_indicators
return
def next_history_serno(self):#060301
try:
mi = self.metainfo # stored by outside code (destined to become a method) when we're finalized
except:
return -1
else:
return mi.next_history_serno
pass
pass # end of class Checkpoint
class SimpleDiff:
"""
Represent a diff defined as going from checkpoint 0 to checkpoint 1
(in that order, when applied);
also considered to be an operation for applying that diff in that direction.
Also knows whether that direction corresponds to Undo or Redo,
and might know some command_info about the command that adds changes to this diff
(we assume only one command does that, i.e. this is a low-level unmerged diff and commands are fully tracked/checkpointed),
and (maybe, in future) times diff was created and last applied, and more.
Neither checkpoint needs to have its state filled in yet, except for our apply_to method.
Depending on how and when this is used they might also need their varid_ver pairs for indexing.
[Note: error of using cp.state too early, for some checkpoint cp, are detected by that attr not existing yet.]
"""
default_opts = dict()
default_command_info = dict(cmdname = "operation", n_merged_changes = 0)
# client code will directly access/modify self.command_info['n_merged_changes'] [060312], maybe other items;
# but to save ram, we don't do _offered = False or _no_net_change = False, but let client code use .get() [060326]
suppress_storing_undo_redo_ops = False
assert_no_changes = False
destroyed = False
    def __init__(self, cp0, cp1, direction = 1, **options):
        """
        Make a diff going from checkpoint cp0 to checkpoint cp1.
        direction +1 means forwards in time (apply diff for redo), -1 means backwards (apply diff for undo),
        though either way we have to keep enough info for both redo and undo;
        as of 060126 there are no public options, but one private one for use by self.reverse_order()
        ('_command_info_dict', which makes self share one command_info dict with the diff that created self).
        """
        self.cps = cp0, cp1
        self.direction = direction
        self.options = options # self.options is for use when making variants of self, like reverse_order
        self.opts = dict(self.default_opts)
        self.opts.update(options) # self.opts is for extracting actual option values to use (could be done lazily) [not yet used]
        self.key = id(self) #e might be better to not be recycled, but this should be ok for now
        # self.command_info: make (or privately be passed) a public mutable dict of command metainfo,
        # with keys such as cmd_name (#e and history sernos? no, those should be in checkpoint metainfo),
        # shared between self and self.reverse_order() [060126]
        self.command_info = self.options.get('_command_info_dict', dict(self.default_command_info) )
        self.options['_command_info_dict'] = self.command_info # be sure to pass the same shared dict to self.reverse_order()
        return
    def finalize(self, assy):
        ####@@@@ 060123 notyetcalled; soon, *this* is what should call fill_checkpoint on self.cps[1]!
        # let's finish this, 060210, so it can be tested, then done differently in BetterDiff
        """Caller, which has been letting model changes occur which are meant to be recorded in this diff,
        has decided it's time to finalize this diff by associating current model state with self.cps[1]
        and recording further changes in a new diff, which we(?) will create and return (or maybe caller will do that
        after we're done, not sure; hopefully caller will, and it'll also cause changetracking to go there --
        or maybe it needs to have already done that??).
        But first we'll store whatever we need to about the current model state, in self and/or self.cps.
        So, analyze the low-level change records to make compact diffs or checkpoint saved state,
        and record whatever else needs to be recorded... also make up new vers for whatever varids we think changed
        (the choice of coarseness of which is a UI issue) and record those in this diff and also in whatever model-related place
        records the "current varid_vers" for purposes of making undo/redo available ops for menus.
        NOTE: not yet implemented -- beyond the assert, the steps are only
        sketched in the comments below (and this method is not yet called).
        """
        assert self.direction == 1
        #e figure out what changed in model, using scan of it, tracking data already in us, etc
        #####@@@@@ call fill_checkpoint
        #e make up vers for varids changed
        # *this* is what should call fill_checkpoint on self.cps[1]!
def RAM_guess_when_finalized(self): #060323
"""Return a rough guess about the RAM usage to be attributed to keeping this Undoable op on the Undo stack.
This must be called only when this diff has been finalized and is still undoable.
Furthermore, as a temporarily kluge, it might be necessary to call it only once and/or in the proper Undo stack order.
"""
# Current implem is basically one big kluge, starting with non-modularity of finding the DiffObj to ask.
# When the Undo stack becomes a graph (or maybe even per-Part stacks), we'll have to replace this completely.
assert self.direction == -1 # we should be an Undo diff
s1 = self.cps[1].state
s0 = self.cps[0].state
return s0._relative_RAM(s1)
def reverse_order(self):#####@@@@@ what if merged??
return self.__class__(self.cps[1], self.cps[0], - self.direction, **self.options)
def you_have_been_offered(self): #bruce 060326 re bug 1733
"""You (self) have been offered in the UI (or made the Undo toolbutton become enabled),
so don't change your mind and disappear, even if it turns out you get merged
with future ops (due to lack of autocp) and you represent no net changes;
instead, just let this affect your menu_desc. One important reason for this is
that without it, a user hitting Undo could inadvertently Undo the *prior* item
on the Undo stack, since only at that point would it become clear that you contained
no net changes.
"""
self.command_info['_offered'] = True # this alone is not enough. we need a real change counter difference
# or we have no way to get stored and indexed. Well, we have one (for awhile anyway -- that's why we got offered),
# but we also need a real diff, or they'll get reset... we may even need a real but distinct change counter for this
# since in future we might reset some chg counters if their type of diff didn't occur.
# (Or would it be correct to treat this as a model change? no, it would affect file-mod indicator... should it??? yes...)
# What we ended up doing is a special case when deciding how to store the diff... search for _offered to find it.
return
    def menu_desc(self):#####@@@@@ need to merge self with more diffs, to do this?? -- yes, redo it inside the MergingDiff ##e
        """
        Return the menu text for this op: "Undo" or "Redo", optionally
        followed by a history serial-number range (when the serial-number
        pref is set) and then either a merged-changes description or the
        command name; may end with "(no net change)".
        """
        main = self.optype() # "Undo" or "Redo"
        include_history_sernos = not not env.prefs[historyMsgSerialNumber_prefs_key] #060309; is this being checked at the right time??
        if include_history_sernos:
            hist = self.history_serno_desc()
        else:
            hist = ""
        if self.command_info['n_merged_changes']:
            # in this case, put "changes" before hist instead of cmdname after it
            # (maybe later it'll be e.g. "selection changes")
            ##e should we also always put hist in parens?...
            # now, hist also sees this flag and has special form BUT ONLY WHILE BEING COLLECTED... well, it could use parens always...
            ##e in case of only one cmd so far, should we not do this different thing at all?
            # (not sure, it might help you realize autocp is disabled)
            changes_desc = self.changes_desc()
            if changes_desc:
                main += " %s" % (changes_desc,)
        if hist:
            # this assumes op_name is present; if not, might be better to remove '.' and maybe use parens around numbers...
            # but i think it's always present (not sure) [060301]
            main += " %s" % (hist,)
        if not self.command_info['n_merged_changes']:
            op_name = self.cmdname()
            if op_name:
                main += " %s" % (op_name,)
        else:
            # [bruce 060326 re bug 1733:]
            # merged changes might end up being a noop, but if they were offered in the UI, we can't retract that
            # or we'll have bug 1733, or worse, user will Undo the prior op without meaning to. So special code elsewhere
            # detects this, keeps the diff in the undo stack, and stores the following flag:
            if self.command_info.get('_no_net_change', False):
                main += " (no net change)"
        return main
    def history_serno_desc(self): #060301; split out 060312
        """
        Describe the range of history serial numbers covered by this diff,
        for use in Undo/Redo menu text. Always return a string, possibly
        empty. Must also work when the last serno is unknown (reported as
        -1 by next_history_serno).
        """
        # order the checkpoints so start_cp is the one this op moves away from
        if self.direction == 1:
            start_cp, end_cp = self.cps
        else:
            end_cp, start_cp = self.cps
        s1 = start_cp.next_history_serno()
        s2 = end_cp.next_history_serno()
        if s2 == -1: #060312 guess for use when current_diff.command_info['n_merged_changes'], and for direction == -1
            # end serno not yet known -- describe an open-ended range
            if not self.command_info['n_merged_changes'] and env.debug():
                print "debug: that's weird, command_info['n_merged_changes'] ought to be set in this case"
            return "(%d.-now)" % (s1,)
        n = s2 - s1 # number of history messages covered by this diff
        if n < 0:
            print "bug in history serno order", s1, s2, self.direction, self
            # this 's1,s2 = s2,s1', the only time it happened, just made it more confusing to track down the bug, so zap it [060312]
            # (note: we deliberately fall through using s1, s2 unchanged)
            pass
        ## n = -n
        ## s1,s2 = s2,s1
        range0 = s1
        range1 = s2 - 1
        if n == 0:
            ## hist = ""
            #060304 we need to know which history messages it came *between*. Not sure what syntax should be...
            # maybe 18 1/2 for some single char that looks like 1/2?
            ## hist = "%d+" % range1 # Undo 18+ bla... dubious... didn't look understandable.
            hist = "(after %d.)" % range1 # Undo (after 18.) bla
            if debug_pref("test unicode one-half in Undo menutext", Choice_boolean_False): #060312
                hist = u"%d\xbd" % range1 # use unicode "1/2" character instead ( without u"", \xbd error, \x00bd wrong )
        elif n == 1:
            assert range0 == range1 # since range1 = s2 - 1 = s1 = range0 when n == 1
            hist = "%d." % range1
        else:
            hist = "%d.-%d." % (range0, range1) #bruce 060326 added the first '.' of the two, for uniformity
        # while changes are being merged, parenthesize the range unless it already is
        if self.command_info['n_merged_changes'] and hist and hist[0] != '(' and hist[-1] != ')':
            return "(%s)" % (hist,)
        return hist
def cmdname(self):
return self.command_info['cmdname']
def changes_desc(self):#060312
return "changes" #e (maybe later it'll be e.g. "selection changes")
def varid_vers(self):#####@@@@@ need to merge self with more diffs, to do this??
"list of varid_ver pairs for indexing"
return [self.cps[0].varid_ver()]
    def apply_to(self, archive):###@@@ do we need to merge self with more diffs, too? [see class MergingDiff]
        """
        Apply this diff-operation to the model objects held by the given
        archive: force the model into the state saved in our destination
        checkpoint, then restore that checkpoint's view and change
        indicators, and record it as the last checkpoint reached.
        """
        cp = self.cps[1] # destination checkpoint (the state this op moves the model to)
        assert cp.complete
        assy = archive.assy
        #bruce 060407 revised following call, no longer goes thru an assy method
        assy_become_state(assy, cp.state, archive)
        ###e this could use a print_compact_traceback with redmsg...
        # note: in present implem this might effectively redundantly do some of restore_view() [060123]
        # [as of 060407 i speculate it doesn't, but sort of by accident, i.e. due to deferred category collection of view objs]
        cp.metainfo.restore_view(assy)
        # note: this means Undo restores view from beginning of undone command,
        # and Redo restores view from end of redone command.
        #e (Also worry about this when undoing or redoing a chain of commands.)
        # POSSIBLE KLUGE: with current [not anymore] implem,
        # applying a chain of several diffs could be done by applying the last one.
        # The current code might perhaps do this (and thus become wrong in the future)
        # when the skipped diffs are empty or have been merged -- I don't know.
        # This will need fixing once we're merging any nonempty diffs. ##@@ [060123]
        # Update 060407: I don't think this is an issue now (for some time), since we traverse diffs on the stack
        # to reach checkpoints, and then tell those "restore the state you saved",
        # and those do this by merging a chain of diffs but that's their business.
        cp.metainfo.restore_assy_change_indicators(assy) # change current-state varid_vers records
        archive.set_last_cp_after_undo(cp) #060301
        return
def optype(self):
return {1: "Redo", -1: "Undo"}[self.direction]
def __repr__(self):
if self.destroyed:
return "<destroyed SimpleDiff at %#x>" % id(self) #untested [060301]
return "<%s key %r type %r from %r to %r>" % (self.__class__.__name__, self.key, self.optype(), self.cps[0], self.cps[1])
def destroy(self):
self.destroyed = True
self.command_info = self.opts = self.options = self.cps = None
# no cycles, i think, but do the above just in case
###e now, don't we need to remove ourself from archive.stored_ops?? ###@@@
pass # end of class SimpleDiff
# ==
# commenting out SharedDiffopData and BetterDiff 060301 1210pm
# since it looks likely SimpleDiff will use differentially-defined states without even knowing it,
# just by having states in the checkpoints which know it themselves.
##class SharedDiffopData: ##@@ stub, apparently, as of 060216;....
# ==
_last_real_class_for_name = {}
def undo_classname_for_decls(class1):
"""
Return Undo's concept of a class's name for use in declarations,
given either the class or the name string. For dotted classnames (of builtin objects)
this does not include the '.' even if class1.__name__ does (otherwise the notation
"classname.attrname" would be ambiguous). Note that in its internal object analysis tables,
classnames *do* include the '.' if they have one (if we have any tables of names,
as opposed to tables of classes).
"""
try:
res = class1.__name__ # assume this means it's a class
except AttributeError:
res = class1 # assume this means it's a string
else:
res = res.split('.')[-1] # turn qt.xxx into xxx
assert not '.' in res # should be impossible here
#e should assert it's really a class -- need to distinguish new-style classes here??
_last_real_class_for_name[res] = class1 ##k not sure if this is enough to keep that up to date
assert type(res) == type("name")
assert not '.' in res, 'dotted classnames like %r are not allowed, since then "classname.attrname" would be ambiguous' % (res,)
return res
class _testclass: pass
assert undo_classname_for_decls(_testclass) == "_testclass"
assert undo_classname_for_decls("_testclass") == "_testclass"
_classname_for_nickname = {} # maps permitted nickname -> real declaration name

def register_class_nickname(name, class1): # not used as of before 080702, but should be kept
    """
    Permit <name>, in addition to class1's own declaration name
    (class1.__name__, or class1 itself if it's a string), to be used in
    declarations involving class1 to the Undo system.

    Meant mainly for when the actual class name is deprecated and the class
    is slated to be renamed to <name>, so declarations can use the preferred
    name in advance. Internally (for state decls) the Undo system still
    identifies classes by __name__ (or its last component, if __name__
    contains a '.' like for some built-in or extension classes).
    """
    assert not '.' in name
    canonical_name = undo_classname_for_decls(class1)
    _classname_for_nickname[name] = canonical_name
    return
#e refile, in this file or another? not sure.
def register_undo_updater( func, updates = (), after_update_of = () ):
    # Note: as of 060314, this was nim, with its effect kluged elsewhere.
    # But a month or two before 071106 that changed; ericm made it active
    # in order to remove an import cycle.
    """
    Register <func> to be called on 2 args (archive, assy) every time some AssyUndoArchive mashes some
    saved state into the live objects of the current state (using setattr) and needs to fix things that might
    have been messed up by that or might no longer be consistent.
    The optional arguments <updates> and <after_update_of> affect the order in which the registered funcs
    are run, as described below. If not given for some func, its order relative to the others is arbitrary
    (and there is no way for the others to ask to come before or after it, even if they use those args).
    The ordering works as follows: to "fully update an attr" means that Undo has done everything it might
    need to do to make the values (and presence or absence) of that attr correct, in all current objects of
    that attr's class (including objects Undo needs to create as part of an Undo or Redo operation).
    The things it might need to do are, in order: its own setattrs (or delattrs?? ###k) on that attr;
    that class's _undo_update function; and any <funcs> registered with this function (register_undo_updater)
    which list that attr or its class(??) in their optional <updates> argument (whose syntax is described below).
    That defines the work Undo needs to do, and some of the ordering requirements on that work. The only other
    requirement is that each registered <func> which listed some attrs or classes in its optional
    <after_update_of> argument can't be run until those attrs or classes have been fully updated.
    (To "fully update a class", listed in <after_update_of>, means to update every attr in it which has been
    declared to Undo, except any attrs listed individually in the <updates> argument. [###k not sure this is right or practical])
    (If these requirements introduce a circularity, that's a fatal error we'll detect at runtime.)
    The syntax of <updates> and <after_update_of> is a sequence of classes, classnames, or "classname.attrname"
    strings. (Attrname strings can't be given without classnames, even if only one class has a declared attribute
    of that name.)
    Classnames don't include module names (except for builtin objects whose __class__._name__ includes them).
    All classes of the same name are considered equivalent. (As of 060223 the system isn't designed
    to support multiple classes of the same name, but it won't do anything to stop you from trying. In the future
    we might add explicit support for runtime reloading of classes and upgrading of their instances to the new versions
    of the same classes.)
    Certain classes have nicknames (like 'Chunk' for class molecule,
    before it was renamed) which can be used here because they've been
    specifically registered as permitted, using register_class_nickname.
    [This docstring was written before the code was, and when only one <func> was being registered so far,
    so it's subject to revision from encountering reality (or to being out of date if that revision hasn't
    happened yet). ###k]
    """
    global updaters_in_order
    # the following single line implements a simpler registration than
    # the one envisioned by the pseudocode, but adequate for current
    # usage.
    updaters_in_order += [func]
    ## print "register_undo_updater ought to register %r but it's nim, or maybe only use of the registration is nim" % func
    # pseudocode for the envisioned ordering implementation; the noop calls
    # make it harmless to execute while documenting the intended data flow
    if "pseudocode":
        from utilities.constants import noop
        somethingYouHaveToDo = progress_marker = canon = this_bfr_that = noop
        task = somethingYouHaveToDo(func)
        for name in updates:
            name = progress_marker(canon(name), 'target')
            this_bfr_that(task, name)
        for name in after_update_of:
            name = progress_marker(canon(name), 'requirement')
            # elsewhere we'll set up "attr (as target) comes before class comes before attr (as requirement)" and maybe
            # use different sets of attrs on each side (S_CACHE or not)
            this_bfr_that(name, task)
    return
""" example:
register_undo_updater( _undo_update_Atom_jigs,
updates = ('Atom.jigs', 'Bond.pi_bond_obj'),
after_update_of = ('Assembly', Node, 'Atom.bonds') # Node also covers its subclasses Chunk and Jig.
# We don't care if Atom is updated except for .bonds, nor whether Bond is updated at all,
# which is good because *we* are presumably a required part of updating both of those classes!
)
"""
# ==
# These three functions go together as one layer of the API for using checkpoints.
# They might be redundant with what's below or above them (most likely above, i.e.
# they probably ought to be archive methods),
# but for now it's clearest to think about them in this separate layer.
def make_empty_checkpoint(assy, cptype = None):
    """
    Create and return a new Checkpoint object holding no state yet.
    Sometime you might want to call fill_checkpoint on it, though in
    the future there will be more incremental options.
    """
    checkpoint = Checkpoint(assy_debug_name = assy._debug_name)
        # makes up checkpoint.ver -- would we ideally do that here, or not?
    #e put cptype inside the constructor? (i think it's always None or 'initial', here)
    checkpoint.cptype = cptype
    checkpoint.assy_change_indicators = None # None means they're not yet known
    return checkpoint
def current_state(archive, assy, **options):
    """
    Return a data-like representation of complete current state of the given assy;
    initial flag [ignored as of bfr 060407] means it might be too early (in assy.__init__) to measure this
    so just return an "empty state".
    On exception while attempting to represent current state, print debug error message
    and return None (which is never used as return value on success).
    Note [060407 ###k]: this returns a StateSnapshot, not a StatePlace.
    """
    try:
        #060208 added try/except and this debug_pref
        # (the debug_pref deliberately triggers the except clause once, to
        #  exercise the error-handling path; it resets itself after firing)
        pkey = "simulate one undo checkpoint bug"
        if debug_pref("simulate bug in next undo checkpoint", Choice_boolean_False, prefs_key = pkey):
            env.prefs[pkey] = False
            assert 0, "this simulates a bug in this undo checkpoint"
        data = mmp_state_from_assy(archive, assy, **options)
        assert isinstance( data, StateSnapshot) ###k [this is just to make sure i am right about it -- i'm not 100% sure it's true - bruce 060407]
    except:
        print_compact_traceback("bug while determining state for undo checkpoint %r; subsequent undos might crash: " % options )
        ###@@@ need to improve situation in callers so crash warning is not needed (mark checkpoint as not undoable-to)
        # in the meantime, it might not be safe to print a history msg now (or it might - not sure); I think i'll try:
        from utilities.Log import redmsg # not sure this is ok to do when this module is imported, tho probably it is
        env.history.message(redmsg("Bug: Internal error while storing Undo checkpoint; it might not be safe to Undo to this point."))
        data = None
    return data
def fill_checkpoint(cp, state, assy): #e later replace calls to this with cp method calls
    """
    Store the given complete state into checkpoint cp, along with metainfo
    (view, change indicators, next history serno, time) captured from assy
    at this moment.

    @type assy: assembly.assembly
    """
    if not isinstance(state, StatePlace):
        # by 060407, callers are expected to pass a StatePlace, not a bare snapshot
        if env.debug():
            print "likely bug: not isinstance(state, StatePlace) in fill_cp, for", state #060407
    env.change_counter_checkpoint() ###k here?? store it??
    assert cp is not None
    assert not cp.complete
    cp.store_complete_state(state)
    # Note: we store assy.all_change_indicators() in two places, cp and cp.metainfo, both of which are still used as of 060227.
    # Each of them is used in more than one place in this file, I think (i.e. 4 uses in all, 2 for each).
    # This ought to be fixed but I'm not sure how is best, so leaving both places active for now. [bruce 060227]
    cp.assy_change_indicators = assy.all_change_indicators() #060121, revised to use all_ 060227
    cp.metainfo = checkpoint_metainfo(assy) # also stores redundant assy.all_change_indicators() [see comment above]
        # this is only the right time for this info if the checkpoint is filled at the right time.
        # We'll assume we fill one for begin and end of every command and every entry/exit into recursive event processing
        # and that ought to be enough. Then if several diffs get merged, we have lots of cp's to combine this info from...
        # do we also need to save metainfo at specific diff-times *between* checkpoints? Not sure yet -- assume no for now;
        # if we need this someday, consider "internal checkpoints" instead, since we might need to split the diffsequence too.
    return
# ==
class checkpoint_metainfo:
    """
    Hold the metainfo applicable at some moment in the undoable state...
    undecided whether one checkpoint and/or diff might have more than one of these.
    E.g. for a diff we might have this at start of first command in it, at first and last diff in it, and at end of command;
    for checkpoint we might have it for when we finalize it.
    Don't hold any ref to assy or glpane itself!
    """
    def __init__(self, assy):
        """
        @type assy: assembly.assembly
        """
        self.set_from(assy) #e might not always happen at init??
    def set_from(self, assy):
        """
        Capture the current view, time, change indicators and next history
        serno from assy (without keeping any reference to assy or glpane).
        """
        try:
            glpane = assy.o # can fail even at end of assy.__init__, but when it does, assy.w.glpane does too
        except:
            # too early for a real view; store a recognizable stub string
            self.view = "initial view not yet set - stub, will fail if you undo to this point"
            if env.debug():#060301 - does this ever happen (i doubt it) ###@@@ never happens; someday analyze why not [060407]
                print "debug:", self.view
        else:
            self.view = glpane.current_view_for_Undo(assy) # Csys object (for now), with an attribute pointing out the current Part
            ###e should this also save the current mode, considered as part of the view??? [060301]
        self.time = time.time()
        #e cpu time?
        #e glpane.redraw_counter? (sp?)
        self.assy_change_indicators = assy.all_change_indicators()
        # history serno that will be used next
        self.next_history_serno = env.last_history_serno + 1 # [060301]
        ###e (also worry about transient_msgs queued up, re this)
        #e current cmd on whatever stack of those we have? re recursive events if this matters? are ongoing tasks relevant??
        #e current part or selgroup or its index [#k i think this is set in current_view_for_Undo]
        return
    def restore_view(self, assy):
        """
        Restore the view & current part from self
        (called at end of an Undo or Redo command).
        """
        # also selection? _modified? window_caption (using some new method on assy that knows it needs to do that)?
        glpane = assy.o
        glpane.set_view_for_Undo(assy, self.view)
            # doesn't animate, for now -- if it does, do we show structure change before, during, or after?
            #e sets current selgroup; doesn't do update_parts; does it (or caller) need to?? ####@@@@
        #e caller should do whatever updates are needed due to this (e.g. gl_update)
    def restore_assy_change_indicators(self, assy):
        """
        Reset assy's change indicators to the values recorded in self,
        so assy knows whether the restored state counts as modified.
        """
        #e ... and not say it doesn't if the only change is from a kind that is not normally saved.
        if debug_change_indicators:
            print "restore_assy_change_indicators begin, chg ctrs =", assy.all_change_indicators()
        assy.reset_changed_for_undo( self.assy_change_indicators) # never does env.change_counter_checkpoint() or the other one
        if debug_change_indicators:
            print "restore_assy_change_indicators end, chg ctrs =", assy.all_change_indicators()
    pass
# ==
from idlelib.Delegator import Delegator
# print "Delegator",Delegator,type(Delegator),`Delegator`
class MergingDiff(Delegator): ###@@@ this is in use, but has no effect [as of bfr 060326].
    """
    A higher-level diff: a primitive diff plus some merging options,
    which (eventually) will cause additional diffs to be applied
    along with it.
    """
    # When this actually merges, it needs to override menu_desc & cp's, too. ####@@@@
    def __init__(self, diff, flags = None, archive = None):
        # diff becomes self.delegate; all its attrs should be constant,
        # since Delegator caches them on self as they're first read
        Delegator.__init__(self, diff)
        self.flags = flags
        # this ref is non-cyclic, since this kind of diff doesn't get
        # stored anywhere for a long time
        self.archive = archive
    def apply_to(self, archive):
        # once merging works, also apply the diffs we'd merge with #####@@@@@
        return self.delegate.apply_to(archive)
    pass
# ==
class AssyUndoArchive: # modified from UndoArchive_older and AssyUndoArchive_older # TODO: maybe assy.changed will tell us...
"""
#docstring is in older code... maintains a series (or graph) of checkpoints and diffs connecting them....
At most times, we have one complete ('filled') checkpoint, and a subsequent incomplete one (subject to being modified
by whatever changes other code might next make to the model objects).
Even right after an undo or redo, we'll have a checkpoint
that we just forced the model to agree with, and another one to hold whatever changes the user might make next
(if they do anything other than another immediate undo or redo). (Hopefully we'll know whether that other one has
actually changed or not, but in initial version of this code we might have to guess; maybe assy.changed will tell us.)
If user does an undo, and then wants to change the view before deciding whether to redo, we'd better not make that
destroy their ability to redo! So until we support out-of-order undo/redo and a separate undo stack for view changes
(as it should appear in the UI for this), we won't let view changes count as a "new do" which would eliminate the redo stack.
Each AssyUndoArchive is created by an AssyUndoManager, in a 1 to 1
relationship. An AssyUndoManager is created by an assembly, also
in a 1 to 1 relationship, although the assembly may choose to not
create an AssyUndoManager. So, there is essentially one
AssyUndoArchive per assembly, if it chooses to have one.
"""
next_cp = None
current_diff = None
format_options = dict(use_060213_format = True)
# Note: this is never changed and use_060213_format = False is not supported, but preserve this until we have another format option
# that varies, in case we need one for diffing some attrs in special ways. Note: use_060213_format appears in state_utils.py too.
# [bruce 060314]
copy_val = state_utils.copy_val #060216, might turn out to be a temporary kluge ###@@@
_undo_archive_initialized = False
    def __init__(self, assy):
        """
        Set up the data stores for tracking undoable state in assy.
        The rest of initialization is done later by initial_checkpoint.

        @type assy: assembly.assembly
        """
        self.assy = assy # represents all undoable state we cover (this will need review once we support multiple open files)
        self.obj_classifier = obj_classifier()
        self.objkey_allocator = oka = objkey_allocator()
        self.obj4key = oka.obj4key # public attr, maps keys -> objects
            ####@@@@ does this need to be the same as atom keys? not for now, but maybe yes someday... [060216]
        self.stored_ops = {} # map from (varid, ver) pairs to lists of diff-ops that implement undo or redo from them;
            # when we support out of order undo in future, this will list each diff in multiple places
            # so all applicable diffs will be found when you look for varid_ver pairs representing current state.
            # (Not sure if that system will be good enough for permitting enough out-of-order list-modification ops.)
        self.subbing_to_changedicts_now = False # whether this was initially False or True wouldn't matter much, I think...
        self._changedicts = [] # this gets extended in self._archive_meet_class;
            #060404 made this a list of (changedict, ourdict) pairs, not just a list of changedicts
            # list of pairs of *external* changedicts we subscribe to -- we are not allowed to directly modify them! --
            # and ourdicts for them (the appropriate one of self.all_changed_Atoms or self.all_changed_Bonds)
            # (in fact it might be better to just list their cdp's rather than the dicts themselves; also more efficient ##e)
        ## self.all_changed_objs = {} # this one dict subscribes to all changes on all attrs of all classes of object (for now)
        self.all_changed_Atoms = {} # atom.key -> atom, for all changed Atoms (all attrs lumped together; this could be changed)
        self.all_changed_Bonds = {} # id(bond) -> bond, for all changed Bonds (all attrs)
        self.ourdicts = (self.all_changed_Atoms, self.all_changed_Bonds,) #e use this more
        # rest of init is done later, by self.initial_checkpoint, when caller is more ready [060223]
        ###e not sure were really initialized enough to return... we'll see
        return
    def sub_or_unsub_changedicts(self, subQ): #060329, rewritten 060330
        """
        Subscribe (subQ = True) or unsubscribe (subQ = False) our collector
        dicts to/from all the external changedicts we know about.
        A no-op if we're already in the requested state.
        """
        if self.subbing_to_changedicts_now != subQ:
            del self.subbing_to_changedicts_now # we'll set it to a new value (subQ) at the end;
                # it's a bug if something wants to know it during this method, so the temporary del is to detect that
            ourdicts = self.ourdicts
            if subQ:
                # sanity check: our collector dicts should be empty before we start listening
                for ourdict in ourdicts:
                    if ourdict:
                        print "bug: archive's changedict should have been empty but contains:", ourdict
            cd_ods = self._changedicts[:]
            for changedict, ourdict in cd_ods:
                self.sub_or_unsub_to_one_changedict(subQ, changedict, ourdict)
                continue
            #e other things to do in some other method with each changedict:
            # or update?
            # cdp.process_changes()
            if not subQ:
                for ourdict in ourdicts:
                    ourdict.clear() # not sure we need this, but if we're unsubbing we're missing future changes
                        # so we might as well miss the ones we were already told about (might help to free old objects?)
            assert map(id, cd_ods) == map(id, self._changedicts) # since new cds during that loop are not supported properly by it
                # note, this is comparing ids of tuples, but since the point is that we didn't change _changedicts,
                # that should be ok. We can't use == since when we get to the dicts in the tuples, we have to compare ids.
            self.subbing_to_changedicts_now = subQ
        return
def sub_or_unsub_to_one_changedict(self, subQ, changedict, ourdict):
subkey = id(self)
cdp = changedicts._cdproc_for_dictid[id(changedict)]
if subQ:
cdp.subscribe(subkey, ourdict)
else:
cdp.unsubscribe(subkey)
return
    def _clear(self):
        """
        [private helper method for self.clear_undo_stack()]
        Clear our main data stores, which are set up in __init__,
        and everything referring to undoable objects or objkeys.
        (But don't clear our obj_classifier.)
        Then take an initial checkpoint of all reachable data.
        """
        self.current_diff.destroy()
        self.current_diff = None
        self.next_cp = None
        self.stored_ops = {}
        self.objkey_allocator.clear() # after this, all existing keys (in diffs or checkpoints) are nonsense...
        # ... so we'd better get rid of them (above and here):
        self._undo_archive_initialized = False # initial_checkpoint asserts this is False, then resets it
        self.initial_checkpoint() # should we clean up the code by making this the only way to call initial_checkpoint?
        return
    def initial_checkpoint(self):
        """
        Make an initial checkpoint, which consists mainly of
        taking a complete snapshot of all undoable state
        reachable from self.assy.
        """
        # WARNING: this is called twice by our undo_manager's clear_undo_stack
        # method (though only once by self's method of that name)
        # the first time it runs, but only once each subsequent time.
        # (The first call is direct, when the undo_manager is not initialized;
        # the second call always occurs, via self.clear_undo_stack, calling
        # self._clear, which calls this method.)
        #
        # If the assy has lots of data by that time, it will thus be fully
        # scanned twice, which is slow.
        #
        # Older comments imply that this situation arose unintentionally
        # and was not realized at first. In principle, two calls of this are not
        # needed, but in practice, it's not obvious how to safely remove
        # one of them. See the clear_undo_stack docstring for advice about
        # not wasting runtime due to this, which is now followed in our
        # callers. [bruce 080229 comment]
        assert not self._undo_archive_initialized
        assy = self.assy
        cp = make_empty_checkpoint(assy, 'initial') # initial checkpoint
        #e the next three lines are similar to some in self.checkpoint --
        # should we make self.set_last_cp() to do part of them? compare to do_op's effect on that, when we code that. [060118] [060301 see also set_last_cp_after_undo]
        cursnap = current_state(self, assy, initial = True, **self.format_options)
            # initial = True is ignored; obs cmt: it's a kluge to pass initial; revise to detect this in assy itself
        state = StatePlace(cursnap) #####k this is the central fix of the initial-state kluge [060407]
        fill_checkpoint(cp, state, assy)
        if self.pref_report_checkpoints():
            self.debug_histmessage("(initial checkpoint: %r)" % cp)
        self.last_cp = self.initial_cp = cp
        ## self.last_cp_arrival_reason = 'initial' # why we got to the situation of model state agreeing with this, when last we did
        #e note, self.last_cp will be augmented by a desc of varid_vers pairs about cur state;
        # but for out of order redo, we get to old varid_vers pairs but new cp's; maybe there's a map from one to the other...
        ###k was this part of UndoManager in old code scheme? i think it was grabbed out of actual model objects in UndoManager.
        self.sub_or_unsub_changedicts(False) # in case we've been called before (kluge)
        self._changedicts = [] # ditto
        self.sub_or_unsub_changedicts(True)
        self.setup_changedicts() # do this after sub_or_unsub, to test its system for hearing about redefined classes later [060330]
        self._undo_archive_initialized = True # should come before _setup_next_cp
        self._setup_next_cp() # don't know cptype yet (I hope it's 'begin_cmd'; should we say that to the call? #k)
        ## self.notify_observers() # current API doesn't permit this to do anything during __init__, since subs is untouched then
        return
def setup_changedicts(self):
assert not self._changedicts, "somehow setup_changedicts got called twice, since we already have some, "\
"and calling code didn't kluge this to be ok like it does in initial_checkpoint in case it's called from self._clear"
register_postinit_object( '_archive_meet_class', self )
# this means we are ready to receive callbacks (now and later) on self._archive_meet_class,
# telling us about new classes whose instances we might want to changetrack
return
    def _archive_meet_class(self, class1):
        """
        [private]
        class1 is a class whose instances we might want to changetrack.
        Learn how, and subscribe to the necessary changedicts.
        """
        ### TODO: if we've been destroyed, we ought to raise an exception
        # to get us removed from the pairmatcher that calls this method --
        # maybe a special exception that's not an error in its eyes.
        # In current code, instead, we'll raise AttributeError on _changedicts
        # or maybe on its extend method. [I ought to verify this is working.]
        changedicts0 = changedicts._changedicts_for_classid[ id(class1) ]
            # maps name to dict, but names are only unique per-class
        changedicts_list = changedicts0.values() # or .items()?
            # someday we'll want to use the dictnames, i think...
            # for now we don't need them
        ## self._changedicts.extend( changedicts_list ) -- from when ourdict
        ## was not in there (zap this commented out line soon)
        ourdicts = {UNDO_SPECIALCASE_ATOM: self.all_changed_Atoms,
                    UNDO_SPECIALCASE_BOND: self.all_changed_Bonds}
            # note: this is a dict, but self.ourdicts is a tuple
        ## specialcase_type = class1.__name__
        ## assert specialcase_type in ('Atom', 'Bond')
        ##     # only those two classes are supported, for now
        # Each changetracked class has to be handled by specialcase code
        # of a type we recognize. For now there are two types, for use by
        # subclasses of Atom & Bond.
        # [Before 071114 only those two exact classes were supported.]
        #
        # This also tells us which specific set of registered changedicts
        # to monitor for changes to instances of that class.
        # (In the future we might just ask the class for those changedicts.)
        assert hasattr(class1, '_s_undo_specialcase')
        specialcase_type = class1._s_undo_specialcase
        assert specialcase_type in (UNDO_SPECIALCASE_ATOM,
                                    UNDO_SPECIALCASE_BOND)
        ourdict = ourdicts[specialcase_type]
        for cd in changedicts_list:
            self._changedicts.append( (cd, ourdict) )
            # no reason to ever forget about changedicts, I think
            # (if this gets inefficient, it's only for developers who often
            #  reload code modules -- I think; review this someday ##k)
        if self.subbing_to_changedicts_now:
            # we're currently listening, so also subscribe to the new dicts now
            for name, changedict in changedicts0.items():
                del name
                self.sub_or_unsub_to_one_changedict(True, changedict, ourdict)
                #e Someday, also pass its name, so sub-implem can know what
                # we think about changes in it? Maybe; but more likely,
                # ourdict already was chosen using the name, if name needs
                # to be used at all.
                continue
        # optimization: ask these classes for their state_attrs decls now,
        # so later inner loops can assume the attrdicts exist.
        ###e WARNING: this won't be enough to handle new classes created at
        # runtime, while this archive continues to be in use,
        # if they contain _s_attr decls for new attrs! For that, we'd also
        # need to add the new attrs to existing attrdicts in state or diff
        # objects (perhaps lazily as we encounter them, by testing
        # len(attrdicts) (?) or some version counter).
        self.obj_classifier.classify_class( class1)
        return
def childobj_oursQ(self, obj):
"""
Is the given object (allowed to be an arbitrary Python object,
including None, a list, etc)
known to be one of *our* undoable state-holding objects?
(Also True in present implem if obj has ever been one of ours;
this needs review if this distinction ever matters,
i.e. if objs once ours can later become not ours,
without being destroyed.)
WARNING: this is intended only for use on child-scanned
(non-change-tracked) objects
(i.e. all but Atoms or Bonds, as of 060406);
it's not reliable when used on change-tracked objects when deciding
for the first time whether to consider them ours; instead, as part
of that decision, they might query this for child-scanned objects
which own them, e.g. atoms might query it for atom.molecule.
"""
#e possible optim: store self.objkey_allocator._key4obj.has_key
# as a private attr of self
return self.objkey_allocator._key4obj.has_key(id(obj))
def new_Atom_oursQ(self, atom): #060405; rewritten 060406
    """
    Is a newly seen Atom object one of ours?
    [it's being seen in a changed-object set;
    the implem might assume that, I'm not sure]
    """
    #e we might optim by also requiring it to be alive;
    # review callers if this might be important
    ##e maybe we should be calling a private Atom method for this;
    # not sure, since we'd need to pass self for self.childobj_oursQ
    # An atom is ours iff its owning chunk is; this is correct even when
    # atom.molecule is None or _nullMol (childobj_oursQ says False then).
    owning_chunk = atom.molecule
    return self.childobj_oursQ( owning_chunk )
def new_Bond_oursQ(self, bond): #060405
    """
    Is a newly seen Bond object one of ours?
    (optional: also ok to return False if it's not alive)
    """
    ##e maybe we should be calling a private Bond method for this
    # Guard clause: a dead bond is never considered ours (seems
    # reasonable for any kind of object... should review/doc/comment
    # in caller ###e). Surviving this test also implies atom1 & atom2
    # are not None and bond is in a1.bonds etc, so the tests below
    # are safe.
    if not self.trackedobj_liveQ(bond):
        return False
    # Notes:
    # - either atom being ours ought to imply the other one is too;
    #   'or' is mainly used to make bugs more likely noticed;
    #   I'm not sure which of 'or' and 'and' is more robust
    #   (if no bugs, they're equivalent, and so is using only one cond).
    #   ONCE THIS WORKS, I SHOULD PROBABLY CHANGE FROM 'or' TO 'and'
    #   (and retest) FOR ROBUSTNESS. ######@@@@@@
    # - the atoms might not be "new", but (for a new bond of ours)
    #   they're new enough for new_Atom_oursQ to work.
    return self.new_Atom_oursQ(bond.atom1) or self.new_Atom_oursQ(bond.atom2)
def attrcode_is_Atom_chunk(self, attrcode): #bruce 071114
    """
    Say whether an attrcode should be treated as Atom.molecule,
    or more precisely, as pointing to the owning chunk
    of an "atom", i.e. an instance of a class whose
    _s_undo_specialcase attribute is UNDO_SPECIALCASE_ATOM.
    """
    # [bugfix/idiom: use 'in' rather than the deprecated dict.has_key]
    res = attrcode in self.obj_classifier.dict_of_all_Atom_chunk_attrcodes
    # make sure this is equivalent to the old hardcoded version (temporary):
    assert res == (attrcode == (ATOM_CHUNK_ATTRIBUTE_NAME, 'Atom'))
        # remove when works -- will become wrong when we add Atom subclasses
    return res
def childobj_liveQ(self, obj):
    """
    Is the given object (allowed to be an arbitrary Python object,
    including None, a list, etc) (?? or assumed ourQ??)
    a live child-scanned object in the last-scanned state
    (still being scanned, in fact), assuming it's a child-scanned object?

    WARNING: it's legal to call this for any object, but for a
    non-child-scanned but undoable-state-holding object
    which is one of ours (i.e. an Atom or Bond as of 060406),
    the return value is undefined.

    WARNING: it's not legal to call this except during a portion of
    scate-scanning which briefly sets self._childobj_dict
    to a correct value, which is a dict from id(obj) -> obj for all
    live child objects as detected earlier in the scan.

    [#e Note: if we generalize the scanning to have ordered layers
    of objects, then this dict might keep accumulating newly found
    live objects (during a single scan), so that each layer can use
    it to know which objects in the prior layer are alive.
    Right now we use it that way but with two hardcoded layers,
    "child objects" and "change-tracked objects".]
    """
    # Note: this rewritten implem is still a kluge, but not as bad
    # as ~060407's. self._childobj_dict is stored by caller (several call
    # levels up), then later set to None before it becomes invalid.
    # [060408]
    # [bugfix/idiom: use 'in' rather than the deprecated dict.has_key]
    return id(obj) in self._childobj_dict
def trackedobj_liveQ(self, obj):
    """
    Assuming obj is a legitimate object of a class we change-track
    (but not necessarily that we're the archive that tracks that instance,
    or that any archive does),
    but *not* requiring that we've already allocated an objkey for it
    (even if it's definitely ours),
    then ask its class-specific implem of _undo_aliveQ to do the following:

    - if it's one of ours, return whether it's alive
      (in the sense of being part of the *current* state of the model
      handled by this archive, where "current" means according to the
      archive (might differ from model if archive is performing an undo
      to old state(??)));

    - if it's not one of ours, return either False, or whether it's alive
      in its own archive's model (if it has one)
      (which of these to do is up to the class-specific implems,
      and our callers must tolerate either behavior).

    Never raise an exception; on errors, print message and return False.
    ###e maybe doc more from Atom._undo_aliveQ docstring?

    Note: we can assume that the caller is presently performing either a
    checkpoint, or a move to an old checkpoint,
    and that self knows the liveQ status of all non-tracked objects,
    and is askable via childobj_liveQ.
    (#e Someday we might generalize this so there is some order among
    classes, and then we can assume it knows the liveQ status of all
    instances of prior classes in the order.)
    """
    # Delegate to the class-specific implementation; any exception
    # (deliberately caught broadly, per the no-raise contract above)
    # is reported and treated as "dead".
    try:
        alive = obj._undo_aliveQ(self)
    except:
        print_compact_traceback(
            "exception in _undo_aliveQ for %s; assuming dead: " % safe_repr(obj) )
        return False
    return alive
def get_and_clear_changed_objs(self, want_retval = True):
    """
    Clear, and (unless want_retval is false) return copies of,
    the changed-atoms dict (key -> atom) and changed-bonds dict (id -> bond).
    """
    # First flush any pending tracked changes into our own local
    # changedict(s), so the snapshot below is complete.
    for changedict, unused_ourdict in self._changedicts:
        changedicts._cdproc_for_dictid[id(changedict)].process_changes()
    result = None
    if want_retval:
        #e should generalize to a definite-order list, or name->dict
        result = ( dict(self.all_changed_Atoms), dict(self.all_changed_Bonds) )
    self.all_changed_Atoms.clear()
    self.all_changed_Bonds.clear()
    return result
def destroy(self): #060126 precaution
    """
    free storage, make doing of our ops illegal
    (noop with bug warning; or maybe just exception)
    """
    # report destruction first, while self is still fully printable
    if self.pref_report_checkpoints():
        self.debug_histmessage("(destroying: %r)" % self)
    # unsubscribe from the shared changedicts (this also clears our own
    # local ones; see comment below)
    self.sub_or_unsub_changedicts(False)
    # this would be wrong, someone else might be using them!
    ##for cd,odjunk in self._changedicts:
    ##    cd.clear()
    # it's right to clear our own, but that's done in the method we just called.
    del self._changedicts
    # drop references to checkpoints, the assembly, and stored ops, so
    # they can be garbage collected and so most later ops on self fail
    self.next_cp = self.last_cp = self.initial_cp = None
    self.assy = None
    self.stored_ops = {} #e more, if it can contain any cycles -- design was that it wouldn't, but true situation not reviewed lately [060301]
    self.current_diff = None #e destroy it first?
    # destroy the key allocator last, since ops above may still rely on it
    self.objkey_allocator.destroy()
    self.objkey_allocator = None
    return
def __repr__(self):
    """
    Debug representation, including our address and assembly.
    """
    # note: self.assy will be None if self has been destroyed
    address = id(self)
    return "<AssyUndoArchive at %#x for %r>" % (address, self.assy)
# ==
def _setup_next_cp(self):
    """
    [private method, mainly for begin_cmd_checkpoint:]
    self.last_cp is set; make (incomplete) self.next_cp, and self.current_diff to go between them.
    Index it... actually we probably can't fully index it yet if that depends on its state-subset vers.... #####@@@@@
    """
    assert self._undo_archive_initialized
    # caller must have shifted or discarded the previous next_cp already
    assert self.next_cp is None
    self.next_cp = make_empty_checkpoint(self.assy) # note: we never know cptype yet, tho we might know what we hope it is...
    self.current_diff = SimpleDiff(self.last_cp, self.next_cp) # this is a slot for the actual diff, whose content is not yet known
        # this attr is where change-trackers would find it to tell it what changed (once we have any)
        # assume it's too early for indexing this, or needing to -- that's done when it's finalized
    return
def set_last_cp_after_undo(self, cp): # 060301
    """
    We're doing an Undo or Redo command which has just caused actual current state (and its change_counters) to equal cp.state.
    Therefore, self.last_cp is no longer relevant, and that attr should be set to cp so further changes are measured relative to that
    (and self.current_diff and next_cp should be freshly made, forking off from it -- or maybe existing self.next_cp can be recycled for this;
    also we might need some of the flags in self.current_diff... but not the changes, I think, which either don't exist yet or are
    caused by tracking changes applied by the Undo op itself).
    (Further modelstate changes by this same Undo/Redo command are not expected and might cause bugs -- if they result in a diff,
    it would be a separate undoable diff and would prevent Redo from being available. So we might want to assert that doesn't happen,
    but if such changes *do* happen it's the logically correct consequence, so we don't want to try to alter that consequence.)
    """
    assert self._undo_archive_initialized
    assert self.last_cp is not None
    assert self.next_cp is not None
    assert self.current_diff is not None
    assert cp is not None
    assert self.last_cp is not cp # since it makes no sense if it is, though it ought to work within this routine
    # Clear the changed-object sets (maintained by change-tracking, for Atoms & Bonds), since whatever's in them will just slow down
    # the next checkpoint, but nothing needs to be in them since we're assuming the model state and archived state are identical
    # (caller has just stored archived state into model). (This should remove a big slowdown of the first operation after Undo or Redo.) [bruce 060407]
    self.clear_changed_object_sets()
    # not sure this is right, but it's simplest that could work, plus some attempts to clean up unused objects:
    self.current_diff.destroy() # just to save memory; might not be needed (probably refdecr would take care of it) since no ops stored from it yet
    self.last_cp.end_of_undo_chain_for_awhile = True # not used by anything, but might help with debugging someday; "for awhile" because we might Redo to it
    self.last_cp = cp
    ###@@@ we might need to mark cp for later freeing of its old Redo stack if we depart from it other than by another immediate Undo or Redo...
        # tho since we might depart from somewhere else on that stack, never mind, we should instead
        # just notice the departure and find the stuff to free at that time.
    # next_cp can be recycled since it's presently empty, I think, or if not, it might know something useful ####@@@@ REVIEW
    self.current_diff = SimpleDiff(self.last_cp, self.next_cp)
    self.current_diff.assert_no_changes = True # fyi: used where we check assy_change_indicators
    return
def clear_changed_object_sets(self): #060407
    """
    Discard the accumulated changed-atom and changed-bond sets,
    without returning their contents.
    """
    # delegate, skipping the (possibly large) dict copies
    self.get_and_clear_changed_objs(want_retval = False)
def clear_undo_stack(self): #bruce 060126 to help fix bug 1398 (open file left something on Undo stack) [060304 removed *args, **kws]
    """
    Discard all undo/redo history and free its storage, leaving the
    archive ready to record new diffs from the current state.
    """
    # note: see also: comments in self.initial_checkpoint,
    # and in undo_manager.clear_undo_stack
    assert self._undo_archive_initialized
    # note: the same-named method in undo_manager instead calls
    # initial_checkpoint the first time
    if self.current_diff: #k probably always true; definitely required for it to be safe to do what follows.
        self.current_diff.suppress_storing_undo_redo_ops = True # note: this is useless if the current diff turns out to be empty.
        if 1: #060304 try to actually free storage; I think this will work even with differential checkpoints...
            self._clear()
        ## #obs comments? [as of 060304]
        ## #e It might be nice to also free storage for all prior checkpoints and diffs
        ## # (by erasing them from stored_ops and from any other refs to them),
        ## # but I'm worried about messing up too many things
        ## # (since this runs from inside some command whose diff-recording is ongoing).
        ## # If we decide to do that, do it after the other stuff here, to all cp's/diffs before last_cp. #e
        ## # This shouldn't make much difference in practice
        ## # (for existing calls of this routine, from file open & file close), so don't bother for now.
        ## #
        ## # In order to only suppress changes before this method is called, rather than subsequent ones too,
        ## # we also do a special kind of checkpoint here.
        ## self.checkpoint( cptype = "clear")
        ## #
        ## self.initial_cp = self.last_cp # (as of 060126 this only affects debug_undo2 prints)
        pass
    return
def pref_report_checkpoints(self): #bruce 060127 revised meaning and menutext, same prefs key
    """
    Debug pref: whether to report all checkpoints which see any changes
    from the prior one.
    """
    # note: the menu text was revised 060127 but the prefs key keeps the
    # original wording, so existing user settings are preserved
    return debug_pref("undo/report changed checkpoints", Choice_boolean_False,
                      prefs_key = "_debug_pref_key:" + "undo/report all checkpoints")
def debug_histmessage(self, msg):
    # Emit a debug-level report to the history pane, in gray and with
    # HTML quoting so arbitrary repr text displays literally.
    env.history.message(msg, quote_html = True, color = 'gray')
def update_before_checkpoint(self): # see if this shows up in any profiles [split this out 060309]
    """
    Run model updaters (currently just assy.update_parts) so that the
    assembly's change counters are accurate before a checkpoint is taken.
    """
    ##e these need splitting out (and made registerable) as "pre-checkpoint updaters"... note, they can change things,
    # ie update change_counters, but that ought to be ok, as long as they can't ask for a recursive checkpoint,
    # which they won't since only UI event processors ever ask for those. [060301]
    self.assy.update_parts() # make sure assy has already processed changes (and assy.changed has been called if it will be)
        #bruce 060127, to fix bug 1406 [definitely needed for 'end...' cptype; not sure about begin, clear]
        # note: this has been revised from initial fix committed for bug 1406, which was in assembly.py and
        # was only for begin and end cp's (but was active even if undo autocp pref was off).
    #bruce 060313: we no longer need to update mol.atpos for every chunk. See comments in chunk.py docstring about atom.index.
    # [removed commented-out code for it, 060314]
    # this was never tested -- it would be needed for _s_attr__hotspot,
    # but I changed to _s_attr_hotspot and _undo_setattr_hotspot instead [060404]
    ## chunks = self.assy.allNodes(Chunk) # note: this covers all Parts, whereas assy.molecules only covers the current Part
    ## for chunk in chunks:
    ##     chunk.hotspot # make sure _hotpot is valid (not killed) or None [060404]
    ##     # this is required for Undo Atom change-tracking to work properly, so that dead atoms (as value of _hotspot)
    ##     # never need to be part of the undoable state.
    ##     # (Needless to say, this should be made more modular by being somehow declared in class Chunk,
    ##     # perhaps giving it a special _undo_getattr__hotspot or so (though support for that is nim, i think). ###e)
    return
def checkpoint(self, cptype = None, cmdname_for_debug = "", merge_with_future = False ): # called from undo_manager
    """
    When this is called, self.last_cp should be complete, and self.next_cp should be incomplete,
    with its state defined as equalling the current state, i.e. as a diff (which is collecting current changes) from last_cp,
    with that diff being stored in self.current_diff.
    And this diff might or might not be presently empty. (#doc distinction between changes we record but don't count as nonempty,
    like current view, vs those we undo but would not save in a file (selection), vs others.)
    We should finalize self.next_cp with the current state, perhaps optimizing this if its defining diff is empty,
    and then shift next_cp into last_cp and a newly created checkpoint into next_cp, recreating a situation like the initial one.
    In other words, make sure the current model state gets stored as a possible point to undo or redo to.
    Note [060301]: similar but different cp-shifting might occur when an Undo or Redo command is actually done.
    Thus, if this is the end-checkpoint after an Undo command, it might find last_cp being the cp "undone to" by that command
    (guess as of 060301 1159am, nim #k ###@@@).
    """
    # silently bail out if initial_checkpoint has never been taken
    if not self._undo_archive_initialized:
        if env.debug():
            print_compact_stack("debug note: undo_archive not yet initialized (maybe not an error)")
        return
    ## if 0: # bug 1440 debug code 060320, and 1747 060323
    ##     print_compact_stack("undo cp, merge=%r: " % merge_with_future)
    if not merge_with_future:
        #060312 added 'not merge_with_future' cond as an optim; seems ok even if this would set change_counters,
        # since if it needs to run, we presume the user ops will run it on their own soon enough and do another
        # checkpoint.
        self.update_before_checkpoint()
    assert cptype
    assert self.last_cp is not None
    assert self.next_cp is not None
    assert self.current_diff is not None
    # Finalize self.next_cp -- details of this code probably belong at a lower level related to fill_checkpoint #e
    ###e specifically, this needs moving into the new method (to be called from here)
    ## self.current_diff.finalize(...)
    # as of sometime before 060309, use_diff is only still an option for debugging/testing purposes:
    use_diff = debug_pref("use differential undo?", Choice_boolean_True, prefs_key = 'A7-devel/differential undo')
        ## , non_debug = True) #bruce 060302 removed non_debug
        # it works, 122p 060301, so making it default True and non_debug.
        # (It was supposed to traceback when undoing to initial_state, but it didn't,
        # so I'm "not looking that gift horse in the mouth" right now. ###@@@)
        #k see also comments mentioning 'differential'
    # maybe i should clean up the following code sometime...
    debug3 = 0 and env.debug() # for now [060301] if 0 060302; this is here (not at top of file) so runtime env.debug() affects it
    if debug3:
        print "\ncp", self.assy.all_change_indicators(), env.last_history_serno + 1
    if merge_with_future:
        #060309, partly nim; 060312 late night, might be done!
        if 0 and env.debug():
            print "debug: skipping undo checkpoint"
        ### Here's the plan [060309 855pm]:
        # in this case, used when auto-checkpointing is disabled, it's almost like not doing a checkpoint,
        # except for a few things:
        # + we update_parts (etc) (done above) to make sure change counters are accurate
        #   (#e optim: let caller flag say if that's needed)
        # - we make sure that change counter info (or whatever else is needed)
        #   will be available to caller's remake_UI_menuitems method
        # - that might include what to say in Undo, like "undo to what history serno for checkpoint or disabling", or in Redo
        # - #e someday the undo text might include info on types of changes, based on which change counters got changed
        # - ideally we do that by making current_diff know about it... not sure if it gets stored but still remains current...
        #   if so, and if it keeps getting updated in terms of end change counters, do we keep removing it and restoring it???
        #   or can we store it on a "symbolic index" meaning "current counters, whatever they may be"??? ###
        # - but i think a simpler way is just to decide here what those menu items should be, and store it specially, for undo,
        #   and just trash redo stack on real change but let the ui use the stored_ops in usual way otherwise, for Redo.
        #   Note that Redo UI won't be updated properly until we implement trashing the redo stack, in that case!
        # - we trash the redo stack based on what changed, same as usual (nim, this case and other case, as of 060309 852p).
        if self.last_cp.assy_change_indicators == self.assy.all_change_indicators():
            pass # nothing new; Undo being enabled can be as normal, based on last_cp #####@@@@@
        else:
            # a change -- Undo should from now on always be enabled, and should say "to last manual cp" or
            # "to when we disabled autocp"
            ####k Note: we need to only pay attention to changes that should be undoable, not e.g. view changes,
            # in the counters used above for these tests! Otherwise we'd enable Undo action
            # and destroy redo stack when we shouldn't. I think we're ok on this for now, but only because
            # calls to the view change counter are nim. ####@@@@ [060312 comment]
            if destroy_bypassed_redos:
                self.clear_redo_stack( from_cp = self.last_cp, except_diff = self.current_diff ) # it's partly nim as of 060309
                # this is needed both to save RAM and (only in this merge_with_future case) to disable the Redo menu item
            # set some flags about that, incl text based on what kind of thing has changed
            self.current_diff.command_info['n_merged_changes'] += 1 # this affects find_undoredos [060312 10:16pm]
                # starts out at 0 for all diffs; never set unless diff w/ changes sticks around for more
                ####@@@@ this number will be too big until we store those different change_counters and compare to *them* instead;
                # just store them right now, and comparison above knows to look for them since n_merged_changes > 0;
                # but there's no hurry since the menu text is fine as it is w/o saying how many cmds ran ###e
            ##e this would be a good time to stash the old cmdname in the current_diff.command_info(sp?)
            # and keep the list of all of them which contributed to the changes,
            # for possible use in coming up with menu_desc for merged changes
            # (though change_counters might be just as good or better)
            pass
    else:
        if 0 and env.debug():
            print "debug: NOT skipping undo checkpoint" # can help debug autocp/manualcp issues [060312]
        # entire rest of method
        if self.last_cp.assy_change_indicators == self.assy.all_change_indicators():
            # no change in state; we still keep next_cp (in case ctype or other metainfo different) but reuse state...
            # in future we'll still need to save current view or selection in case that changed and mmpstate didn't ####@@@@
            if debug_undo2 or debug3:
                print "checkpoint type %r with no change in state" % cptype, self.assy.all_change_indicators(), env.last_history_serno + 1
                if env.last_history_serno + 1 != self.last_cp.next_history_serno():
                    print "suspicious: env.last_history_serno + 1 (%d) != self.last_cp.next_history_serno() (%d)" % \
                          ( env.last_history_serno + 1 , self.last_cp.next_history_serno() )
            really_changed = False
            state = self.last_cp.state
                #060301 808p part of bugfix for "bug 3" [remove this cmt in a few days];
                # necessary when use_diff, no effect when not
        else:
            # possible change in state;
            # false positives are not common enough to optimize for, but common enough to try to avoid/workaround bugs in
            ## assert not self.current_diff.assert_no_changes...
            # note, its failure might no longer indicate a bug if we have scripting and a script can say,
            # inside one undoable operation, "undo to point P, then do op X".
            if self.current_diff.assert_no_changes: #060301
                msg = "apparent bug in Undo: self.current_diff.assert_no_changes is true, but change_counters were changed "\
                      "from %r to %r" % (self.last_cp.assy_change_indicators, self.assy.all_change_indicators())
                # we don't yet know if there's a real diff, so if this happens, we might move it later down, inside 'if really_changed'.
                print msg
                from utilities.Log import redmsg # not sure this is ok to do when this module is imported, tho probably it is
                env.history.message(redmsg(msg))
            if debug_undo2:
                print "checkpoint %r at change %r, last cp was at %r" % (cptype, \
                    self.assy.all_change_indicators(), self.last_cp.assy_change_indicators)
            if not use_diff:
                # note [060407]: this old code predates StatePlace, and hasn't been tested recently, so today's initial-state cleanup
                # might break it, since current_state really returns cursnap, a StateSnapshot, but last_cp.state will be a StatePlace,
                # unless something about 'not use_diff' makes it be a StateSnapshot, which is possible and probably makes sense.
                # But a lot of new asserts of isinstance(state, StatePlace) will probably break in that case,
                # so reviving the 'not use_diff' (as a debug_pref option) might be useful for debugging but would be a bit of a job.
                state = current_state(self, self.assy, **self.format_options) ######@@@@@@ need to optim when only some change_counters changed!
                really_changed = (state != self.last_cp.state) # this calls StateSnapshot.__ne__ (which calls __eq__) [060227]
                if not really_changed and env.debug():
                    print "debug: note: detected lack of really_changed using (state != self.last_cp.state)"
                    ###@@@ remove when works and no bugs then
            else:
                #060228
                assert self.format_options == dict(use_060213_format = True), "nim for mmp kluge code" #e in fact, remove that code when new bugs gone
                state = diff_and_copy_state(self, self.assy, self.last_cp.state)
                #obs, it's fixed now [060301]
                ## # note: last_cp.state is no longer current after an Undo!
                ## # so this has a problem when we're doing the end-cmd checkpoint after an Undo command.
                ## # goal: diffs no longer form a simple chain... rather, it forks.
                ## # hmm. we diff against the cp containing the current state! (right?) ### [060301]
                really_changed = state.really_changed
                if not really_changed and debug3: # see if this is printed for bug 3, 060301 8pm [it is]
                    print "debug: note: detected lack of really_changed in diff_and_copy_state"
                    ###@@@ remove when works and no bugs then
                    # ok, let's get drastic (make debug pref if i keep it for long):
                    ## state.print_everything() IMPLEM ####@@@@
                    print "state.lastsnap.attrdicts:", state.lastsnap.attrdicts # might be huge; doesn't contain ids of mutables
                elif debug3: # condition on rarer flag soon,
                    # or have better debug pref for undo stats summary per chgcounter'd cp ###e doit
                    print "debug: note: real change found by diff_and_copy_state"
            if not really_changed and self.current_diff.command_info.get('_offered', False):
                # [bruce 060326 to fix bug 1733:]
                ###e should explain; comments elsewhere from this day and bug have some explanation of this
                really_changed = True # pretend there was a real change, so we remain on undo stack, and don't reset change counters
                self.current_diff.command_info['_no_net_change'] = True # but make sure menu_desc is able to know what's going on
            if not really_changed:
                # Have to reset changed_counters, or undo stack becomes disconnected, since it uses them as varid_vers.
                # Not needed in other case above since they were already equal.
                # Side benefit: will fix file-modified indicator too (though in some cases its transient excursion to "modified" during
                # a drag, and back to "unmodified" at the end, might be disturbing). Bug: won't yet fix that if sel changed, model didn't.
                #####e for that, need to reset each one separately based on what kind of thing changed. ####@@@@ doit!
                self.assy.reset_changed_for_undo( self.last_cp.assy_change_indicators)
        if really_changed:
            self.current_diff.empty = False # used in constructing undo menu items ####@@@@ DOIT!
            if destroy_bypassed_redos:
                # Destroy the Redo stack to save RAM, since we just realized we had a "newdo".
                # WARNING: ####@@@@ In the future this will need to be more complicated, e.g. if the new change
                # was on one of those desired sub-stacks (for view or sel changes) in front of the main one (model changes),
                # or in one Part when those have separate stacks, etc...
                # Design scratch, current code: the stuff to destroy (naively) is everything linked to from self.last_cp
                # except self.current_diff / self.next_cp, *i think*. [060301, revised 060326]
                self.clear_redo_stack( from_cp = self.last_cp, except_diff = self.current_diff ) # it's partly nim as of 060309
        else:
            # not really_changed
            # guess: following line causes bug 3 when use_diff is true
            if not use_diff:
                state = self.last_cp.state
                # note: depending on how this is reached, it sets state for first time or replaces it with an equal value
                # (which is good for saving space -- otherwise we'd retain two equal but different huge state objects)
            else:
                pass # in this case there's no motivation, and (guess) it causes "bug 3", so don't do it,
                    # but do set state when assy_change_indicators says no change [done] 060301 8pm
            self.current_diff.empty = True
            self.current_diff.suppress_storing_undo_redo_ops = True # (this is not the only way this flag can be set)
                # I'm not sure this is right, but as long as varid_vers are the same, or states equal, it seems to make sense... #####@@@@@
        fill_checkpoint(self.next_cp, state, self.assy) # stores self.assy.all_change_indicators() onto it -- do that here, for clarity?
            #e This will be revised once we incrementally track some changes - it won't redundantly grab unchanged state,
            # though it's likely to finalize and compress changes in some manner, or grab changed parts of the state.
            # It will also be revised if we compute diffs to save space, even for changes not tracked incrementally.
            #e also store other metainfo, like time of completion, cmdname of caller, history serno, etc (here or in fill_checkpoint)
        if not self.current_diff.empty and self.pref_report_checkpoints():
            if cptype == 'end_cmd':
                cmdname = self.current_diff.cmdname()
            else:
                cmdname = cmdname_for_debug # a guess, used only for debug messages -- true cmdname is not yet known, in general
            if cmdname:
                desc = "(%s/%s)" % (cptype, cmdname)
            else:
                desc = "(%s)" % cptype
            self.debug_histmessage( "(undo checkpoint %s: %r)" % (desc, self.next_cp) )
            del cmdname, desc
        if not self.current_diff.suppress_storing_undo_redo_ops:
            # store this diff in both directions, so it can serve as both an Undo and a Redo menu op
            redo_diff = self.current_diff
            undo_diff = redo_diff.reverse_order()
            self.store_op(redo_diff)
            self.store_op(undo_diff)
            # note, we stored those whether or not this was a begin or end checkpoint;
            # figuring out which ones to offer, merging them, etc, might take care of that, or we might change this policy
            # and only store them in certain cases, probably if this diff is begin-to-end or the like;
            # and any empty diff always gets merged with followon ones, or not offered if there are none. ######@@@@@@
        # Shift checkpoint variables
        self.last_cp = self.next_cp
        self.next_cp = None # in case of exceptions in rest of this method
        self.last_cp.cptype = cptype #k is this the only place that knows cptype, except for 'initial'?
        ## self.last_cp_arrival_reason = cptype # affects semantics of Undo/Redo user-level ops
        ##     # (this is not redundant, since it might differ if we later revisit same cp as self.last_cp)
        self._setup_next_cp() # sets self.next_cp and self.current_diff
    return
def clear_redo_stack( self, from_cp = None, except_diff = None ): #060309 (untested)
    """
    Find and destroy every redo-direction diff reachable from from_cp
    (excluding except_diff), freeing their storage, so the Redo stack
    beyond from_cp disappears.
    """
    #e scan diff+1s from from_cp except except_diff, thinking of diffs as edges, cp's or their indices as nodes.
    # can we do it by using find_undoredos with an arg for the varid_vers to use instead of current ones?
    # use that transclose utility... on the nodes, i guess, but collect the edges as i go
    # (nodes are state_versions, edges are diffs)
    # (transclose needs dict keys for these nodes... the nodes are data-like so they'll be their own keys)
    assert from_cp is not None # and is a checkpoint?
    # but it's ok if except_diff is None
    state_version_start = self.state_version_of_cp(from_cp)
    # toscan = { state_version_start: state_version_start } # TypeError: dict objects are unhashable
    def dictkey(somedict):
        """
        Return a copy of somedict's data suitable for use as the key in another dict.
        (If somedict was an immutable dict object of some sort,
        this could just return it directly.)
        """
        items = somedict.items()
        items.sort() # a bit silly since in present use there is always just one item, but this won't run super often, i hope
            # (for that matter, in present use that item's key is always the same...)
        # return items # TypeError: list objects are unhashable
        return tuple(items)
    toscan = { dictkey(state_version_start): state_version_start }
    diffs_to_destroy = {}
    def collector(state_version, dict1):
        # transclose visitor: record redo-direction edges, and queue their
        # endpoint nodes for further exploration
        ops = self._raw_find_undoredos( state_version)
        for op in ops:
            if op.direction == 1 and op is not except_diff: # redo, not undo
                # found an edge to destroy, and its endpoint node to further explore
                diffs_to_destroy[op.key] = op
                state_version_endpoint = self.state_version_of_cp( op.cps[1])
                dict1[ dictkey(state_version_endpoint)] = state_version_endpoint
        return # from collector
    transclose( toscan, collector) # retval is not interesting to us; what matters is side effect on diffs_to_destroy
    if diffs_to_destroy:
        ndiffs = len(diffs_to_destroy)
        len1 = len(self.stored_ops)
        if env.debug():
            print "debug: clear_redo_stack found %d diffs to destroy" % ndiffs
        for diff in diffs_to_destroy.values():
            diff.destroy() #k did I implem this fully?? I hope so, since clear_undo_stack probably uses it too...
                # the thing to check is whether they remove themselves from stored_ops....
        diffs_to_destroy = None # refdecr them too, before saying we're done (since the timing of that is why we say it)
        toscan = state_version_start = from_cp = None
        len2 = len(self.stored_ops)
        savings = len1 - len2 # always 0 for now, since we don't yet remove them, so don't print non-debug msg when 0 (for now)
        if ndiffs and (savings < 0 or env.debug()):
            print " debug: clear_redo_stack finished; removed %d entries from self.stored_ops" % (savings,) ###k bug if 0 (always is)
    else:
        if 0 and env.debug():
            print "debug: clear_redo_stack found nothing to destroy"
    return
def current_command_info(self, *args, **kws): ##e should rename add_... to make clear it's not finding and returning it
    """
    Record metadata (keyword args only; recognized keys include cmd_name)
    about the command whose changes self.current_diff is accumulating.
    """
    assert not args
    # silently bail out (except when debugging) before initial_checkpoint
    if not self._undo_archive_initialized:
        if env.debug():
            print_compact_stack("debug note: undo_archive not yet initialized (maybe not an error)")
        return
    ######@@@@@@ somewhere in... what? a checkpoint? a diff? something larger? (yes, it owns diffs, in 1 or more segments)
    # it has 1 or more segs, each with a chain of alternating cps and diffs.
    # someday even a graph if different layers have different internal cps. maybe just bag of diffs
    # and overall effect on varidvers, per segment. and yes it's more general than just for undo; eg affects history.
    self.current_diff.command_info.update(kws)
    return
def do_op(self, op):
    ###@@@ 345pm some day bfr 060123 - figure out what this does if op.prior
    # is not current, etc; how it relates to whether assy changed since
    # last_cp set; etc.
    """
    Apply one of the stored diff-ops to the model, assuming the caller has
    already decided this is safe/desirable (including for out-of-order undo).
    This changes model state in the same direction as recorded in the diff,
    and is recorded as a change to the current varid_ver pairs (details of
    where those live -- here, in the model, or both -- are not yet settled).

    If this move is exactly between two preexisting checkpoints (i.e. not
    out-of-order), the overall varid_ver (our analog of an "undo stack
    pointer") is updated to reflect that; otherwise [nim as of 060118] a new
    checkpoint/ver/diff would be made for the new state, usually reusing
    some state-subset varid_vers from inside the diff.
    """
    assert self._undo_archive_initialized
    # self.current_diff is accumulating the changes happening now, including
    # those we're about to cause by applying op. It must not itself be stored
    # later as an undoable/redoable diff -- otherwise every undo would leave
    # behind a redundant second copy of the same change.
    self.current_diff.suppress_storing_undo_redo_ops = True
        # [should this also discard self.last_cp as irrelevant??? then
        #  apply_to can set it? 060301 Q ###@@@]
    # Note: find_undoredos may depend on self.assy.all_change_indicators()
    # validly representing self.assy's state version; that becomes false
    # during op.apply_to (as changes occur in archive.assy), but apply_to
    # restores the correct old value from the end of the diff when it's done.
    op.apply_to( self)
        # also restores the view to what it was when that change was made
        # [as of 060123] -- i.e. it affects more than just assy (glpane view
        # state, etc).
        #e when diffs are tracked, worry about this one being tracked
        #e apply_to should also track how this affects varid_ver pairs #####@@@@@
    # Historical design notes [060123 etc]: this command will do its own
    # end-checkpoint, so the diffs done by apply_to above end up in the
    # current diff; what matters here is knowing the varid_ver pairs used
    # to build undo/redo menu items. Tracking of current varid_vers happens
    # (a) when random changes get packed into a diff ("as if this diff had
    # just been redone"), and (b) when changes are made by applying a diff,
    # where the direct ones can be reset to old values (consistency updates
    # are moot in a single-varid system).
    return
def state_version(self):
    """
    Return a dict mapping varid to version for the *current* model state.
    There is only one varid for now (one global undo stack) -- still true
    with 3 change counters [060227].
    """
    ## varid = make_assy_varid(self.assy._debug_name)
    # kluge: the constant is an optim of the line above:
    varid = ASSY_VARID_STUB
    version = self.assy.all_change_indicators()
        # note: describes the totally-current state, not any diff or
        # checkpoint; it is stale while an old diff is being applied,
        # until assy.all_change_indicators() is updated at the end.
    return {varid: version}
def state_version_of_cp(self, cp): #060309
    """
    Return the varid -> version dict corresponding to checkpoint cp.
    ###k maybe this should be (or is already) a cp method...
    """
    return {ASSY_VARID_STUB: cp.assy_change_indicators}
def find_undoredos(self, warn_when_change_indicators_seem_wrong = True):
    """
    Return a list of undo and/or redo operations
    that apply to the current state; return merged ops if necessary.

    @param warn_when_change_indicators_seem_wrong: if true, print a console
        warning when the last checkpoint's change indicators disagree with
        the assy's current ones (a sign of untracked changes); see code
        and comments.
    """
    #e also pass state_version? for now rely on self.last_cp or some new state-pointer...
    if not self._undo_archive_initialized:
        # not yet initialized -- there can be no stored ops to offer
        return []
    if warn_when_change_indicators_seem_wrong:
        # try to track down some of the bugs... if this is run when untracked changes occurred (and no checkpoint),
        # it would miss everything. If this sometimes happens routinely when undo stack *should* be empty but isn't,
        # it could explain difficulty in reproducing some bugs. [060216]
        # update, bruce 071025: I routinely see this warning under certain conditions, which I forget.
        # I don't think I experience bugs then, though.
        if self.last_cp.assy_change_indicators != self.assy.all_change_indicators():
            print "WARNING: find_undoredos sees self.last_cp.assy_change_indicators != self.assy.all_change_indicators()", \
                  self.last_cp.assy_change_indicators, self.assy.all_change_indicators()
        pass
    if self.current_diff.command_info['n_merged_changes']:
        # if the current_diff has any changes at all (let alone merged ones, or more precisely, those intending
        # to merge with yet-unhappened ones, because autocheckpointing is disabled), it's the only applicable one,
        # in the current [060312] single-undo-stack, no-inserted-view-ops scheme. The reason we check specifically
        # for merged changes (not any old changes) is that there's no record of any other kind of changes in current_diff,
        # and when other kinds get noticed in there they immediately cause it to be replaced by a new (empty) current_diff.
        # So there's no way and no point, and the reason there's no need is that non-current diffs get indexed
        # so they can be found in stored_ops by _raw_find_undoredos.
        return [self.current_diff.reverse_order()] ####k ####@@@@ ??? [added this 060312; something like it seems right]
    state_version = self.state_version()
    ## state_version = dict([self.last_cp.varid_ver()]) ###@@@ extend to more pairs
        # that's a dict from varid to ver for current state;
        # this code would be happy with the items list but future code will want the dict #e
    res = self._raw_find_undoredos( state_version) #060309 split that out, moved its debug code back here & revised it
    if not res and debug_undo2 and (self.last_cp is not self.initial_cp):
        print "why no stored ops for this? (claims to not be initial cp)", state_version # had to get here somehow...
    if debug_undo2:
        print "\nfind_undoredos dump of found ops, before merging:"
        for op in res:
            print op
        print "end of oplist\n"
    # Need to filter out obsolete redos (not sure about undos, but I guess not) ... actually some UIs might want to offer them,
    # so i'd rather let caller do that (plus it has the separated undo/redo lists).
    # But then to save time, let caller discard what it filters out, and let it do the merging only on what's left --
    # i.e. change docstring and API so caller merges, not this routine (not sure of this yet) #####@@@@@.
    ######@@@@@@ need to merge
    ##k need to store_op at end-checkpoint! right now we store at all cps... will that be ok? probably good...
    ## (or begin, if end missed -- do that by auto-end? what about recursion?
    # let recursion also end, then begin anew... maybe merge... see more extensive comments mentioning word "cyberspace",
    # maybe in a notesfile or in code)
    return res
def _raw_find_undoredos(self, state_version):
    """
    Return all non-destroyed stored ops that are indexed (in
    self.stored_ops) under any (varid, ver) pair of state_version
    (a dict from varid to ver). No merging or filtering beyond
    skipping destroyed ops is done here.
    """
    found = {}
    for varid, ver in state_version.items():
        for candidate in self.stored_ops.get( (varid, ver), () ):
            #e optim by storing a dict so we can use update here? doesn't matter for now
            if candidate.destroyed:
                # needed only because destroy doesn't yet unstore ops;
                # but don't unstore them here, that would miss some
                continue
            found[candidate.key] = candidate
    # this includes anything that *might* apply... filtering it is not
    # needed for now with only one varid in the system. ###@@@
    return found.values()
def store_op(self, op):
    """
    Index op in self.stored_ops under each of its (varid, ver) pairs,
    so that _raw_find_undoredos can later find it.
    """
    assert self._undo_archive_initialized
    for varver in op.varid_vers():
        self.stored_ops.setdefault(varver, []).append(op)
    return
def _n_stored_vals(self): #060309, unfinished
    """
    Return the total number of values stored across all stored ops --
    a primitive RAM-usage estimate. CALL IT for that purpose.
    #e add args for variants of what it measures ####@@@@
    """
    return sum( op._n_stored_vals() ###IMPLEM
                for oplist in self.stored_ops.itervalues()
                for op in oplist )
def wrap_op_with_merging_flags(self, op, flags = None):
    """
    Wrap op in a MergingDiff carrying the given merging flags.
    [see docstring in undo_manager]
    """
    return MergingDiff(op, flags = flags, archive = self) # stub
pass # end of class AssyUndoArchive
# kluge [060309]: a constant varid permits optims, valid only as long as it
# doesn't depend on the assy, etc; all uses of this except the one inside
# make_assy_varid() are wrong in principle, so fix them when that kluge
# becomes invalid.
ASSY_VARID_STUB = 'assy'

def make_assy_varid(assy_debug_name):
    """
    Make the varid for changes to the entire assy,
    for use when we want a single undo stack for all its Parts.

    @param assy_debug_name: currently ignored (see stub comment below).
    """
    ## return 'varid_stub_' + (assy_debug_name or "") #e will come from assy itself
    return ASSY_VARID_STUB # stub, but should work
# end
| NanoCAD-master | cad/src/foundation/undo_archive.py |
# Copyright 2004-2009 Nanorex, Inc. See LICENSE file for details.
"""
Group.py -- Class (or superclass) for all non-leaf nodes in the
internal model tree of Nodes.
@author: Josh
@version: $Id$
@copyright: 2004-2009 Nanorex, Inc. See LICENSE file for details.
History:
Originally by Josh; gradually has been greatly extended by Bruce,
but the basic structure of Nodes and Groups has not been changed.
Bruce 071110 split Group.py out of Utility.py. (And may soon
split out Node and/or LeafNode as well.)
Bruce 080305 changed superclass from Node to NodeWithAtomContents.
"""
from utilities import debug_flags
from utilities.debug import print_compact_stack, print_compact_traceback
from utilities.debug_prefs import debug_pref_History_print_every_selected_object
from utilities.icon_utilities import imagename_to_pixmap
from utilities.Log import redmsg, quote_html
import foundation.env as env
from foundation.state_constants import S_CHILDREN, S_DATA
from foundation.NodeWithAtomContents import NodeWithAtomContents
from commands.GroupProperties.GroupProp import GroupProp
# ==

# Alias for Group's superclass, so that delegating calls
# (e.g. _superclass.__init__) survive a future change of superclass.
_superclass = NodeWithAtomContents #bruce 080305 revised this
class Group(NodeWithAtomContents):
    """
    A kind of Node which groups other nodes (its .members,
    informally called its "kids") in the model tree, for drawing,
    and for selection.

    Its members can be various other kinds of Groups (subtrees of nodes)
    or non-Group Nodes (e.g. Jigs, Chunks).

    Group is used as both a concrete and abstract class.
    (I.e. it's instantiated directly, but also has subclasses.)
    """

    # == default values of per-subclass constants

    featurename = "" # (redundant with Node)
        # It's intentional that we don't provide this for Group itself, so a
        # selected Group in the MT doesn't bother you by offering wiki help on
        # Group. Maybe we'll leave it off of Chunk as well... but maybe a
        # better system would be to let user turn it off for specific classes
        # they're familiar with, or to relegate it to a help submenu rather
        # than MT context menu, or in some other way make it less visible...
        # [bruce 051201]

    autodelete_when_empty = False # subclasses whose instances want most
        # current commands to delete them whenever they become empty should
        # define this to be True. (Individual instances could also override it
        # if desired.) The current command's keep_empty_group method
        # will then get to decide, assuming that command doesn't override its
        # autodelete_empty_groups method. [bruce 080305]
        # See also temporarily_prevent_autodelete_when_empty, below.

    _mmp_group_classifications = () # should be extended in some subclasses...
        # This should be a tuple of classifications that appear in
        # files_mmp._GROUP_CLASSIFICATIONS, most general first.
        # There is no need for more than one element except to support
        # old code reading new mmp files.
        # [bruce 080115]

    # == instance variable default values and/or undoable state declarations
    # (note that copyable_attrs also declares undoable state, for Nodes)

    _s_attr_members = S_CHILDREN # this declares group.members for Undo
        # note: group.members are informally called its "kids",
        # but need not be identical to the output of group.MT_kids(),
        # which gives the list of nodes to show as its children in
        # the Model Tree.

    temporarily_prevent_autodelete_when_empty = False
        # For explanation, see comments in default implem of
        # Command.autodelete_empty_groups method.
        # [bruce 080326, part of fixing logic bug 2705]

    _s_attr_temporarily_prevent_autodelete_when_empty = S_DATA

    # ==
def __init__(self, name, assy, dad, members = (), editCommand = None):
    ###@@@ review inconsistent arg order (name, assy here vs assy, name in Node)
    """
    Construct a Group named name, in assy, under parent node dad,
    optionally adopting each node in members (in order) as a child.
    """
    # self.members must exist before _superclass.__init__ runs
    self.members = []
    # funcs to call right after the next time self.members is changed
    self.__cmfuncs = []
    _superclass.__init__(self, assy, name, dad)
    self.open = True
    for member in members:
        self.addchild(member)
    #@Note: subclasses use this argument in self.edit() [review: still true?]
    # REVIEW: is defining this in the superclass Group,
    # which no longer uses it, still justified? [bruce 080801 question]
    self.editCommand = editCommand
    return
def _um_initargs(self): #bruce 051013 [in class Group]
    """
    Return the (args, kws) needed to reconstruct self for Undo.

    [Overrides Node._um_initargs; see its docstring.]
    """
    # [note: as of 060209 this is probably well-defined and correct (for
    # most subclasses), but not presently used]
    # note: arg order is reversed from the Node version; dad and members
    # (like most inter-object links) are best handled separately.
    return (self.name, self.assy), {}
def _undo_update(self): # in class Group [bruce 060306]
    """
    Refresh self after an Undo state change: report our members as
    changed, then run the superclass updates.
    """
    self.changed_members()
        # This is part of the fix for bug 1617; fixing it will also
        # require separate changes in MMKit by Mark. Review: is this safe
        # to do in arbitrary order vs. other Undo-related updates, or do
        # we need to only do it at the end, and/or in some order when
        # several Groups changed?? I don't know, so for now I'll wait and
        # see if we notice bugs from doing it in arbitrary order.
        # [bruce 060306]
    _superclass._undo_update(self)
    return
def is_group(self):
    """
    Whether this node is a Group -- always true for this class and its
    subclasses. [overrides Node method; see its docstring]
    """
    return True
_extra_classifications = () # per-class default; replaced per-instance
    # (as a list) by set_extra_classifications

def set_extra_classifications( self, extra_classifications): #bruce 080115
    """
    Record extra mmp 'group' record classifications for self,
    stored as a fresh per-instance list.
    """
    self._extra_classifications = list(extra_classifications)
open_specified_by_mmp_file = False # class default; set True when an
    # "info opengroup open" record was read for this group

def readmmp_info_opengroup_setitem( self, key, val, interp ):
    """
    Handle one "info opengroup <key> = <val>" mmp record, called while
    reading an mmp file right after this node's "group" record was read
    and before any other node or "group" record.

    Key is a list of words, val a string. Interp helps translate
    references in <val> into other objects read from (or referred to by)
    the same mmp file; see the calls of this method from files_mmp for
    the doc of interp methods.

    If key is recognized, set the attribute or property it refers to;
    otherwise do nothing (subclasses handling certain keys specially
    should call this superclass method for other keys). An unrecognized
    key, even if longer than any recognized key, is not an error.
    """
    #bruce 050421, to read group open state from mmp file
    if key != ['open']:
        # unrecognized key: not an error (see docstring)
        if debug_flags.atom_debug:
            print("atom_debug: fyi: info opengroup (in %r) with "
                  "unrecognized key %r (not an error)" % (self, key,))
        return
    # val should be "True" or "False" (unrecognized vals are ignored)
    if val == 'True':
        self.open = True
        self.open_specified_by_mmp_file = True
            # so code to close the clipboard won't override it
    elif val == 'False':
        self.open = False
        self.open_specified_by_mmp_file = True
    elif debug_flags.atom_debug:
        print("atom_debug: maybe not an error: \"info opengroup open\" "
              "ignoring unrecognized val %r" % (val,))
    return
def drag_move_ok(self):
    """
    May this node be moved by drag and drop? (Same answer as for Node.)
    """
    return True
def drag_copy_ok(self):
    """
    May this node be copied by drag and drop?
    """
    # for my testing... REVIEW: maybe make it False for Alpha though 050201
    return True
def MT_DND_can_drop_inside(self): #bruce 080317
    """
    Are ModelTree Drag and Drop operations permitted to drop nodes
    inside self? True for most Groups.

    [overrides Node method; overridden again in some subclasses]
    """
    return True
def is_selection_group_container(self): #bruce 050131 for Alpha
    """
    Whether this group causes each of its direct members to be treated
    as a "selection group" (see another docstring for what that means;
    note that it can be true of leaf nodes too, in spite of the name).
    False for most groups.

    [Intended to be overridden only by the Clipboard.]
    """
    return False
def haspicked(self): # bruce 050126
    """
    Whether node's subtree has any picked members: true when self is
    picked, or when any member's subtree has picked nodes.
    [See comments in Node.haspicked docstring.]
    """
    if self.picked:
        return True
    return any( kid.haspicked() for kid in self.members )
def changed_members(self): #bruce 050121 new feature, now needed by BuildAtoms
    """
    Report that self.members was directly changed (insert, delete,
    reorder). Whatever changed it MUST call this method, *after* making
    the change; we'll inform other interested parties, if any (to become
    one, use call_after_next_changed_members).

    Notes: This need not be called for membership changes *within* our
    members, only for direct changes to our members list. Our members
    list is public, but whether it's incrementally changed (the same
    mutable list object) or replaced is not defined. It is deprecated
    for anything other than a Group (or subclass) method to directly
    change self.members, but if it does, calling this immediately
    afterwards is required. [As of 050121 I believe all code follows
    this rule, but am not sure. ##k]
    """
    self.invalidate_atom_content() #bruce 080306
    # tell the part (which also informs the assy), or the assy directly
    # if we have no part
    if self.part:
        self.part.changed() # does assy.changed too
    elif self.assy:
        # [bruce 050429 comment: I'm suspicious this is needed or good if
        # we have no part (re bug 413), but it's too dangerous to change
        # it just before a release, so bug 413 needs a different fix (and
        # anyway this is not the only source of assy.changed() from
        # opening a file -- at least chunk.setDisplayStyle also does it).
        # For Undo we might let .changed() propogate only into direct
        # parents, and then those assy.changed() would not happen and bug
        # 413 might be fixable differently.]
        self.assy.changed()
    # note: part.changed() / assy.changed() above are allowed to modify
    # self.__cmfuncs, so re-read it only now
    subscribers = self.__cmfuncs
    if subscribers:
        self.__cmfuncs = []
            # reset first, in case a func re-subscribes while we run it
        for func in subscribers:
            try:
                func(self)
                    # pass self, in case it's different from the object
                    # they subscribed to (due to kluge_change_class)
            except:
                print_compact_traceback(
                    "error in some cmfunc, ignored by %r: " % self )
    return
def _ac_recompute_atom_content(self): #bruce 080306
    """
    Recompute and return (but do not record) our atom content, as the
    bitwise OR of each member's (possibly cached) atom content.

    [Overrides superclass method. Subclasses whose kids are not exactly
    self.members must override or extend this further.]
    """
    content = 0
    for kid in self.members:
        content |= kid._f_updated_atom_content()
    return content
def call_after_next_changed_members(self, func, only_if_new = False):
    """
    Subscribe func to be called once, right after the next time anything
    changes self.members. At that time it gets one argument -- self, or,
    if our members are taken over by another Group instance (see
    kluge_change_class), that instance. Its return value is ignored, and
    exceptions it raises are printed (in debug version only).

    Typically func should be an "invalidation function", recording the
    need to update something; when that update later occurs, it uses
    self.members and again supplies a func to this method. (If every
    call of func did an update and gave us a new func to record, this
    might be inefficient when self.members changes many times in a row;
    nevertheless this is explicitly permitted: func, when called from
    our code, may itself call this method with the same or a new func.)

    @param only_if_new: if true, do nothing when func is already subscribed.
    """
    # note: this method is no longer used as of 080821, but it can remain,
    # since it's still correct and potentially useful. [bruce 080821]
    if only_if_new and (func in self.__cmfuncs):
        return
    self.__cmfuncs.append( func) # might occur during use of same func!

# == methods before this point are by bruce 050108 and should be
#    reviewed when my rewrite is done ###@@@
def get_topmost_subnodes_of_class(self, clas): #bruce 080115, revised 080807
    """
    Return the topmost (direct or indirect) children of self -- never
    self itself -- which are instances of clas (or of a subclass).
    Scanning depth-first, once a child is included in the result, its
    own children are not examined.

    @param clas: a class.

    @note: to avoid import cycles, it's often desirable to specify the
        class as an attribute of a convenient Assembly object (e.g.
        xxx.assy.DnaSegment) rather than as a global value that needs
        to be imported.

    @see: same-named method on class Part.
    """
    #NOTE: this method is duplicated in class Part (see Part.py)
    #-- Ninad 2008-08-06 [bruce 090121 comment: TODO/FIX/REVIEW that]
    matches = []
    for kid in self.members:
        if isinstance( kid, clas):
            matches.append(kid)
        elif kid.is_group():
            matches.extend( kid.get_topmost_subnodes_of_class( clas) )
    return matches
def kluge_change_class(self, subclass):
    #bruce 050109 ###@@@ temporary [until files_mmp & assy make this kind
    # of assy.root, shelf, tree on their own]
    """
    Return a new Group with this one's members but of the specified subclass
    (and otherwise just like this Group, which must be in class Group itself,
    not a subclass). This won't be needed once class Assembly is fixed to make
    the proper subclasses directly.

    Side effects: self is dismantled (its members list is made unusable and
    its dad/members are transferred to the returned node); the new node is
    patched into self.dad's members in self's place.
    """
    assert self.__class__ is Group
    if self._encoded_classifications():
        # bug (or mmp format error), but an assertion might not be fully
        # safe [bruce 080115]
        msg = "Bug: self has _encoded_classifications %r (discarded) " \
              "in kluge_change_class to %r: %r" % \
              (self._encoded_classifications(), subclass.__name__, self)
        print msg
        env.history.message( redmsg(quote_html(msg)) )
        pass # but continue anyway
    new = subclass(self.name, self.assy, self.dad) # no members yet
    assert isinstance(new, Group)
        # (but usually it's also some subclass of Group, unlike self)
    if self.dad:
        # don't use addmember, it tells the assy it changed
        # (and doesn't add new in right place either) --
        # just directly patch dad's members list to replace self with new
        ind = self.dad.members.index(self)
        self.dad.members[ind] = new
        # don't tell dad its members changed, until new is finished (below)
        self.dad = None # still available in new.dad if we need it
    new.members = self.members # let new steal our members directly
    new.__cmfuncs = self.__cmfuncs
        # and take responsibility for our members changing...
    self.__cmfuncs = []
    # self should no longer be used; enforce this
    self.members = 333 # not a sequence
    self.temporarily_prevent_autodelete_when_empty = False
        #bruce 080326 precaution, probably not needed
    self.node_icon = "<bug if this is called>"
        # REVIEW: setting self.node_icon here causes a pylint error
        # message on a different line, "def node_icon". I don't know why,
        # and I think pylint is wrong (or if not, wrong to not refer to
        # this line). However, this line is definitely questionable and
        # confusing -- should clean up. [bruce 090121 comment]
    for mem in new.members:
        mem.dad = new
        # bruce 050205 design discussion:
        # should we now call mem.changed_dad()?
        #
        # * reasons yes: new's new class might differ in rules for
        # selgroup or space (e.g. be the top of a selgroup) and change_dad
        # might be noticing and responding to that change, so this might
        # turn out to be required if something has cached that info in mem
        # already.
        #
        # * reasons no: ... some vague uneasiness. Oh, it might falsely
        # tell assy it changed, but I think our caller handles that.
        #
        # So yes wins, unless bugs show up!
        # BUT: don't do this until we're all done (so new is entirely valid).
        ## mem.changed_dad()
    for attr in ['open', 'hidden', 'picked']:
        # not name, assy, dad (done in init or above),
        # selgroup, space (done in changed_dad)
        try:
            val = getattr(self, attr)
        except AttributeError:
            pass # .open will go away soon;
                # others are probably always defined but I'm not sure
                # (and should not care here, as long as I get them all)
        else:
            setattr(new, attr, val)
    for mem in new.members:
        mem.changed_dad() # reason is explained above [bruce 050205]
    new.dad.changed_members()
        # since new class is different from self.class, this might be
        # needed ###@@@ is it ok?
    return new
#bruce 050113 deprecated addmember and confined it to Node; see its
# docstring.
#bruce 071110 split def addmember between Node and Group,
# so Node needn't import Group now that they're in different modules.

def addmember(self, node, before_or_top = False):
    """
    Add node to this Group's members, at the end by default, or at the
    top (beginning) if before_or_top is true, by delegating to addchild.

    [Deprecated public method]
    [overrides Node implem; different behavior;
    see Node implem docstring for documentation of both implems]
    """
    if not self.MT_DND_can_drop_inside():
        #bruce 080317 -- we should revise all addmember calls that this
        #turns up to test what they care about and call addchild or
        #addsibling explicitly
        msg = "WARNING: addmember on class of %r has not been reviewed" \
              " for correctness" % (self,)
            # bugfix [review]: the original code did
            #     print_compact_stack( msg + ": " % self)
            # which, by operator precedence, evaluates (": " % self) --
            # a format operation with no conversion spec -- and raises
            # TypeError instead of printing the warning. Format msg itself.
        print_compact_stack( msg + ": ") ###
    self.addchild( node, top = before_or_top)
    return
def addchild(self,
             newchild,
             _guard_ = 050201,
             top = False,
             after = None,
             before = None ):
    """
    Add the given node, newchild, to the end (aka. bottom) of this Group's
    members list, or to the specified place (top aka. beginning, or after
    some child or index, or before some child or index) if one of the
    named arguments is given.

    Ok even if newchild is already a member of self, in same or different
    location than requested (it will be moved), or a member of some other
    Group (it will be removed). (Behavior with more than one named
    argument is undefined.)

    Note: the existence of this method (as an attribute) might be used as
    a check for whether a Node can be treated like a Group [as of 050201].

    Special case: legal and no effect if newchild is None or 0 (or
    anything false); this turns out to be needed by
    assy.copy_sel/Group.copy or Jig.copy! [050131 comment]

    [Warning (from when this was called addmember): semantics (place of
    insertion, and optional arg name/meaning) are not consistent with
    Node.addmember; see my comments in its docstring. -- bruce 050110]

    [note, 050315: during low-level node-tree methods like addchild and
    delmember, and also during pick and unpick methods, there is no
    guarantee that the Part structure of our assy's node tree is correct,
    so checkparts should not be called, and assy.part should not be asked
    for; in general, these methods might need to know that each node has a
    part (perhaps None), but they should treat the mapping from nodes to
    parts as completely arbitrary, except for calling inherit_part to help
    maintain it.]
    """
    #bruce 050113 renamed from addmember
    #bruce 050110/050206 updated docstring based on current code
    # Note: Lots of changes implemented at home 050201-050202 but
    # not committed until 050206 (after Alpha out); most dates
    # 050201-050202 below are date of change at home.
    #bruce 050201 added _guard_, after, before
    assert _guard_ == 050201 # catches callers passing 'top' positionally
    if newchild is None:
        #bruce 050201 comment: sometimes newchild was the number 0,
        # since Group.copy returned that as a failure code!!!
        # Or it can be None (Jig.copy, or Group.copy after I revised it).
        return

    # == sanity checks: self and newchild are ok and in same assy [bruce 080218]
    # Note: we can't assert not self.killed() or not newchild.killed(),
    # since new nodes look killed due to .dad being None (a defect in
    # current implem of killed? or a misnaming of it, if it really means
    # "in the model"?). If we try, we fail while making any new Group
    # with a members list, including assy.root. Should revise Node.killed
    # to not be true for new nodes, only for killed but not revived ones.
    ## assert not self.killed(), "self must not be killed in %r.addchild(%r)" % \
    ##        (self, newchild)
    # But this should fail for really-killed self or newchild, as long as
    # we keep setting their assy to None -- but the 2nd one is temporarily
    # just a debug print, since it fails in InsertDna_EditCommand.py when
    # used with dna updater (need to fix that soon):
    assert self.assy is not None, "%r has no .assy in addchild" % self
    ## assert self.assy is newchild.assy, \
    if not (self.assy is newchild.assy):
        print "\nBUG***: " \
              "%r.addchild(%r) assy mismatch: %r is not %r" % \
              (self, newchild, self.assy, newchild.assy)

    #bruce 050205: adding several safety checks (and related new feature
    #of auto-delmember) for help with MT DND; they're a good idea anyway.
    #See also today's changes to changed_dad().
    if newchild.dad and not (newchild in newchild.dad.members):
        # This is really a bug or a very deprecated behavior, but we
        # tolerate it for now.
        # Details: some node-creating methods like molecule.copy and/or
        # Group.copy have the unpleasant habit of setting dad in the newly
        # made node without telling the dad! This almost certainly means
        # the other dad-related aspects of the node are wrong... probably
        # best to just pretend those methods never did that. Soon after
        # Alpha we should fix them all and then make this a detected error
        # and no longer tolerate it.
        if debug_flags.atom_debug:
            msg = "atom_debug: addchild setting newchild.dad to None " \
                  "since newchild not in dad's members: %s, %s" % \
                  (self, newchild)
            print_compact_stack(msg)
        newchild.dad = None
    if newchild.is_ascendant(self):
        #bruce 050205 adding this for safety (should prevent DND-move
        #cycles as a last resort, tho might lose moved nodes) (this msg
        #covers newchild is self too, since that's a length-1 cycle)
        print "\nBUG: addchild refusing to form a cycle, " \
              "doing nothing; this indicates a bug in the caller:", \
              self, newchild
        return
    if newchild.dad:
        # == remove newchild from its prior home, first. (Callers not
        # liking this can set newchild.dad = None before calling us. But
        # doing so (or not liking this) is deprecated.)
        if newchild.dad is self:
            # this might be wanted (as a way of moving a node within
            # self.members) (and a caller might request it by accident
            # when moving a node from a general position, so we want to
            # cooperate), but the general-case code won't work if the
            # before or after options were used, whether as nodes (if the
            # node used as a marker is newchild itself) or as indices
            # (since removal of newchild will change indices of subsequent
            # nodes). So instead, if those options were used, we fix them
            # to work.
            if type(before) is type(1):
                # indices will change, use real nodes instead (ok even
                # if real node is 'newchild'! we detect that below)
                before = self.members[before]
            if type(after) is type(1):
                after = self.members[after]
            if before is newchild or after is newchild:
                # this is a noop, and it's basically a valid request, so
                # just do it now (i.e. return immediately); note that
                # general-case code would fail since these
                # desired-position-markers would be gone once we remove
                # newchild from self.members.
                return
            # otherwise (after our fixes above) the general-case code
            # should be ok. Fall thru to removing newchild from prior home
            # (in this case, self), before re-adding it in a new place.
        # remove newchild from its prior home (which may or may not be
        # self):
        newchild.dad.delmember(newchild, unpick = False)
            # this sets newchild.dad to None, but doesn't mess with its
            # .part, .assy, etc
            #bruce 080502 bugfix (of undo/redo losing selectedness of PAM
            # DNA chunks when this is called by dna updater to put them in
            # possibly different groups): passing unpick = False
    # == Only now will we actually insert newchild into self.
    # [end of this part of bruce 050205 changes]
    ## self.assy.changed() # now done by changed_members below
        # (todo: and what about informing the model tree, if it's
        # displaying us? probably we need some subscription-to-changes or
        # modtime system...) [review: obs cmt? that's probably fixed by now]
    if top:
        self.members.insert(0, newchild) # Insert newchild at the very top
    elif after is not None: # 0 has different meaning than None!
        if type(after) is not type(0):
            after = self.members.index(after)
                # raises ValueError if not found, that's fine
        if after == -1:
            self.members += [newchild]
                # Add newchild to the bottom (.insert at -1+1 doesn't do
                # what we want for this case)
        else:
            self.members.insert(after+1, newchild)
                # Insert newchild after the given position
                # (check: does this work for negative indices?)
    elif before is not None:
        if type(before) is not type(0):
            before = self.members.index(before)
                # raises ValueError if not found, that's fine
        self.members.insert(before, newchild)
            # Insert newchild before the given position
            # (check: does this work for negative indices?)
    else:
        self.members.append(newchild)
            # Add newchild to the bottom, i.e. end (default case)
    newchild.dad = self
    newchild.changed_dad()
        # note: this picks newchild if newchild.dad is picked, and
        # sometimes calls inherit_part
    newchild.dad.changed_members()
        # must be done *after* they change and *after* changed_dad has
        # made them acceptable for new dad
    # Note: if we moved newchild from one place to another in self,
    # changed_members is called twice, once after deletion and once after
    # re-insertion. That's probably ok, but I should #doc this in the
    # related subscriber funcs so callers are aware of it. [bruce 050205]
    return
def delmember(self, obj, unpick = True):
    """
    Remove obj from self.members, doing all needed updates:
    optionally unpick obj, clear obj.dad, clear the
    temporarily_prevent_autodelete_when_empty flag when the last member
    is removed, and notify changed_members.
    If obj is not in self.members, do nothing further (deprecated usage).
    @param obj: the child node to remove.
    @param unpick: whether to unpick obj first (default True).
    """
    if obj.dad is not self:
        # bruce 050205 new feature -- check for this (but do nothing about
        # it)
        if debug_flags.atom_debug:
            msg = "atom_debug: fyi: delmember finds obj.dad is not self"
            print_compact_stack( msg + ": ") #k does this ever happen?
    if unpick:
        #bruce 080502 new feature: let this unpick be optional (before
        #now, it was always done)
        obj.unpick()
        #bruce 041029 fix bug 145 [but callers should not depend on
        #this happening! see below]
        #k [bruce 050202 comment, added 050205]: review this unpick again
        #sometime, esp re DND drag_move (it might be more relevant for
        #addchild than for here; more likely it should be made not needed
        #by callers)
        # [bruce 050203 review: this unpick is still needed, to keep
        # killed obj out of selmols, unless we revise things enough to let
        # us invalidate selmols here, or the like; and [050206] we should,
        # since this side effect is sometimes bad (though I forget which
        # recent case of it bugged me a lot).]
    ## self.assy.changed() # now done by changed_members below
    try:
        self.members.remove(obj)
    except:
        # relying on this being permitted is deprecated [bruce 050121]
        if debug_flags.atom_debug:
            msg = "atom_debug: fyi: delmember finds obj not in members list"
            print_compact_stack( msg + ": ") #k does this ever happen?
        return
    obj.dad = None # bruce 050205 new feature
    if not self.members:
        # part of fix for logic bug 2705 [bruce 080326]
        if self.temporarily_prevent_autodelete_when_empty:
            del self.temporarily_prevent_autodelete_when_empty
                # restore class-default state (must be False)
        pass
    self.changed_members() # must be done *after* they change
    return
def steal_members(self): #bruce 050526
    """
    Remove all of this group's members (like delmember would do) and
    return them as a list. Assume self doesn't yet have a dad and no
    members are picked.
    @return: the former members, in their original order.
    [Private method, for copy -- not reviewed for general use!]
    """
    res = self.members
    self.members = []
    if res:
        # Hoisted out of the loop below: the assignment is loop-invariant,
        # and guarding on `res` preserves the original behavior of only
        # touching the attribute when at least one member existed.
        self.temporarily_prevent_autodelete_when_empty = False
            #bruce 080326 precaution
    for obj in res:
        if obj.dad is not self: # error, debug-reported but ignored
            if debug_flags.atom_debug:
                msg = "atom_debug: fyi: steal_members finds obj.dad is not self"
                print_compact_stack( msg + ": ") #k does this ever happen?
        obj.dad = None
    # assume not needed for our private purpose, though it would be
    # needed in general:
    ## self.changed_members()
    return res
def pick(self):
    """
    select the Group -- and therefore (due to our selection invariants)
    all its members
    [extends Node.pick]
    """
    # Pick self *before* the members: member.pick() scans up the tree
    # and must see self already picked (important for speed)
    # [bruce 050131 comment]
    _superclass.pick(self)
    for member in self.members:
        member.pick()
    if debug_pref_History_print_every_selected_object():
        # bruce 050131: skeptical of emitting this history message
        # recursively, but not changing it for Alpha
        env.history.message( self.description_for_history() )
    return
def description_for_history(self):
    """
    Return something to print in the history whenever we are selected
    [some subclasses should override this]
    """
    return "".join(("Group Name: [", self.name, "]"))
def unpick(self):
    """
    unselect the Group -- and all its members! [see also unpick_top]
    [extends Node method]
    """
    # unpick self first, then every member (recursively, via their
    # own unpick implementations)
    _superclass.unpick(self)
    for member in self.members:
        member.unpick()
def unpick_top(self): #bruce 050131 for Alpha: bugfix
    """
    Unselect self without unselecting its members.
    [Group implem -- go up but don't go down]
    [extends Node method]
    """
    #redoc, and clean it all up
    # note: deliberately calls the superclass *unpick*, which does not
    # recurse into members the way Group.unpick does
    _superclass.unpick(self)
def unpick_all_members_except(self, node): #bruce 050131 for Alpha
    """
    Call unpick_all_except(node) on every member, unconditionally,
    and return whether any of those calls returned a true value.
    [private method; #doc; overrides Node method]
    """
    # todo: should probably inline into unpick_all_except and split that
    # for Node/Group
    anything = False
    for member in self.members:
        # the call must happen for *every* member (side effects),
        # so don't fold it into a short-circuiting `or` expression
        member_result = member.unpick_all_except( node)
        anything = anything or member_result
    return anything
def is_glpane_content_itself(self): #bruce 080319
    """
    A Group is never itself "glpane content" (only some of its members
    may be).
    For documentation, see the Node implementation of this method.
    [overrides Node method; not normally overridden on subclasses of Group]
    """
    return False
def pick_if_all_glpane_content_is_picked(self): #bruce 080319
    """
    If not self.is_glpane_content_itself()
    (which is the case for Group and all subclasses as of 080319),
    but if some of self's content *is* "glpane content" in that sense,
    and if all such content is picked, then pick self.
    (Note that picking self picks all its contents.)
    @return: whether self contains any (or is, itself) "glpane content".
    @rtype: boolean
    @note: in spite of the name, if self contains *no* glpane content,
    and is not glpane content itself, this does not pick self.
    [overrides Node method; shouldn't need to be overridden on subclasses,
    since they can override is_glpane_content_itself instead]
    """
    has_glpane_content = False # modified below if glpane content is found
    any_is_unpicked = False # modified below; only covers glpane content
    # pass 1: recurse into members (for side effects and to learn
    # whether any member subtree holds glpane content, picked or not)
    for m in self.members:
        m_has_glpane_content = m.pick_if_all_glpane_content_is_picked()
        if m_has_glpane_content:
            has_glpane_content = True
            if not m.picked:
                any_is_unpicked = True
                # this means we won't pick self, but we must still
                # continue, to determine has_glpane_content
                # and to call pick_if_all_glpane_content_is_picked for its
                # side effects within remaining members
        continue
    # pass 2: consider self itself (always False for Group as of 080319,
    # but kept general for subclasses)
    for m in [self]: # this form shows the similarity with the above loop
        m_has_glpane_content = m.is_glpane_content_itself()
        if m_has_glpane_content:
            has_glpane_content = True
            if not m.picked:
                any_is_unpicked = True
        continue
    if any_is_unpicked and self.picked:
        # should never happen, per our selection invariants
        print "\n*** BUG: %r is picked but apparently has unpicked content" % self
    if has_glpane_content and not any_is_unpicked:
        # note: we might add arguments which modify when this behavior
        # occurs, e.g., to disable it for ordinary Groups which are not
        # inside any special Groups (such as DnaGroups) for some callers;
        # if so, they may be able to skip some of the member loop as well.
        self.pick()
    return has_glpane_content
def _f_move_nonpermitted_members( self, **opts): # in Group [bruce 080319]
    """
    [friend method for enforce_permitted_members_in_groups]
    Find all non-permitted nodes at any level inside self.
    For each such node, if it can find a home by moving higher within self,
    move it there, otherwise move it outside self, to just after self in
    self.dad (after calling self.part.ensure_toplevel_group() to make sure
    self.dad is in the same part as self). (When moving several nodes
    after self from one source, try to preserve their relative order.
    When from several sources, keep putting newly moved ones after
    earlier moved ones. This is less important than safety and efficiency.)
    If this makes self sufficiently invalid to need to be killed,
    it's up to the caller to find out (via _f_wants_to_be_killed)
    and kill self. We don't do this here in case the caller wants to
    defer it (though as of 080319, they don't).
    @return: whether we ejected any nodes.
    """
    have_unscanned_members = True
    move_after_this = self # a cursor in self.dad, to add new nodes after
    # outer loop: re-scan members whenever a recursive call ejected
    # nodes into self (they land just after the member that was scanned)
    while have_unscanned_members:
        have_unscanned_members = False # might be changed below
        for m in self.members[:]:
            if not self.permit_as_member(m, **opts):
                # eject m
                if move_after_this is self:
                    self.part.ensure_toplevel_group()
                    # must do this before first use of self.dad
                self.dad.addchild(m, after = move_after_this)
                #k verify it removes m from old home == self
                move_after_this = m
                # emit a summary message
                summary_format = \
                    "Warning: ejected [N] nonpermitted member(s) of a " \
                    "%s of class %s" % \
                    (self.short_classname(), m.short_classname())
                env.history.deferred_summary_message( redmsg(summary_format) )
            else:
                # keep m, but process it recursively
                ejected_anything = m.is_group() and \
                                   m._f_move_nonpermitted_members(**opts)
                # note: if self cares about deeper (indirect) members,
                # it would have to pass new opts to indicate this to
                # lower levels. so far this is not needed.
                # note: this already added the ejected nodes (if any)
                # into self after m!
                if ejected_anything:
                    if m._f_wants_to_be_killed(**opts):
                        m.kill()
                        summary_format = \
                            "Warning: killed [N] invalid object(s) of class %s" % \
                            (m.short_classname(), )
                        env.history.deferred_summary_message(
                            redmsg(summary_format) )
                    have_unscanned_members = True
                    # we might (or might not) improve the ordering of
                    # moved nodes by starting over here using 'break', but
                    # in some cases this might be much slower (quadratic
                    # time or worse), so don't do it
            continue # next m
        continue # while have_unscanned_members
    return (move_after_this is not self) # whether anything was ejected
def permit_as_member(self, node, pre_updaters = True, **opts): # in Group
    """
    [friend method for enforce_permitted_members_in_groups and subroutines]
    Does self permit node as a direct member,
    when called from enforce_permitted_members_in_groups with
    the same options as we are passed?
    @rtype: boolean
    [overridden in some subclasses]
    """
    # base class permits anything; the arguments exist for the benefit
    # of subclass overrides
    return True
def _f_wants_to_be_killed(self, pre_updaters = True, **opts): # in Group
    """
    [friend method for enforce_permitted_members_in_groups and subroutines]
    Does self want to be killed due to members that got ejected
    by _f_move_nonpermitted_members (or due to completely invalid structure
    from before then, and no value in keeping self even temporarily)?
    @rtype: boolean
    [overridden in some subclasses]
    """
    # base class never wants this; arguments exist for subclass overrides
    return False
# ==
def permit_addnode_inside(self): #bruce 080626 added this to Group API
    """
    Can UI operations wanting to add new nodes to some convenient place
    decide to add them inside this Group?
    @rtype: boolean
    [should be overridden in some Group subclasses which look like leaf nodes
    to the user when seen in the model tree]
    """
    return True # for most Groups
# ==
def inherit_part(self, part): # Group method; bruce 050308
    """
    Self (a Group) is inheriting part from its dad.
    Set this part in self and all partless kids
    (assuming those are all at the top of the nodetree under self).
    [extends Node method]
    """
    _superclass.inherit_part(self, part)
    # propagate only into kids that don't yet have a part
    for kid in self.members:
        if kid.part is None:
            kid.inherit_part(part)
    return
def all_content_is_hidden(self): # Ninad 080129; revised by Bruce 080205
    """
    [overrides Node.all_content_is_hidden]
    Return True if *all* members of this group are hidden
    (vacuously True for an empty group); otherwise return False.
    @see: dna_model.DnaGroup.node_icon() for an example use.
    """
    # all() short-circuits at the first non-hidden member,
    # exactly like the explicit loop it replaces
    return all(member.all_content_is_hidden() for member in self.members)
def hide(self):
    """
    Hide this group by hiding each of its members.
    """
    for member in self.members:
        member.hide()
def unhide(self):
    """
    Unhide this group by unhiding each of its members.
    """
    for member in self.members:
        member.unhide()
def apply2all(self, fn):
    """
    Apply fn to self and (as overridden here in Group) all its members.
    It's safe for fn to modify self.members (we iterate over a snapshot),
    but modifications to not-yet-scanned nodes' members lists will affect
    which nodes the scan reaches, since each node's members list is
    snapshotted only when that node is reached. E.g. if fn moves a node
    into a later subtree, the same scan will visit it again there.
    [overrides Node implem]
    """
    fn(self)
    for member in list(self.members):
        member.apply2all(fn)
def apply_to_groups(self, fn):
    """
    Like apply2all, but only applies fn to Group nodes (at or under self).
    @note: this *does* apply fn to leaf-like Groups such as DnaStrand,
    and to any groups inside them (even though they are not
    user-visible in the model tree).
    [overrides Node implem]
    """
    fn(self)
    # iterate over a snapshot so fn may safely modify self.members
    for member in list(self.members):
        member.apply_to_groups(fn)
def apply2picked(self, fn):
    """
    Apply fn to the topmost picked nodes under (or equal to) self.
    That is, scan self's subtree applying fn to every picked node found,
    without descending into the members of a picked node -- so fn is
    never applied to both a node and one of its ancestors.
    For the effect of fn modifying a members list, see apply2all's
    docstring. [An example of (i hope) a safe way of modifying it,
    as of 050121, is in Group.ungroup.]
    [overrides Node implem]
    """
    if self.picked:
        fn(self)
        return
    for member in list(self.members):
        member.apply2picked(fn)
def hindmost(self): ###@@@ should rename
    """
    [docstring is meant for both Node and Group methods taken together:]
    Thinking of nodes as subtrees of the model tree, return the smallest
    subtree of self which contains all picked nodes in this subtree, or
    None if there are no picked nodes in this subtree. The result does
    not depend on the order of traversal of a Group's members.
    """
    if self.picked:
        return self
    found = None
    for member in self.members:
        sub = member.hindmost()
        if found and sub:
            # picked nodes exist in two different member subtrees,
            # so self is the smallest container of all of them
            return self
        found = found or sub
    return found
def permits_ungrouping(self):
    """
    Should the user interface permit users to dissolve this Group
    using self.ungroup?
    @rtype: boolean
    [Some subclasses should override this.]
    """
    return True # yes, for normal groups.
def ungroup(self):
    """
    If this Node is a Group, dissolve it, letting its members
    join its dad, if this is possible and if it's permitted as a
    user-requested operation. [bruce 050121 thinks this should be
    split into whether this is permitted, and doing it whether or
    not it's permitted; the present method is really a UI operation
    rather than a structural primitive.]
    [overrides Node.ungroup]
    """
    #bruce 050121 revised: use permits_ungrouping;
    # add kids in place of self within dad (rather than at end)
    if self.dad and self.permits_ungrouping():
        ## if self.name == self.assy.name: return
        ## (that's now covered by permits_ungrouping)
        # iterate over a snapshot, since delmember mutates self.members
        for x in self.members[:]:
            ## x.moveto(self.dad)
            ## # todo: should probably put them before self in there
            self.delmember(x)
            self.addsibling(x, before = True)
            # put them before self, to preserve order [bruce 050126]
        # self is now empty; remove it from the tree entirely
        self.kill()
# == Group copy methods [revised/added by bruce 050524-050526]
def will_copy_if_selected(self, sel, realCopy):
    """
    Whether this Group should be copied when the given selection is
    copied -- always True for a Group; if realCopy, also give members
    a chance to print their own warnings about being copied.
    [overrides Node method]
    """
    # wware 060329 added realCopy arg
    if realCopy:
        # [bruce 060329 comment on wware code:]
        # This recursion is just to print warnings. It's safe for now,
        # since this function is apparently not itself called recursively
        # while copying Group members, but that might change, and if it
        # does this will also need to change. It also appears to be
        # incorrect, at least in some cases, e.g. a Measure Distance jig
        # in a Group gets copied even if only one atom does (in spite of
        # having printed this message), though the produced object gives a
        # traceback when displayed. And the easiest fix for that might be
        # for copying to do a recursive call of this, which is exactly
        # what would make this method's own recursion unneeded and unsafe
        # (it would become exponential in number of nested Groups, in
        # runtime and number of redundant warnings).
        for x in self.members:
            x.will_copy_if_selected(sel, True)
    return True
def copy_full_in_mapping(self, mapping): # Group method
    """
    Make and return a full copy of self, recording it in mapping,
    and recursively copying those members which agree to be copied.
    @param mapping: the copy-mapping object (supplies .assy for the copy,
                    and records the self -> copy correspondence).
    @return: the new Group (or Group subclass instance).
    [overrides Node method]
    """
    # todo: merge with copy_with_provided_copied_partial_contents
    # (similar but not identical code and comments)
    #bruce 050526, revised 080314, 081212
    # Note: the subclasses of Group include
    # DnaGroup, DnaStrand and DnaSegment (which are effectively new kinds
    # of model objects), and PartGroup and ClipboardShelfGroup (which
    # are needed in special places/roles in the MT to give them special
    # behavior). The special-MT-place subclasses probably need to be copied
    # as ordinary Groups, whereas the Dna-related classes need to be
    # copied as instances of the same subclass. To support this distinction
    # (new feature and likely bugfix), I'll introduce a method to return
    # the class to use for making copies.
    # [bruce 080314, comment revised 080331]
    class_for_copies = self._class_for_copies(mapping)
    new = class_for_copies(self.name, mapping.assy, None)
    ## probably not needed:
    ## self._copy_editCommand_to_copy_of_self_if_desirable(new)
    self.copy_copyable_attrs_to(new)
    # redundantly copies .name; also copies .open.
    # (This might be wrong for some Group subclasses!
    # Not an issue for now, but someday it might be better to use
    # attrlist from target, or intersection of their attrlists...)
    mapping.record_copy(self, new) # asserts it was not already copied
    for mem in self.members:
        memcopy = mem.copy_full_in_mapping(mapping)
        # can be None, if mem refused to be copied
        if memcopy is not None:
            new.addchild(memcopy)
    return new
def _class_for_copies(self, mapping): #bruce 080314
    """
    [private; overridden in PartGroup and ClipboardShelfGroup]
    Return the subclass of Group which should be used for making copies of self.
    @param mapping: ignored in this default implementation.
    """
    # default implem, for subclasses meant for new model objects
    return self.__class__
def copy_with_provided_copied_partial_contents( self, name, assy, dad, members):
    """
    Imitate Group(name, assy, dad, members) but using the correct class
    for copying self. (Arg signature is like that of Group.__init__
    except that all args are required.)
    @param name: name for the new group.
    @param assy: assembly for the new group (may differ from self.assy).
    @param dad: None, or a node we should make our new parent node.
    @param members: members for the new group (already copied/prepared).
    @return: the new group.
    @note: in current calls, members will be a partial copy of self.members,
    possibly modified with wrapping groups, merged or dissolved
    internal groups, partialness of copy at any level, etc.
    @note: assy might not be self.assy, but will be the assy of all passed
    members and dad.
    """
    #bruce 080414
    # todo: merge with copy_full_in_mapping
    # (similar but not identical code and comments)
    mapping = "KLUGE: we know all implems of _class_for_copies ignore this argument"
    # this KLUGE needs cleanup after the release
    class_for_copies = self._class_for_copies(mapping)
    new = class_for_copies(name, assy, dad, members)
    ## probably not needed:
    ## self._copy_editCommand_to_copy_of_self_if_desirable(new)
    self.copy_copyable_attrs_to(new)
    # redundantly copies .name [messing up the one we just passed];
    # also copies .open.
    # (This might be wrong for some Group subclasses!
    # Not an issue for now, but someday it might be better to use
    # attrlist from target, or intersection of their attrlists...)
    new.name = name # fix what copy_copyable_attrs_to might have messed up
    return new
# ==
def kill(self): # in class Group
    """
    Kill all members, then self.
    [extends Node method]
    """
    #bruce 050214: called Node.kill instead of inlining it; enhanced
    # Node.kill; and fixed bug 381 by killing all members first.
    self._f_prekill()
    # note: prekill must happen before killing the members,
    # even though _superclass.kill might do it too [bruce 060327]
    for member in list(self.members):
        member.kill()
    _superclass.kill(self)
def _f_set_will_kill(self, val): #bruce 060327 in Group
    """
    [private helper method for _f_prekill; see its docstring for details;
    subclasses with owned objects should extend this]
    [extends Node method]
    """
    # set the flag on self first, then propagate to every member
    _superclass._f_set_will_kill( self, val)
    for member in self.members:
        member._f_set_will_kill( val)
    return
def reset_subtree_part_assy(self): #bruce 051227
    """
    [extends Node method]
    """
    # members first (over a snapshot, since resets may alter the list),
    # then self -- same order as the superclass expects
    for member in list(self.members):
        member.reset_subtree_part_assy()
    _superclass.reset_subtree_part_assy(self)
    return
def is_ascendant(self, node):
    """
    [overrides Node.is_ascendant, which is a very special case of the same
    semantics]
    Return True iff self is an ascendant of node,
    i.e. if the subtree of nodes headed by self contains node.
    (node must be a Node or None (for None we return False);
    thus it's legal to call this for node being any node's dad.)
    """
    #e rename nodetree_contains? is_ancestor? (tho true of self too)
    #e or just contains? (no, not obvious arg is a node)
    ancestor = node
    while ancestor is not None:
        if ancestor is self:
            return True
        ancestor = ancestor.dad
    return False
def nodespicked(self):
    """
    Return the number of nodes currently selected in this subtree.
    [extends Node.nodespicked()]
    Warning (about current implementation [050113]): scans the entire
    tree... calling this on every node in the tree might be slow (every
    node scanned as many times as it is deep in the tree).
    """
    count = _superclass.nodespicked(self)
        # bruce 050126 bugfix: was 0 (as if this was called leavespicked)
    for member in self.members:
        count += member.nodespicked()
    return count
def node_icon(self, display_prefs):
    """
    Return the model tree icon for this group: expanded when the tree
    widget reports us open, collapsed otherwise.
    """
    if display_prefs.get('open', False):
        return imagename_to_pixmap("modeltree/group-expanded.png")
    return imagename_to_pixmap("modeltree/group-collapsed.png")
def MT_kids(self, display_prefs = {}):
    #bruce 050109; 080108 renamed from kids to MT_kids, revised semantics
    """
    [Overrides Node.MT_kids(); is overridden in some subclasses]
    Return the ordered list of our kids which should be displayed in a
    model tree widget which is using (for this node itself) the given
    display prefs.
    (These might include the boolean pref 'open', default False, telling us
    whether the tree widget plans to show our kids or not. But there is
    no need to check for that, since the caller will only actually show
    our MT_kids if self is openable and open. Note that some
    implementations of self.openable() might check whether MT_kids
    returns any kids or not, so ideally it should be fast.)
    (Don't include inter-kid gaps for drag & drop explicitly as kids;
    see another method for that. ###nim)
    Subclasses can override this; this version is valid for any Group
    whose .members don't need filtering or updating or augmenting (like
    PartGroup does as of 050109).
    @see: self.openable()
    @see: Node_api.MT_kids docstring, which is more definitive than this one
    @see: Group.MT_kids
    """
    # [Note that it ought to be ok for subclasses to have a set of MT_kids
    # which is not related to their .members, provided callers (tree
    # widgets) never assume node.dad corresponds to the parent relation in
    # their own tree of display items. I don't know how well the existing
    # caller (modelTree.py) follows this so far. -- bruce 050113.
    # Update, bruce 080306 -- maybe as of a change today, it does --
    # we'll see.]
    # note: returns the live members list, not a copy
    return self.members #bruce 081217 removed list() wrapper
def openable(self):
    """
    whether tree widgets should permit the user to open/close
    their view of this node, and show children when it's open
    @rtype: boolean
    [overrides Node method]
    """
    # if we decide this depends on the tree widget or on something about
    # it, we'll have to pass in some args... don't do that unless/until we
    # need to.
    return True
def edit(self):
    """
    Open the (modal) Group Properties dialog on self, then update the
    model tree to reflect any changes made in it.
    [this is overridden in some subclasses of Group]
    @see: DnaGroup.edit() for an example (overrides this method)
    """
    cntl = GroupProp(self) # Normal group prop
    cntl.exec_() # blocks until the dialog is dismissed
    self.assy.mt.mt_update()
def getProps(self): # probably by Ninad
    """
    Get specific properties of this Group (if it is editable).
    Overridden in subclasses.
    Default implementation returns an empty tuple.
    @rtype: tuple
    @see: DnaSegment.getProps() for an example.
    """
    return ()
def setProps(self, props): # probably by Ninad
    """
    Set certain properties (set vals for some attrs of this group).
    Overridden in subclasses.
    Default implementation does nothing (props is ignored).
    @see: self.getProps()
    @see: DnaSegment.setProps() for an example.
    """
    return
def dumptree(self, depth = 0): # just for debugging
    """
    Print an indented dump of this subtree to stdout, flagging any
    member whose .dad backpointer doesn't point back to self.
    @param depth: current nesting depth, used only for indentation.
    """
    print depth * "...", self.name
    for x in self.members:
        if x.dad is not self:
            # inconsistent tree structure -- report but continue
            print "bad thread:", x, self, x.dad
        x.dumptree(depth + 1)
    return
def draw(self, glpane, dispdef): #bruce 050615, 071026 revised this
    """
    Draw all members of this group (unless self is hidden).
    Any exception from a member aborts drawing of the remaining members
    but is reported rather than propagated.
    """
    if self.hidden:
        #k does this ever happen? This state might only be stored
        # on the kids... [bruce 050615 question]
        return
    ## self._draw_begin(glpane, dispdef)
    try:
        for ob in self.members: ## [:]:
            ob.draw(glpane, dispdef) #see also self.draw_after_highlighting
            # Check: Do they actually use dispdef? I know some of them
            # sometimes circumvent it (i.e. look directly at outermost one).
            # Todo: I might like to get them to honor it, and generalize
            # dispdef into "drawing preferences". Or it might be easier for
            # drawing prefs to be separately pushed and popped in the glpane
            # itself... we have to worry about things which are drawn before
            # or after main drawing loop -- they might need to figure out
            # their dispdef (and coords) specially, or store them during first
            # pass (like renderpass.py egcode does when it stores modelview
            # matrix for transparent objects). [bruce 050615 comments]
    except:
        # deliberately broad: drawing must not crash the app
        msg = "exception in drawing some Group member; skipping to end"
        print_compact_traceback(msg + ": ")
    ## self._draw_end(glpane, dispdef)
    return
def draw_after_highlighting(self, glpane, dispdef, pickCheckOnly = False):
    """
    Things to draw after highlighting. See superclass method for more info.
    @return: whether any member drew anything.
    @see: self.draw()
    @see: Node.draw_after_highlighting() which this overrides
    """
    if self.hidden:
        return False
    anything_drawn = False
    for member in self.members:
        # every member must be called (for its side effects),
        # so don't short-circuit once something has been drawn
        drawn = member.draw_after_highlighting(
            glpane,
            dispdef,
            pickCheckOnly = pickCheckOnly )
        if drawn:
            anything_drawn = True
    return anything_drawn
## def _draw_begin(self, glpane, dispdef): #bruce 050615
## """
## Subclasses can override this to change how their child nodes are drawn.
## """
## pass
##
## def _draw_end(self, glpane, dispdef): #bruce 050615
## """
## Subclasses which override _draw_begin should also override _draw_end to
## undo whatever changes were made by _draw_begin (preferably by popping
## stacks, rather than by doing inverse transformations, which only work
## if nothing was messed up by child nodes or exceptions from them, and
## which might be subject to numerical errors).
## """
## pass
def getstatistics(self, stats):
    """
    add group to part stats
    """
    # count self, then let each member add its own contribution
    stats.ngroups += 1
    for member in self.members:
        member.getstatistics(stats)
def writemmp(self, mapping): #bruce 080115 use classifications
    """
    Write self to the mmp file (via mapping) as a "group" record,
    followed by its "info opengroup" records, any forwarded nodes due
    here, all members (recursively), and a closing "egroup" record.
    """
    encoded_classifications = self._encoded_classifications()
    mapping.write( "group (%s)%s\n" % (
        mapping.encode_name( self.name),
        encoded_classifications and
        (" " + encoded_classifications) or ""
    ))
    # someday: we might optimize by skipping info opengroup open if it has
    # the default value, but it's hard to find out what that is reliably
    # for the various special cases. It's not yet known if it will be
    # meaningful for all subclasses, so we write it for all of them for
    # now. [bruce 080115 comment, revised 080331]
    mapping.write("info opengroup open = %s\n" %
                  (self.open and "True" or "False"))
    # All "info opengroup" records should be written before we write
    # any of our members. If Group subclasses override this method
    # (and don't call it), they'll need to behave similarly.
    self.writemmp_other_info_opengroup(mapping) #bruce 080507 refactoring
    # [bruce 050422: this is where we'd write out "jigs moved forward" if
    # they should come at start of this group...]
    for xx in mapping.pop_forwarded_nodes_after_opengroup(self):
        mapping.write_forwarded_node_for_real(xx)
    for x in self.members:
        x.writemmp(mapping)
        # [bruce 050422: ... and this is where we'd write them, to put them
        # after some member leaf or group.]
        for xx in mapping.pop_forwarded_nodes_after_child(x):
            mapping.write_forwarded_node_for_real(xx)
    mapping.write("egroup (" + mapping.encode_name(self.name) + ")\n")
def _encoded_classifications(self): #bruce 080115
    """
    Return self's mmp classifications as one space-separated string
    (empty string if there are none).
    [should not need to be overridden; instead,
    subclasses should assign a more specific value of
    _mmp_group_classifications]
    """
    assert type(self._mmp_group_classifications) == type(())
        # especially, not a string
    names = list( self._mmp_group_classifications)
    extras = self._extra_classifications
    if extras:
        names.extend(extras)
    return " ".join(names)
def writemmp_other_info_opengroup(self, mapping): #bruce 080507 refactoring
    """
    Hook for writing additional "info opengroup" records; this base
    implementation writes none.
    [subclasses which want to write more kinds of "info opengroup" records
    should override this to do so.]
    """
    del mapping
    return
def writepov(self, f, dispdef):
    """
    Write POV-Ray output for every member (nothing if self is hidden).
    """
    if self.hidden:
        return
    for member in self.members:
        member.writepov(f, dispdef)
def writemdl(self, alist, f, dispdef):
    """
    Write MDL output for every member (nothing if self is hidden).
    """
    if self.hidden:
        return
    for member in self.members:
        member.writemdl(alist, f, dispdef)
def __str__(self):
    """
    Terse developer-oriented string form of this group.
    """
    # (review: is this ever user-visible, e.g. in history messages?)
    return "<group " + self.name + ">"
def move(self, offset): # in Group [bruce 070501 added this to Node API]
    """
    Move this group by moving each member by offset.
    [overrides Node.move]
    """
    for member in self.members:
        member.move(offset)
    return
def pickatoms(self): # in Group [bruce 070501 added this to Node API]
    """
    Pick the atoms in every member; return the total number picked.
    [overrides Node method]
    """
    return sum(member.pickatoms() for member in self.members)
pass # end of class Group
# end
| NanoCAD-master | cad/src/foundation/Group.py |
# Copyright 2008 Nanorex, Inc. See LICENSE file for details.
"""
NodeWithAtomContents.py -- abstract class for Node subclasses which
can contain Atoms.
@author: Bruce
@version: $Id$
@copyright: 2008 Nanorex, Inc. See LICENSE file for details.
History:
Bruce 080305 added some abstract classes between Node and Group
in the inheritance hierarchy, of which is this one.
Bruce 080306 added atom content access & maintenance methods.
"""
from foundation.Utility import NodeWith3DContents
_superclass = NodeWith3DContents
class NodeWithAtomContents(NodeWith3DContents):
# REVIEW: which methods can safely assert that subclass must implement?
"""
Abstract class for Node subclasses which can contain Atoms.
Notable subclasses include Chunk and Group.
"""
def pickatoms(self):
    """
    [overrides Node method; subclasses must override this method]
    """
    # intentionally a silent no-op here (unlike contains_atom below,
    # which asserts); subclasses are still expected to override
    pass ### assert 0, "subclass must implement"
def contains_atom(self, atom):
    """
    Whether self's subtree contains the given atom.
    [overrides Node method; subclasses must override this method]
    """
    assert 0, "subclass must implement"
# == atom content access & maintenance methods [bruce 080306]
# initial values of instance variables
# atom content bits, minimum possible value and maximum possible value
#
# (When the min and max bits at the same position differ, the corresponding
# atom content bit is not known, and will be recomputed as needed, and
# stored in both instance variables. The initial values correspond to all
# content bits being uncertain.)
# class defaults: all atom content bits uncertain (see comment above)
_min_atom_content = 0 # instance variable value is always >= 0
_max_atom_content = -1 # instance variable value might be < 0 or not
# access
def get_atom_content(self, flags = -1):
    """
    Return our atom content bits, masked by flags; recompute them
    (for all bits, not just the requested ones) if any requested bit
    is currently uncertain.
    [overrides Node method]
    """
    known = self._min_atom_content & flags
    if known != (self._max_atom_content & flags):
        # at least one requested bit is uncertain -- recompute.
        # note: we update *all* bits regardless of flags; this might
        # descend into subnodes we don't care about, but on the whole
        # it's likely faster, since this method may be called for
        # several disjoint flags in succession
        known = self._f_updated_atom_content() & flags
    return known
# recomputation
def _f_updated_atom_content(self):
"""
Recompute, record, and return our atom content,
optimizing this if it's exactly known on self or on any node-subtrees.
[Overrides Node method. Subclasses whose kids are not exactly
self.members must override or extend this further.]
"""
min_content = self._min_atom_content
if min_content == self._max_atom_content:
return min_content # assume these attrs are always correct
atom_content = self._ac_recompute_atom_content()
assert atom_content >= 0, \
"illegal atom content %#x computed in %r._ac_recompute_atom_content()" % \
( atom_content, self )
self._min_atom_content = atom_content
self._max_atom_content = atom_content
## print "_f_updated_atom_content(%r) returns %#x" % (self, atom_content)
return atom_content
def _ac_recompute_atom_content(self):
"""
[All subclasses must override this method.]
"""
# NOTE: this would be named _recompute_atom_content,
# except for a conflict with Chunk superclass InvalMixin
# which reserves all names starting with _recompute_ for use
# with its special rules. This should somehow be fixed or
# worked around, e.g., use a less generic prefix in InvalMixin
# or at least provide an "out" like defining
# _inputs_for_xxx = _NOT_FOR_INVALMIXIN before the _recompute_
# method. (There's a case like that in class Atom as well,
# but it seems to cause no harm.) Right now there is no time
# for that given that renaming this method will also work. # CLEAN UP
# [bruce 080306]
assert 0, "subclass must implement"
# invalidation
def invalidate_atom_content(self, flags = -1):
"""
Your kids are changing internally and/or being removed/added
more than the caller wants to keep track of, so just make
all the given content flags uncertain (and do necessary
propogated invalidations, including to the model tree).
"""
# this should be optimized, but the following is correct
# and exercises the other methods. (And for repeated invals
# with the same flags, those calls will not recurse, and a
# simple check here of the need to call them could prevent
# them both [###todo].)
self.maybe_remove_some_atom_content(flags)
self.maybe_add_some_atom_content(flags)
return
def _undo_update(self): #bruce 080310
self.invalidate_atom_content()
_superclass._undo_update(self)
return
# incremental update
def remove_some_atom_content(self, flags):
"""
One of your kids is, or might be (it makes no difference which one
in the "remove" case), removing some "atom content" of the given type
(due to changes in itself, or being removed as your kid).
Record and propogate the change, doing mt_update if required.
@see: add_some_atom_content, maybe_add_some_atom_content
[unlikely to be overridden in subclasses; if it is,
override its alias maybe_remove_some_atom_content too]
"""
# possible optimization (only worth the unclarity if this is ever
# noticeable on a profile): just iterate up the dad chain, i.e.
# inline the recursive calls of this method on self.dad.
# much of this would then be doable in Pyrex.
new = old = self._min_atom_content
assert new >= 0
new &= (~flags)
assert new >= 0
## if (old, new) != (0, 0):
## print "removed %#x from %#x to get %#x" % (flags, old, new)
removed = old - new # often 0, so we optimize for that
# note: subtraction makes sense for boolean flag words in this case
# (though not in general) since new is a subset of old
assert removed >= 0
if removed:
self._min_atom_content = new
dad = self.dad # usually present, optim for that
if dad:
# note: no atom content is contributed
# directly by self -- it all comes from Atoms
# and those are not Nodes.
dad.remove_some_atom_content(removed)
### TODO: mt_update, if we are currently shown in the model tree, in a way this should change
return
maybe_remove_some_atom_content = remove_some_atom_content
def maybe_add_some_atom_content(self, flags):
"""
One of your kids *might be* adding some "atom content" of the given type
(due to changes in itself, or being added as your kid).
Record and propogate the change, doing mt_update if required.
@see: add_some_atom_content, remove_some_atom_content
[unlikely to be overridden in subclasses]
"""
# note: see possible optimization comment in remove_some_atom_content
new = old = self._max_atom_content
new |= flags
## if (old, new) != (-1, -1):
## print "added %#x to %#x to get %#x" % (flags, old, new)
added = new - old # often 0, so we optimize for that
# note: subtraction makes sense for boolean flag words in this case
# (though not in general) since new is a superset of old
if added:
self._max_atom_content = new
dad = self.dad # usually present, optim for that
if dad:
# note: no atom content is contributed
# directly by self -- it all comes from Atoms
# and those are not Nodes.
dad.maybe_add_some_atom_content(added)
#bruce 080311 fix bug 2657: add -> maybe_add
### TODO: mt_update, if needed
return
def add_some_atom_content(self, flags):
"""
One of your kids *is* adding some "atom content" of the given type
(due to changes in itself, or being added as your kid).
Record and propogate the change, doing mt_update if required.
@note: unlike maybe_add_some_atom_content, in this case we can add bits
to self._min_atom_content as well as self._max_atom_content.
@see: maybe_add_some_atom_content, remove_some_atom_content
[unlikely to be overridden in subclasses]
"""
# note: see possible optimization comment in remove_some_atom_content
assert flags >= 0
new_max = old_max = self._max_atom_content
new_max |= flags
## if (old_max, new_max) != (-1, -1):
## print "max: added %#x to %#x to get %#x" % (flags, old_max, new_max)
added_max = new_max - old_max # often 0, so we optimize for that
new_min = old_min = self._min_atom_content
new_min |= flags
assert new_min >= 0
## if (old_min, new_min) != (-1, -1):
## print "min: added %#x to %#x to get %#x" % (flags, old_min, new_min)
added_min = new_min - old_min # often 0, so we optimize for that
if added_max or added_min:
self._max_atom_content = new_max
self._min_atom_content = new_min
dad = self.dad # usually present, optim for that
if dad:
dad.add_some_atom_content(added_max | added_min)
### TODO: mt_update, if needed
return
pass # end of class NodeWithAtomContents
# end
| NanoCAD-master | cad/src/foundation/NodeWithAtomContents.py |
# Copyright 2004-2009 Nanorex, Inc. See LICENSE file for details.
"""
inval.py -- simple invalidation/update system for attributes within an object
@author: Bruce
@version: $Id$
@copyright: 2004-2009 Nanorex, Inc. See LICENSE file for details.
bruce 050513 replaced some == with 'is' and != with 'is not', to avoid __getattr__
on __xxx__ attrs in python objects.
"""
from utilities.debug import print_compact_traceback
debug_counter = 0 # uncomment the related code (far below) to find out what's calling our __getattr__ [bruce 050513]
# For now, we only support formulas whose set of inputs is constant,
# in the sense that evaluating the formula always accesses all the inputs.
# (If this rule is violated, we won't detect that but it can cause bugs.)
# For usage instructions, see the example use in chunk.py.
# Later we should add documentation for how to use this in general.
# But I couldn't resist adding just a bit of that now, so here's a sketch...
# assuming you already fully understand the general inval/update pattern,
# which is not explained here.
# (The general inval/update pattern itself is not documented in this file;
#  it is assumed known, and is touched on in comments in chunk.py.)
# Basically, a formula for attr1 is a _recompute_attr1 method in a client object,
# with a class attribute _inputs_for_attr1 listing the names of other invalidatable attrs
# (in the same instance) used by this formula. The formula must always use all of
# the attrs in that list or bugs can result (described sketchily in comments
# in chunk.py). Whenever any of those "input attrs" is invalidated (and not already invalid),
# attr1 will be invalidated too.
# The formula can also use outside info, provided that attr1
# will be manually invalidated (by external code) whenever that outside info might have changed.
# Invalidation means: declaring (at runtime) that the currently assigned value
# (of some attr in some instance) might be wrong, and should therefore no longer be used,
# but should be recomputed when (or before) it's next needed.
# Invalidation in this system is represented by deleting the invalid attribute.
# (Terminology note:
# "Invalid" means an attr *should* be deleted; "invalidate" means *actually* deleting it.
# It's possible for an attribute to be invalid, but not be invalidated,
# if some external code changes it and doesn't invalidate it. That leads to bugs.)
# ... more info needed here (or, better, in an external doc.html file)
# about inval methods, updating, etc...
class inval_map:
    """
    Per-class record of which attributes (of a hypothetical instance)
    will need to be invalidated when a given one changes. Cycles are
    believed to be ok but have never been tested.
    """
    def __init__(self):
        # public attribute: maps attr name -> list of attr names
        # which must be invalidated when it changes
        self.affected_by = {}
    def record_output_depends_on_inputs(self, output, inputs):
        """
        Record that output (an attr name) always depends on each input
        in inputs (a list of attr names), and that output and every
        input are invalidatable attributes. (To just record that an
        attr is invalidatable, call this with inputs == [].)
        """
        # register every name involved as an invalidatable attribute
        for name in [output] + inputs:
            if name not in self.affected_by:
                self.affected_by[name] = []
        # record that each input affects output, without duplicates
        for name in inputs:
            dependents = self.affected_by[name]
            if output not in dependents:
                dependents.append(output)
        return
    pass # end of class inval_map
# ==
def remove_prefix_func(prefix):
    """
    Return a function which assumes its argument starts with prefix,
    and strips that prefix off.
    """
    n = len(prefix)
    def strip_prefix(s):
        # caller guarantees s.startswith(prefix)
        return s[n:]
    return strip_prefix
def filter_and_remove_prefix( lis, prefix):
    """
    Return a list of the elements of lis which start with prefix,
    with that prefix removed from each one.
    """
    # Use a list comprehension rather than map/filter so the result is a
    # real list under both Python 2 and Python 3 (under Python 3,
    # map/filter would return lazy iterators, breaking callers such as
    # make_invalmap_for which use list methods like .remove on the result).
    return [attr[len(prefix):] for attr in lis if attr.startswith(prefix)]
invalmap_per_class = {} # cache of inval_map per client class, keyed by id(class)

def make_invalmap_for(obj):
    """
    Make and return a fresh inval_map for an object (or a class),
    checking that every _recompute_xxx attribute has a matching
    _inputs_for_xxx declaration and vice versa.
    """
    imap = inval_map()
    names = dir(obj)
    declared = filter_and_remove_prefix(names, "_inputs_for_")
    computed = filter_and_remove_prefix(names, "_recompute_")
    for attr in declared:
        assert attr in computed, "_inputs_for_%s should be defined only when _recompute_%s is defined" % (attr, attr)
        computed.remove(attr)
        inputs = getattr(obj, "_inputs_for_" + attr)
        assert type(inputs) is type([])
        imap.record_output_depends_on_inputs(attr, inputs)
    # anything left in computed has a recompute method but no inputs declaration
    assert not computed, \
           "some _recompute_ attrs lack _inputs_for_ attrs: %r" % computed
    return imap
# ==
class InvalMixin:
    """
    Mixin class for supporting a simple invalidation/update scheme
    for certain attributes of each instance of the main class you use it with.
    Provides __getattr__ and a few other methods. Supports special
    attributes and methods in the main class whose names
    start with prefixes _inputs_for_, _get_, _recompute_.
    """
    # Recomputation methods. Certain attributes, whose values should always
    # equal the result of formulas which depend on other attributes (of the
    # same or different objects), are not always explicitly defined,
    # but instead are recomputed as needed by the following methods, which
    # should only be called by __getattr__ (which will save their results
    # for reuse until they become incorrect, as signalled by the
    # "invalidation methods" defined elsewhere).
    # [Sometime I should write a separate documentation file containing a
    # more complete explanation. #e]
    #
    # The recomputation method _recompute_xxx should compute the currently
    # correct value for the attribute self.xxx, and either return it, or store
    # it in self.xxx (and return None), as it prefers. (If the correct value
    # is None, then to avoid ambiguity the recomputation method must store it.
    # If it doesn't, some assertions might fail.)
    #
    # Exceptions raised by these
    # methods are errors, and result in a printed warning and self.xxx = None
    # (but self.xxx will be considered valid, in the hope that this will
    # delay the next call to the buggy recompute method).
    #
    # A recomputation method _recompute_xxx can also set the values of other
    # invalidatable attributes (besides xxx) which it happens to compute at
    # the same time as xxx, to avoid the need to redundantly compute them later;
    # but if it does that, it must call self.validate_attr on the names of those
    # attributes, or later invalidations of them might not be done properly.
    # (Actually, for now, that method is a noop except for checking assertions,
    # and nothing will detect the failure to call it when it should be called.)
    #
    # self.validate_attr should never be called except when the attr was known
    # to be invalid, and was then set to the correct value (e.g. in a
    # recomputation method). This differs from self.changed_attr, which is
    # generally called outside of recomputation methods by code which sets
    # something to its new correct value regardless of whether it was previously
    # invalidated. Changed_attr has to invalidate whatever depends on attr, but
    # validate_attr doesn't.
    #
    # (We might or might not teach __getattr__ to detect the bug of not calling
    # validate_attr; if we do and it's efficient, the requirement of calling it
    # explicitly could be
    # removed. Maybe scanning self.__dict__ before and after will be ok. #e)
    #
    # The set of invalidatable attributes needed by _recompute_xxx is determined
    # by .... or can be specified by... ###@@@
    # If the correct value of self.xxx depends on anything else, then any code
    # that changes those other things needs to either declare ... or call ... ###@@@.
    def __getattr__(self, attr): # in class InvalMixin; doesn't inherit _eq_id_mixin_ -- should it? ##e [060209]
        """
        Called to compute certain attrs which have not been recomputed since
        the other attrs they depend on were initialized or changed. Code which
        might change the value that these attrs should have (i.e. which might make
        them "invalid") is required to "invalidate them" (i.e. to declare them
        invalid, at runtime) by calling invalidate_attr or a related method.
        """
        if attr.startswith('_'): # e.g. __repr__, __add__, etc -- be fast
            ## #bruce 050513 debug code to see why this is called so many times (1.7M times for load/draw 4k atom part)
            ## global debug_counter
            ## debug_counter -= 1
            ## if debug_counter < 0:
            ##     debug_counter = 38653
            ##     print_compact_stack("a random _xxx call of this, for %r of %r: " % (attr, self))
            raise AttributeError, attr
        ## global debug_counter
        ## debug_counter -= 1
        ## if debug_counter < 0:
        ##     debug_counter = 38653
        ##     print_compact_stack("a random non-_xxx call of this, for %r of %r: " % (attr, self))
        return getattr_helper(self, attr)
    # invalidation methods for client objects to call manually
    # and/or for us to call automatically:
    def validate_attr(self, attr):
        """
        Record the fact that attr (previously known to be invalid)
        has just been set to its correct value.
        """
        # in the initial implem, just storing the attr's value is sufficient.
        # let's just make sure it was in fact stored.
        assert self.__dict__.has_key(attr), "validate_attr finds no attr %r was saved, in %r" % (attr, self)
        #e if it was not stored, we could also, instead, print a warning and store None here.
        pass
    def validate_attrs(self, attrs):
        """
        Call validate_attr on each attribute name in attrs.
        """
        map( self.validate_attr, attrs)
    def invalidate_attrs(self, attrs, **kws):
        """
        invalidate each attribute named in the given list of attribute names
        """
        if not kws:
            # optim:
            map( self.invalidate_attr, attrs)
        else:
            map( lambda attr: self.invalidate_attr(attr, **kws), attrs)
    def invalidate_attr(self, attr, skip = ()):
        """
        Invalidate the attribute with the given name.
        This requires also invalidating any attribute registered as depending on this one,
        but in doing that we won't invalidate the ones named in the optional list 'skip',
        or any which depend on attr only via the ones in 'skip'.
        """
        #e will we need to support special case invalidation methods for certain
        # attrs, like molecule.havelist?
        if attr in skip:
            return
        try:
            # invalidation is represented by deleting the attribute
            delattr(self, attr)
        except AttributeError:
            # already invalid -- we're done
            return
        # it was not already invalid; we have to invalidate its dependents too
        self.changed_attr(attr, skip = skip)
        return
    def changed_attr(self, attr, **kws):
        """
        Invalidate every attribute registered as depending on attr,
        passing along any keyword args (e.g. skip) to invalidate_attr.
        """
        for dep in self.__imap[attr]:
            self.invalidate_attr(dep, **kws)
        return
    def changed_attrs(self, attrs):
        """
        You (the caller) are reporting that you changed all the given attrs;
        so we will validate these attrs and invalidate all their dependees,
        but when invalidating each one's dependees, we'll
        skip inval of *all* the attrs you say you directly changed,
        since we presume you changed them all to correct values.
        For example, if a affects b, b affects c, and you tell us you
        changed a and b, we'll end up invalling c but not b.
        Thus, this is not the same as calling changed_attr on each one --
        that would do too much invalidation.
        """
        self.validate_attrs(attrs)
        for attr in attrs:
            self.changed_attr( attr, skip = attrs )
    # init method:
    def init_InvalMixin(self): # used to be called 'init_invalidation_map'
        """
        call this in __init__ of each instance of each client class
        """
        # Set self.__imap. We assume the value depends only on the class,
        # so we only compute it the first time we see this class.
        key = id( self.__class__)
        imap = invalmap_per_class.get( key)
        if not imap:
            imap = make_invalmap_for( self.__class__)
            invalmap_per_class[key] = imap
        self.__imap = imap.affected_by
            # (note: name-mangled to _InvalMixin__imap; maps each attr name
            #  to the list of attrs to invalidate when it changes)
        return
    # debug methods (invalidatable_attrs is also used by some Undo update methods (not just for debugging) as of 060224)
    def invalidatable_attrs(self):
        """
        Return a sorted list of the names of all invalidatable attrs of self.
        """
        res = self.__imap.keys()
        res.sort() #bruce 060224
        return res
    def invalidatableQ(self, attr):
        """
        Is attr an invalidatable attribute of self?
        """
        return attr in self.__imap #bruce 060224 revised this
    def invalidQ(self, attr):
        """
        Is the invalidatable attribute attr currently invalid?
        """
        assert self.invalidatableQ(attr)
        return not self.__dict__.has_key(attr)
    def validQ(self, attr):
        """
        Is the invalidatable attribute attr currently valid?
        """
        assert self.invalidatableQ(attr)
        return self.__dict__.has_key(attr)
    def invalid_attrs(self):
        """
        Return a sorted list of the currently invalid attrs of self.
        """
        return filter( self.invalidQ, self.invalidatable_attrs() )
    def valid_attrs(self):
        """
        Return a sorted list of the currently valid attrs of self.
        """
        return filter( self.validQ, self.invalidatable_attrs() )
    pass # end of class InvalMixin
def getattr_helper(self, attr):
    """
    [private helper function]
    self is an InvalMixin instance (but this is a function, not a method).
    Compute and return self.attr using the _get_<attr> or _recompute_<attr>
    method found on self's class, storing the recomputed value on self when
    appropriate; raise AttributeError if neither method exists.
    """
    # assume caller has handled attrs starting with '_'.
    # be fast in this function, it's called often.
    # simplest first: look for a get method
    # (look in self.__class__, for speed; note that we get an unbound method then)
    ubmeth = getattr(self.__class__, "_get_" + attr, None)
    if ubmeth:
        # _get_ method is not part of the inval system -- just do it
        return ubmeth(self) # pass self since unbound method
    # then look for a recompute method
    ubmeth = getattr(self.__class__, "_recompute_" + attr, None)
    if not ubmeth:
        raise AttributeError(attr) #bruce 060228 making this more conservative in case it's not always so rare
        ## # rare enough to raise a nicer exception than our own __getattr__ does
        ## ###e this should use a safe_repr function for self [bruce 051011 comment]
        ## raise AttributeError, "%s has no %r: %r" % (self.__class__.__name__, attr, self)
    try:
        val = ubmeth(self)
    except:
        print_compact_traceback("exception (ignored, storing None) in _recompute_%s method for %r: " % (attr,self) )
        val = None
    # now either val is not None, and we need to store it ourselves
    # (and we'll be loose and not warn if it was already stored --
    # I don't know if asserting it was not stored could be correct in theory);
    # or it's None and correct value should have been stored (and might also be None).
    # (note: storing val unconditionally *before* this test would be a bug --
    # it would clobber a value the recompute method stored itself, and would
    # make the "neither returned nor stored" error below undetectable.)
    if val is not None:
        setattr(self, attr, val) # store it ourselves
    else:
        val = self.__dict__.get(attr) # so we can return it from __getattr__
        if val is None and self.__dict__.get(attr, 1):
            # (test is to see if get result depends on default value '1')
            # error: val was neither returned nor stored; always raise exception
            # (but store it ourselves to discourage recursion if exception ignored)
            setattr(self, attr, val)
            msg = "bug: _recompute_%s returned None, and did not store any value" % attr
            print(msg)
            assert val is not None, msg # not sure if this will be seen
    ## self.validate_attr(attr) # noop except for asserts, so removed for now
    return val
# ==
# test code
class testclass(InvalMixin):
    """
    Trivial InvalMixin client used by the self-test below:
    c = a + b, d = 100 + c, e = 1000 + c.
    """
    def __init__(self):
        self.init_InvalMixin()

    _inputs_for_c = ['a', 'b']
    def _recompute_c(self):
        return self.a + self.b

    _inputs_for_d = ['c']
    def _recompute_d(self):
        return 100 + self.c

    _inputs_for_e = ['c']
    def _recompute_e(self):
        return 1000 + self.c
def testab(a,b,tobj):
tobj.a = a
tobj.b = b
tobj.invalidate_attr('c')
assert tobj.e == 1000 + a + b
tobj.invalidate_attr('c')
assert tobj.e == 1000 + a + b
# == minimal self-test (Python 2 only; the expected output is recorded
# in the triple-quoted string at the end of this block)
if __name__ == '__main__':
    # if you need to import this from somewhere else for the test, use this code,
    # removed to avoid this warning:
    # .../cad/src/inval.py:0: SyntaxWarning: name
    # 'print_compact_traceback' is assigned to before global declaration
    ## global print_compact_traceback
    ## for mod in sys.modules.values(): # import it from somewhere...
    ##     try:
    ##         print_compact_traceback = mod.print_compact_traceback
    ##         break
    ##     except AttributeError:
    ##         pass
    tobj = testclass()
    testab(1,2,tobj)
    testab(3,4,tobj)
    tobj.a = 17
    tobj.c = 23 # might be inconsistent with the rule, that's ok
    print "about to tobj.changed_attrs(['a','c'])"
    tobj.changed_attrs(['a','c']) # should inval d,e but not c or b (even tho a affects c); how do we find out?
    print "this should inval d,e but not c or b (even tho a affects c); see what is now invalid:"
    print tobj.invalid_attrs()
    print "now confirm that unlike the rule, c != a + b; they are c,b,a = %r,%r,%r" % (tobj.c,tobj.b,tobj.a)
    print "and fyi here are d and e: %r and %r" % (tobj.d,tobj.e)
    print "and here are c,b,a again (should be unchanged): %r,%r,%r" % (tobj.c,tobj.b,tobj.a)
    # this looks correct, need to put outputs into asserts, above:
    """
    about to tobj.changed_attrs(['a','c'])
    this should inval d,e but not c or b (even tho a affects c); see what is now invalid:
    ['e', 'd']
    now confirm that unlike the rule, c != a + b; they are c,b,a = 23,4,17
    and fyi here are d and e: 123 and 1023
    and here are c,b,a again (should be unchanged): 23,4,17
    """
# end
| NanoCAD-master | cad/src/foundation/inval.py |
# Copyright 2005-2008 Nanorex, Inc. See LICENSE file for details.
"""
undo_manager.py - own and manage an UndoArchive, feeding it info about
user-command events (such as when to make checkpoints and how to describe
the diffs generated by user commands), and package the undo/redo ops it offers
into a reasonable form for supporting a UI.
@author: Bruce
@version: $Id$
@copyright: 2005-2008 Nanorex, Inc. See LICENSE file for details.
[060117 -- for current status see undo_archive.py module docstring]
"""
from utilities.debug import register_debug_menu_command_maker
from utilities.debug import print_compact_traceback, print_compact_stack
from utilities import debug_flags
from platform_dependent.PlatformDependent import is_macintosh
from foundation.undo_archive import AssyUndoArchive
import foundation.undo_archive as undo_archive # for debug_undo2;
# could move that to a debug flags module; not urgent
from utilities.constants import noop
from utilities.prefs_constants import undoAutomaticCheckpoints_prefs_key
import foundation.env as env
from utilities.Log import greenmsg, redmsg ##, orangemsg
import time
class UndoManager:
    """
    Abstract base class for undo managers.
    [060117 addendum: this docstring is mostly obsolete or nim]
    Own and manage an undo-archive, in such a way as to provide undo/redo operations
    and a current-undo-point within the archive [addendum 060117: undo point might be in model objects or archive, not here,
    and since objects can't be in more than one archive or have more than one cur state (at present), this doesn't matter much]
    on top of state-holding objects which would otherwise not have these undo/redo ops
    (though they must have the ability to track changes and/or support scanning of their state).
    Assume that no other UndoManager or UndoArchive is tracking the same state-holding objects
    (i.e. we own them for undo-related purposes).
    #e future: Perhaps also delegate all command-method-calls to the state-holding objects...
    but for now, tolerate external code directly calling command methods on our state-holding objects,
    just receiving begin/end calls related to those commands and their subroutines or calling event-handlers,
    checkpoint calls from same, and undo/redo command callbacks.
    """
    pass
# module-level: remember the last-reported auto-checkpointing state across
# module reloads, so _initial_checkpoint only prints a history message
# when the state differs from last time
try:
    _last_autocp # don't change it when we reload this module
except NameError:
    _last_autocp = True # used only for history messages
class AssyUndoManager(UndoManager):
    """
    An UndoManager specialized for handling the state held by an assy (an instance of class assembly).
    """
    # whether self is alive (False mainly means self died via deinit,
    # not that it's being initialized)
    active = True #060223 changed this to True, since False mainly means it died, not that it's being initialized [060223]
    # set True at the end of _initial_checkpoint (reached via clear_undo_stack)
    _undo_manager_initialized = False #060223
def __init__(self, assy, menus = ()): # called from assy.__init__
    """
    Partially initialize self, as early as possible during assy.__init__;
    the caller must also (subsequently) call init1 and either
    _initial_checkpoint or (preferred) clear_undo_stack.
    @type assy: assembly.assembly
    @warning: callers concerned with performance should heed the warning in
              the docstring of clear_undo_stack about when to first call it.
    """
    # assy owns the state whose changes we'll be managing...
    # [semiobs cmt:] should it have same undo-interface as eg chunks do??
    self.assy = assy
    self.menus = menus
    self._current_main_menu_ops = {}
    return
def init1(self): #e might be merged into end of __init__
    """
    Do what we might do in __init__ except that it might be too early
    during assy.__init__ then (see also _initial_checkpoint).
    Creates self.archive (an AssyUndoArchive) and adjusts
    platform-specific UI details.
    """
    assy = self.assy
    self.archive = AssyUndoArchive(assy)
    ## assy._u_archive = self.archive ####@@@@ still safe in 060117 stub code?? [guess 060223: not needed anymore ###@@@]
        # [obs??] this is how model objects in assy find something to report changes to (typically in their __init__ methods);
        # we do it here (not in caller) since its name and value are private to our API for model objects to report changes
    ## self.archive.subscribe_to_checkpoints( self.remake_UI_menuitems )
    ## self.remake_UI_menuitems() # so it runs for initial checkpoint and disables menu items, etc
    if is_macintosh():
        win = assy.w
        from PyQt4.Qt import Qt
        win.editRedoAction.setShortcut(Qt.CTRL+Qt.SHIFT+Qt.Key_Z) # set up incorrectly (for Mac) as "Ctrl+Y"
        # note: long before 060414 this is probably no longer needed
        # (since now done in gui.WhatsThisText_for_MainWindow.py),
        # but it's safe and can be left in as a backup.
    # exercise the debug-only old pref (deprecated to use it):
    self.auto_checkpoint_pref() # exercise this, so it shows up in the debug-prefs submenu right away
        # (fixes bug in which the pref didn't show up until the first undoable change was made) [060125]
    # now look at the official pref for initial state of autocheckpointing [060314]
    ## done later -- set_initial_AutoCheckpointing_enabled( ... )
    return
def _initial_checkpoint(self): #bruce 060223; not much happens until this is called (order is __init__, init1, _initial_checkpoint)
    """
    Only called from self.clear_undo_stack().
    Makes the archive's initial checkpoint, subscribes self to
    command-segment begin/end events, and syncs the UI with the
    auto-checkpointing pref.
    """
    set_initial_AutoCheckpointing_enabled( True )
        # might have to be True for initial_checkpoint; do no UI effects or history msg; kluge that the flag is a global [060314]
    self.archive.initial_checkpoint()
    ## self.connect_or_disconnect_menu_signals(True)
    self.remake_UI_menuitems() # try to fix bug 1387 [060126]
    self.active = True # redundant
    env.command_segment_subscribers.append( self._in_event_loop_changed )
    self._undo_manager_initialized = True
    ## redundant call (bug); i hope this is the right one to remove: self.archive.initial_checkpoint()
    # make sure the UI reflects the current pref for auto-checkpointing [060314]
    # (in practice this happens at startup and after File->Open);
    # only emit history message if it's different than it was last time this session,
    # or different than True the first time
    global _last_autocp
    autocp = env.prefs[undoAutomaticCheckpoints_prefs_key]
    update_UI = True
    print_to_history = (_last_autocp != autocp)
    _last_autocp = -1 # if there's an exception, then *always* print it next time around
    set_initial_AutoCheckpointing_enabled( autocp, update_UI = update_UI, print_to_history = print_to_history)
    _last_autocp = autocp # only print it if different, next time
    return
def deinit(self):
    """
    Deactivate and effectively destroy self
    [060126 precaution; not thought through].
    """
    self.active = False
    ## self.connect_or_disconnect_menu_signals(False)
    self.archive.destroy()
    # drop our references
    self._current_main_menu_ops = {}
    self.assy = None
    self.menus = None
    #e more??
    return
# this is useless, since we have to keep them always up to date for sake of accel keys and toolbuttons [060126]
## def connect_or_disconnect_menu_signals(self, connectQ): # this is a noop as of 060126
## win = self.assy.w
## if connectQ:
## method = win.connect
## else:
## method = win.disconnect
## for menu in self.menus:
## method( menu, SIGNAL("aboutToShow()"), self.remake_UI_menuitems ) ####k
## pass
## return
def clear_undo_stack(self, *args, **kws): #bruce 080229 revised docstring
    """
    Initialize self if necessary, then make an initial checkpoint and
    discard whatever undo archive data was recorded before it (if any).

    This is the preferred way for external code to complete our
    initialization and define the earliest state an Undo can reach.
    It can also be called again later to redefine that point, making
    all earlier states inaccessible (as a user op for reducing RAM
    consumption).

    @note: calling this several times in the same user op is allowed,
           and leaves the state the same as if only the last of those
           calls had been made.

    @warning: the first time this is called, it scans and copies all
              currently reachable undoable state *twice*; all later
              calls do so only once. So call it as soon as the client
              assy is fully initialized (while it is almost empty of
              undoable state), even if it will be called again soon
              after some (potentially large) initial data is added to
              the assy -- otherwise that second call scans its state
              twice and takes twice as long as necessary.
    """
    # note: callable from a debug menu / other command as of 060301 (experimental)
    if not self._undo_manager_initialized:
        # must be done here, not in archive.clear_undo_stack
        self._initial_checkpoint()
    return self.archive.clear_undo_stack(*args, **kws)
def menu_cmd_checkpoint(self): # no longer callable from UI as of 060301, and not recently reviewed for safety [060301 comment]
    """
    Make an explicit, user-requested checkpoint.
    """
    self.checkpoint( cptype = 'user_explicit' )
def make_manual_checkpoint(self): #060312
    """
    Make a manual checkpoint; called from editMakeCheckpoint,
    presumably only when autocheckpointing is disabled.
    """
    self.checkpoint( cptype = 'manual', merge_with_future = False )
        # temporary comment 060312: this might be enough, once it sets up for remake_UI_menuitems
    return
__begin_retval = None ###k this will be used when we're created by a cmd like file open... i guess grabbing pref then is best...

def _in_event_loop_changed(self, beginflag, infodict, tracker): # 060127; 060321 added infodict to API
    """
    [this bound method is appended to env.command_segment_subscribers
    (in _initial_checkpoint) so as to be told when command segments
    begin and end; it makes undo checkpoints around them, and returns
    True to unsubscribe itself once self is no longer active]
    """
    # infodict is info about the nature of the stack change, passed from the tracker [bruce 060321 for bug 1440 et al]
    # this makes "report all checkpoints" useless -- too many null ones.
    # maybe i should make it only report if state changes or cmdname passed...
    if not self.active:
        self.__begin_retval = False #k probably doesn't matter
        return True # unsubscribe
    # print beginflag, len(tracker.stack) # typical: True 1; False 0
    if 1:
        #bruce 060321 for bug 1440: we need to not do checkpoints in some cases. Not sure if this is correct re __begin_retval;
        # if not, either clean it up for that or pass the flag into the checkpoint routine to have it not really do the checkpoint
        # (which might turn out better for other reasons anyway, like tracking proper cmdnames for changes). ##e
        pushed = infodict.get('pushed')
        popped = infodict.get('popped')
        # zero or one of these exists, and is the op_run just pushed or popped from the stack
        if pushed is not None:
            typeflag = pushed.typeflag # entering this guy
        elif popped is not None:
            typeflag = popped.typeflag # leaving this guy (entering vs leaving doesn't matter for now)
        else:
            typeflag = '' # does this ever happen? (probably not)
        want_cp = (typeflag != 'beginrec')
        if not want_cp:
            if 0 and env.debug():
                print "debug: skipping cp as we enter or leave recursive event processing"
            return # this might be problematic, see above comment [tho it seems to work for now, for Minimize All anyway];
            # if it ever is, then instead of returning here, we'll pass want_cp to checkpoint routines below
    if beginflag:
        self.__begin_retval = self.undo_checkpoint_before_command()
        ###e grab cmdname guess from top op_run i.e. from begin_op? yes for debugging; doesn't matter in the end though.
    else:
        if self.__begin_retval is None:
            # print "self.__begin_retval is None" # not a bug, will be normal ... happens with file open (as expected)
            self.__begin_retval = self.auto_checkpoint_pref()
        self.undo_checkpoint_after_command( self.__begin_retval )
        self.__begin_retval = False # should not matter
    return
def checkpoint(self, *args, **kws):
    """
    Take (or merge) an undo checkpoint via the archive, unless
    checkpoints are currently disabled, then refresh the Undo/Redo UI.
    Returns the archive's checkpoint result (or None if skipped).
    """
    # Note, as of 060127 this is called *much* more frequently than before (for every signal->slot to a python slot);
    # we will need to optimize it when state hasn't changed. ###@@@
    global _AutoCheckpointing_enabled, _disable_checkpoints
    res = None
    if not _disable_checkpoints: ###e are there any exceptions to this, like for initial cps?? (for open file in extrude)
        opts = dict(merge_with_future = not _AutoCheckpointing_enabled)
        # i.e., when not auto-checkpointing and when caller doesn't override,
        # we'll ask archive.checkpoint to (efficiently) merge changes so far with upcoming changes
        # (but to still cause real changes to trash redo stack, and to still record enough info
        # to allow us to properly remake_UI_menuitems)
        opts.update(kws) # we'll pass it differently from the manual checkpoint maker... ##e
        res = self.archive.checkpoint( *args, **opts )
    self.remake_UI_menuitems() # needed here for toolbuttons and accel keys; not called for initial cp during self.archive init
        # (though for menu items themselves, the aboutToShow signal would be sufficient)
    return res # maybe no retval, this is just a precaution
def auto_checkpoint_pref(self): ##e should remove calls to this, inline them as True
    """
    Always True. Obsolete -- this is not the same as the checkmark item
    now in the edit menu [bruce 060309]; kept only so old callers work.
    """
    return True
def undo_checkpoint_before_command(self, cmdname = ""):
"""
###doc
[returns a value which should be passed to undo_checkpoint_after_command;
we make no guarantees at all about what type of value that is, whether it's boolean true, etc]
"""
#e should this be renamed begin_cmd_checkpoint() or begin_command_checkpoint() like I sometimes think it's called?
# recheck the pref every time
auto_checkpointing = self.auto_checkpoint_pref() # (this is obs, only True is supported, as of long before 060323)
if not auto_checkpointing:
return False
# (everything before this point must be kept fast)
cmdname2 = cmdname or "command"
if undo_archive.debug_undo2:
env.history.message("debug_undo2: begin_cmd_checkpoint for %r" % (cmdname2,))
# this will get fancier, use cmdname, worry about being fast when no diffs, merging ops, redundant calls in one cmd, etc:
self.checkpoint( cptype = 'begin_cmd', cmdname_for_debug = cmdname )
if cmdname:
self.archive.current_command_info(cmdname = cmdname) #060126
return True # this code should be passed to the matching undo_checkpoint_after_command (#e could make it fancier)
def undo_checkpoint_after_command(self, begin_retval):
    """
    Finish the checkpoint pair begun by undo_checkpoint_before_command;
    begin_retval must be the value that call returned.
    """
    assert begin_retval in [False, True], "begin_retval should not be %r" % (begin_retval,)
    if not begin_retval:
        return
    # begin_retval True means [as of 060123] checkpointing was enabled at begin time
    if undo_archive.debug_undo2:
        env.history.message(" debug_undo2: end_cmd_checkpoint")
    # this will get fancier, use cmdname, worry about being fast when no diffs, merging ops, redundant calls in one cmd, etc:
    self.checkpoint( cptype = 'end_cmd' )
    return
# ==
def node_departing_assy(self, node, assy): #bruce 060315;
    """
    Notification that node is leaving assy. Almost a noop now (see
    history comments below); only sanity-checks its arguments against
    self.assy and prints bug warnings when they look wrong.
    """
    # revised 060330 to make it almost a noop, since implem was obsolete and it caused bug 1797
    #bruce 080219 making this a debug print only, since it happens with dna updater
    # (probably a bug) but exception may be causing further bugs; also adding a message.
    # Now I have a theory about the bug's cause: if this happens in a closed assy,
    # deinit has set self.assy to None. To repeat, open and close a dna file with dna updater
    # off, then turn dna updater on. Now this should cause the "bug (harmless?)" print below.
    #bruce 080314 update: that does happen, so that print is useless and verbose,
    # so disable it for now. Retain the other ones.
    if assy is None or node is None:
        print "\n*** BUG: node_departing_assy(%r, %r, %r) sees assy or node is None" % \
              (self, node, assy)
        return
    if self.assy is None:
        # this will happen for now when the conditions that caused today's bug reoccur,
        # until we fix the dna updater to never run inside a closed assy (desirable)
        # [bruce 080219]
        if 0: #bruce 080314
            print "\nbug (harmless?): node_departing_assy(%r, %r, %r), but " \
                  "self.assy is None (happens when self's file is closed)" % \
                  (self, node, assy)
        return
    if not (assy is self.assy):
        print "\n*** BUG: " \
              "node_departing_assy(%r, %r, %r) sees wrong self.assy = %r" % \
              (self, node, assy, self.assy)
    # assy is self.assy has to be true (given that neither is None),
    # since we were accessed as assy.undo_manager.
    return
# ==
def current_command_info(self, *args, **kws):
    """
    Forward current-command metadata (e.g. cmdname) to the archive.
    """
    self.archive.current_command_info(*args, **kws)
    return
def undo_redo_ops(self):
    """
    Return (undos, redos): the currently offerable undo and redo ops
    from the archive, with obsolete redo ops pruned so at most the
    newest redo (highest checkpoint counter) is kept.
    """
    # copied code below [dup code is in undo_manager_older.py, not in cvs]
    # the following value for warn_when_change_indicators_seem_wrong is a kluge
    # (wrong in principle but probably safe, not entirely sure it's correct) [060309]
    # (note, same value was hardcoded inside that method before bruce 071025;
    # see comment there about when I see the warnings; it's known that it gives
    # false warnings if we pass True when _AutoCheckpointing_enabled is false):
    ops = self.archive.find_undoredos(
        warn_when_change_indicators_seem_wrong = _AutoCheckpointing_enabled )
    # state_version - now held inside UndoArchive.last_cp (might be wrong) ###@@@
    # [what the heck does that comment mean? bruce 071025 Q]
    undos = []
    redos = []
    d1 = {'Undo':undos, 'Redo':redos}
    for op in ops:
        optype = op.optype()
        d1[optype].append(op) # sort ops by type
    ## done in the subr: redos = filter( lambda redo: not redo.destroyed, redos) #060309 since destroyed ones are not yet unstored
    # remove obsolete redo ops (any but the one with the newest checkpoint)
    if redos:
        lis = [ (redo.cps[1].cp_counter, redo) for redo in redos ]
        lis.sort()
        only_redo = lis[-1][1]
        redos = [only_redo]
        for obs_redo in lis[:-1]:
            if undo_archive.debug_undo2 or env.debug():
                #060309 adding 'or env.debug()' since this should never happen once clear_redo_stack() is implemented in archive
                print "obsolete redo:", obs_redo
            pass #e discard it permanently? ####@@@@
    return undos, redos
def undo_cmds_menuspec(self, widget):
    # WARNING: this is not being maintained, it's just a development draft.
    # So far it lacks merging and history message and perhaps win_update and update_select_mode. [060227 comment]
    """
    Return a menu_spec for including undo-related commands in a popup menu
    (to be shown in the given widget, tho i don't know why the widget could matter)
    """
    del widget
    archive = self.archive
    # copied code below [dup code is in undo_manager_older.py, not in cvs]
    res = []
    #bruce 060301 removing this one, since it hasn't been reviewed in awhile so it might cause bugs,
    # and maybe it did cause one...
    ## res.append(( 'undo checkpoint (in RAM only)', self.menu_cmd_checkpoint ))
    #060301 try this one instead:
    res.append(( 'clear undo stack (experimental)', self.clear_undo_stack ))
    undos, redos = self.undo_redo_ops()
    ###e sort each list by some sort of time order (maybe of most recent use of the op in either direction??), and limit lengths
    # there are at most one per chunk per undoable attr... so for this test, show them all, don't bother with submenus
    if not undos:
        res.append(( "Nothing we can Undo", noop, 'disabled' ))
        ###e should figure out whether "Can't Undo XXX" or "Nothing to Undo" is more correct
    for op in undos + redos:
        # for now, we're not even including them unless as far as we know we can do them, so no role for "Can't Undo" unless none
        arch = archive # it's on purpose that op itself has no ref to model, so we have to pass it [obs cmt?]
        # bugfix: bind op as a lambda default argument too (like arch).
        # Previously op was a free variable resolved at call time (classic
        # late-binding closure bug), so every generated menu command would
        # have applied the *last* op in the list.
        cmd = lambda _guard1_ = None, _guard2_ = None, arch = arch, op = op: arch.do_op(op) #k guards needed? (does qt pass args to menu cmds?)
        ## text = "%s %s" % (op.type, op.what())
        text = op.menu_desc()
        res.append(( text , cmd ))
    if not redos:
        res.append(( "Nothing we can Redo", noop, 'disabled' ))
    return res
def remake_UI_menuitems(self): #e this should also be called again if any undo-related preferences change ###@@@
    """
    Refresh the Undo/Redo (and Clear Undo Stack) menu items, toolbuttons
    and tooltips to match the currently available ops, disabling them
    (with a reason in the text) when Undo/Redo is temporarily disabled.
    """
    #e see also: void QPopupMenu::aboutToShow () [signal], for how to know when to run this (when Edit menu is about to show);
    # to find the menu, no easy way (only way: monitor QAction::addedTo in a custom QAction subclass - not worth the trouble),
    # so just hardcode it as edit menu for now. We'll need to connect & disconnect this when created/finished,
    # and get passed the menu (or list of them) from the caller, which is I guess assy.__init__.
    if undo_archive.debug_undo2:
        print "debug_undo2: running remake_UI_menuitems (could be direct call or signal)"
    global _disable_UndoRedo
    disable_reasons = list(_disable_UndoRedo) # avoid bugs if it changes while this function runs (might never happen)
    if disable_reasons:
        undos, redos = [], [] # note: more code notices the same condition, below
    else:
        undos, redos = self.undo_redo_ops()
    win = self.assy.w
    undo_mitem = win.editUndoAction
    redo_mitem = win.editRedoAction
    for ops, action, optype in [(undos, undo_mitem, 'Undo'), (redos, redo_mitem, 'Redo')]: #e or could grab op.optype()?
        extra = ""
        if disable_reasons:
            try:
                why_not = str(disable_reasons[0][1]) # kluges: depends on list format, and its whymsgs being designed for this use
            except:
                why_not = ""
            extra += " (not permitted %s)" % why_not # why_not is e.g. "during drag" (nim) or "during Extrude"
        if undo_archive.debug_undo2:
            extra += " (%s)" % str(time.time()) # show when it's updated in the menu text (remove when works) ####@@@@
        if ops:
            action.setEnabled(True)
            if not ( len(ops) == 1): #e there should always be just one for now
                #060212 changed to debug msg, since this assert failed (due to process_events?? undoing esp image delete)
                print_compact_stack("bug: more than one %s op found: " % optype)
            op = ops[0]
            op = self.wrap_op_with_merging_flags(op) #060127
            text = op.menu_desc() + extra #060126
            action.setText(text)
            fix_tooltip(action, text) # replace description, leave (accelkeys) alone (they contain unicode chars on Mac)
            self._current_main_menu_ops[optype] = op #e should store it into menu item if we can, I suppose
            op.you_have_been_offered()
            # make sure it doesn't change its mind about being a visible undoable op, even if it gets merged_with_future
            # (from lack of autocp) and turns out to contain no net changes
            # [bruce 060326 re bug 1733; probably only needed for Undo, not Redo]
        else:
            action.setEnabled(False)
            ## action.setText("Can't %s" % optype) # someday we might have to say "can't undo Cmdxxx" for certain cmds
            ## action.setText("Nothing to %s" % optype)
            text = "%s%s" % (optype, extra)
            action.setText(text) # for 061117 commit, look like it used to look, for the time being
            fix_tooltip(action, text)
            self._current_main_menu_ops[optype] = None
        pass
    #bruce 060319 for bug 1421
    win.editUndoAction.setWhatsThis( win.editUndoText )
    win.editRedoAction.setWhatsThis( win.editRedoText )
    from foundation.whatsthis_utilities import refix_whatsthis_text_and_links
    ## if 0:
    ##     # this works, but is overkill and is probably too slow, and prints huge numbers of console messages, like this:
    ##     ## TypeError: invalid result type from MyWhatsThis.text()
    ##     # (I bet I could fix the messages by modifying MyWhatsThis.text() to return "" (guess))
    ##     from foundation.whatsthis_utilities import fix_whatsthis_text_and_links
    ##     fix_whatsthis_text_and_links( win)
    ## if 0:
    ##     # this prints no console messages, but doesn't work! (for whatsthis on tool buttons or menu items)
    ##     # guess [much later]: it fails to actually do anything to these actions!
    ##     from foundation.whatsthis_utilities import fix_whatsthis_text_and_links
    ##     fix_whatsthis_text_and_links( win.editUndoAction )
    ##     fix_whatsthis_text_and_links( win.editRedoAction )
    ##     # try menu objects? and toolbars?
    refix_whatsthis_text_and_links( ) ###@@@ predict: will fix toolbuttons but not menu items
    #060304 also disable/enable Clear Undo Stack
    action = win.editClearUndoStackAction
    text = "Clear Undo Stack" + '...' # workaround missing '...' (remove this when the .ui file is fixed)
    #e future: add an estimate of RAM to be cleared
    action.setText(text)
    fix_tooltip(action, text)
    enable_it = not not (undos or redos)
    action.setEnabled( enable_it )
    return
#
# the kinds of things we can set on one of those actions include:
#
# self.setViewFitToWindowAction.setText(QtGui.QApplication.translate(self.__class__.__name__, "Fit to Window"))
# self.setViewFitToWindowAction.setText(QtGui.QApplication.translate(self.__class__.__name__, "&Fit to Window"))
# self.setViewFitToWindowAction.setToolTip(QtGui.QApplication.translate(self.__class__.__name__, "Fit to Window (Ctrl+F)"))
# self.setViewFitToWindowAction.setShortcut(QtGui.QApplication.translate(self.__class__.__name__, "Ctrl+F"))
# self.viewRightAction.setStatusTip(QtGui.QApplication.translate(self.__class__.__name__, "Right View"))
# self.helpMouseControlsAction.setWhatsThis(QtGui.QApplication.translate(self.__class__.__name__, "Displays help for mouse controls"))
def wrap_op_with_merging_flags(self, op, flags = None): #e will also accept merging-flag or -pref arguments
    """
    Return a higher-level op based on the given op, with the appropriate
    diff-merging flags wrapped around it. Applying the wrapped op will (in
    general) apply op and then any further diffs that the flags say should
    merge with it (applied in an optimized, batched way); it may also have
    a different menu_desc, etc. The passed op is never modified, so callers
    may wrap the same op more than once with different flag arguments.
    """
    #e first we supply our own defaults for flags
    return self.archive.wrap_op_with_merging_flags(op, flags = flags)
# main menu items (their slots in MWsemantics forward to assy which forwards to here)
def editUndo(self):
    """
    Slot for the main-menu Undo command (forwarded via MWsemantics/assy).
    """
    ## env.history.message(orangemsg("Undo: (prototype)"))
    self.do_main_menu_op('Undo')
def editRedo(self):
    """
    Slot for the main-menu Redo command (forwarded via MWsemantics/assy).
    """
    ## env.history.message(orangemsg("Redo: (prototype)"))
    self.do_main_menu_op('Redo')
def do_main_menu_op(self, optype):
    """
    Perform the stored main-menu op of the given kind, temporarily
    enabling autocheckpointing (with surrounding checkpoints) when it
    is disabled, and reporting results/errors to the history.

    @note: optype should be Undo or Redo
    """
    # remember whether an op was on offer before we recompute things below
    op_was_available = not not self._current_main_menu_ops.get(optype)
    global _disable_UndoRedo
    if _disable_UndoRedo: #060414
        env.history.message(redmsg("%s is not permitted now (and this action was only offered due to a bug)" % optype))
        return
    global _AutoCheckpointing_enabled
    disabled = not _AutoCheckpointing_enabled #060312
    if disabled:
        _AutoCheckpointing_enabled = True # temporarily enable it, just during the Undo or Redo command
        self.checkpoint( cptype = "preUndo" ) # do a checkpoint with it enabled, so Undo or Redo can work normally.
        # Note: in theory this might change what commands are offered and maybe even cause the error message below to come out
        # (so we might want to revise it when disabled is true ##e), but I think that can only happen if there's a change_counter
        # bug, since the only way the enabled cp will see changes not seen by disabled one is if archive.update_before_checkpoint()
        # is first to set the change_counters (probably a bug); if this happens it might make Redo suddenly unavailable.
        ####e if optype is Redo, we could pass an option to above checkpoint to not destroy redo stack or make it inaccessible!
        # (such an option is nim)
    try:
        op = self._current_main_menu_ops.get(optype)
        if op:
            undo_xxx = op.menu_desc() # note: menu_desc includes history sernos
            env.history.message(u"%s" % undo_xxx) #e say Undoing rather than Undo in case more msgs?? ######@@@@@@ TEST u"%s"
            self.archive.do_op(op)
            self.assy.w.update_select_mode() #bruce 060227 try to fix bug 1576
            self.assy.w.win_update() #bruce 060227 not positive this isn't called elsewhere, or how we got away without it if not
        else:
            if not disabled:
                print "no op to %r; not sure how this slot was called, since it should have been disabled" % optype
                env.history.message(redmsg("Nothing to %s (and it's a bug that its menu item or tool button was enabled)" % optype))
            else:
                print "no op to %r; autocp disabled (so ops to offer were recomputed just now; before that, op_was_available = %r); "\
                      "see code comments for more info" % ( optype, op_was_available)
                if op_was_available:
                    env.history.message(redmsg("Nothing to %s (possibly due to a bug)" % optype))
                else:
                    env.history.message(redmsg("Nothing to %s (and this action was only offered due to a bug)" % optype))
            pass
    except:
        print_compact_traceback()
        env.history.message(redmsg("Bug in %s; see traceback in console" % optype))
    if disabled:
        # better get the end-cp done now (since we might be relying on it for some reason -- I'm not sure)
        self.checkpoint( cptype = "postUndo" )
        _AutoCheckpointing_enabled = False # re-disable
    return
pass # end of class AssyUndoManager
# ==
#e refile
def fix_tooltip(qaction, text): #060126
    """
    Assuming qaction's tooltip looks like "command name (accel keys)" and might contain unicode in accel keys
    (as often happens on Mac due to symbols for Shift and Command modifier keys),
    replace command name with text, leave accel keys unchanged (saving result into actual tooltip).
    OR if the tooltip doesn't end with ')', just replace the entire thing with text, plus a space if text ends with ')'
    (to avoid a bug the next time -- not sure if that kluge will work).
    """
    whole = unicode(qaction.toolTip()) # str() on this might have an exception
    try:
        #060304 improve the alg to permit parens in text to remain; assume last '( ' is the one before the accel keys;
        # also permit no accel keys
        if whole[-1] == ')':
            # tooltip appears to end with accel keys (reasonable assumption, not unbreakably certain)
            sep = u' ('
            accel_part = whole.split(sep)[-1]
            whole = sep.join([text, accel_part])
        else:
            # no accel keys -- the whole tooltip is just the description
            whole = text
            if whole[-1] == ')':
                whole = whole + ' ' # kluge, explained in docstring
        # print "formed tooltip",`whole` # printing whole might have an exception, but printing `whole` is ok
        qaction.setToolTip(whole) # no need for __tr, I think?
    except:
        print_compact_traceback("exception in fix_tooltip(%r, %r): " % (qaction, text) )
    return
# == debugging code - invoke undo/redo from debug menu (only) in initial test implem
def undo_cmds_maker(widget):
    ###e maybe this belongs in assy module itself?? clue: it knows the name of assy.undo_manager; otoh, should work from various widgets
    """
    Return a menu_spec of undo debug commands for the debug menu.
    [widget is the widget in which the debug menu is being put up right now]
    """
    #e in theory we use that widget's undo-chain... but in real life this won't even happen inside the debug menu, so nevermind.
    # for now just always use the assy's undo-chain, found via the widget.
    try:
        assy = widget.win.assy
    except:
        if debug_flags.atom_debug:
            return [('atom_debug: no undo in this widget', noop, 'disabled')]
        return []
    ## if 'kluge' and not hasattr(assy, 'undo_manager'):
    ##     assy.undo_manager = UndoManager(assy) #e needs review; might just be a devel kluge, or might be good if arg type is unciv
    mgr = assy.undo_manager #k should it be an attr like this, or a sep func?
    return mgr.undo_cmds_menuspec(widget)
# register the maker so an "undo_cmds" submenu appears in the debug menu
register_debug_menu_command_maker( "undo_cmds", undo_cmds_maker)
# fyi: this runs once when the first assy is being created, but undo_cmds_maker runs every time the debug menu is put up.
# ==
# some global private state (which probably ought to be undo manager instance vars)
# note: the try/except-NameError idiom below preserves existing values
# across a module reload (we often reload automatically during debugging)
try:
    _editAutoCheckpointing_recursing
except:
    _editAutoCheckpointing_recursing = False
    # only if we're not reloading -- otherwise, bug when setChecked calls MWsem slot which reloads
else:
    if _editAutoCheckpointing_recursing and env.debug():
        pass # print "note: _editAutoCheckpointing_recursing true during reload" # this happens!
try:
    _AutoCheckpointing_enabled # on reload, use old value unchanged (since we often reload automatically during debugging)
except:
    _AutoCheckpointing_enabled = True # this might be changed based on env.prefs whenever an undo_manager gets created [060314]
    # older comment about that, not fully obs:
    #e this might be revised to look at env.prefs sometime during app startup,
    # and to call editAutoCheckpointing (or some part of it) with the proper initial state;
    # the current code is designed, internally, for checkpointing to be enabled except
    # for certain intervals, so we might start out True and set this to False when
    # an undo_manager is created... we'll see; maybe it won't even (or mainly or only) be a global? [060309]
def _set_AutoCheckpointing_enabled( enabled):
global _AutoCheckpointing_enabled
_AutoCheckpointing_enabled = not not enabled
return
def set_initial_AutoCheckpointing_enabled( enabled, update_UI = False, print_to_history = False ):
    """
    Set autocheckpointing (perhaps for internal use),
    doing UI updates only if asked, emitting history only if asked.
    Same API as editAutoCheckpointing except for the option defaults.
    """
    if not update_UI:
        # kluge: win is not needed in this case,
        # and I'm not sure it's not too early
        win = None
    else:
        win = env.mainwindow()
    editAutoCheckpointing(win, enabled, update_UI = update_UI, print_to_history = print_to_history)
    return
def editAutoCheckpointing(win, enabled, update_UI = True, print_to_history = True):
    """
    This is called from MWsemantics.editAutoCheckpointing, which is documented as
    "Slot for enabling/disabling automatic checkpointing." It sets
    _AutoCheckpointing_enabled and optionally updates the UI and prints history.
    It's also called by undo_manager initialization code, so its UI effects
    are optional, but that's private -- all such use should go through
    set_initial_AutoCheckpointing_enabled. Another private fact: win is
    only used if update_UI is true.
    """
    # Note: this would be in undo_UI except for its call from this file.
    # Probably the two global flags it affects should really be instance
    # variables in the undo manager object. If that's done, then maybe it
    # could move back to undo_UI if it could find the undo manager via win.
    # For that reason I might (later) put a version of it there which just
    # delegates to this one. [bruce 071026 comment]
    #
    # Note: the reason this doesn't need to call something in assy.undo_manager
    # (when used to implement the user change of the checkmark menu item for
    # this flag) is that it's called within a slot in the mainwindow,
    # which is itself wrapped by a begin_checkpoint and end_checkpoint,
    # one or the other of which will act as a real checkpoint, unaffected by
    # this flag. [bruce 060312 comment]
    global _editAutoCheckpointing_recursing # TODO: make this a um instance variable??
    if _editAutoCheckpointing_recursing:
        # guard: setChecked below re-enters this function via the MWsemantics slot
        # note: this happens! see comment where we set it below, for why.
        ## print "debug: _editAutoCheckpointing_recursing, returning as noop"
        return
    _set_AutoCheckpointing_enabled( enabled) # TODO: make this a um instance variable??
    if print_to_history:
        if enabled:
            msg_short = "Autocheckpointing enabled"
            msg_long = "Autocheckpointing enabled -- each operation will be undoable"
        else:
            msg_short = "Autocheckpointing disabled"
            msg_long = "Autocheckpointing disabled -- only explicit Undo checkpoints are kept" #k length ok?
        env.history.statusbar_msg( msg_long)
        env.history.message( greenmsg(msg_short))
    if update_UI:
        # Inserting/removing editMakeCheckpointAction from the standardToolBar
        # keeps the toolbar the correct length (i.e. no empty space at the end).
        # BTW, it is ok to call removeAction() even when the action doesn't live
        # in the toolbar. Mark 2008-03-01
        win.standardToolBar.removeAction(win.editMakeCheckpointAction)
        if not enabled:
            win.standardToolBar.insertAction(win.editUndoAction,
                                             win.editMakeCheckpointAction)
        # This is needed to hide/show editMakeCheckpointAction in the "Edit" menu.
        win.editMakeCheckpointAction.setVisible(not enabled)
        # this is only needed when the preference changed, not when the menu item slot is used:
        _editAutoCheckpointing_recursing = True
        try:
            win.editAutoCheckpointingAction.setChecked( enabled )
            # warning: this recurses, via slot in MWsemantics [060314]
        finally:
            _editAutoCheckpointing_recursing = False
    return
# ==
# API for temporarily disabling undo checkpoints and/or Undo/Redo commands [bruce 060414, to help mitigate bug 1625 et al]
# (try/except-NameError idiom preserves values across module reload)
try:
    # Whether to disable Undo checkpoints temporarily, due the mode, or (nim) being inside a drag, etc
    _disable_checkpoints # on reload, use old value unchanged
except:
    _disable_checkpoints = [] # list of (code,string) pairs, corresponding to reasons we're disabled, most recent last
    # (usually only the first one will be shown to user)
try:
    # Whether to disable offering of Undo and Redo commands temporarily
    _disable_UndoRedo
except:
    _disable_UndoRedo = []
def disable_undo_checkpoints(whycode, whymsg = ""):
"""
Disable all undo checkpoints from now until a corresponding reenable call (with the same whycode) is made.
Intended for temporary internal uses, or for use during specific modes or brief UI actions (eg drags).
WARNING: if nothing reenables them, they will remain disabled for the rest of the session.
"""
global _disable_checkpoints
_disable_checkpoints = _do_whycode_disable( _disable_checkpoints, whycode, whymsg)
return
def disable_UndoRedo(whycode, whymsg = ""):
"""
Disable the Undo/Redo user commands from now until a corresponding reenable call (with the same whycode) is made.
Intended for temporary internal uses, or for use during specific modes or brief UI actions (eg drags).
WARNING: if nothing reenables them, they will remain disabled for the rest of the session.
"""
global _disable_UndoRedo
_disable_UndoRedo = _do_whycode_disable( _disable_UndoRedo, whycode, whymsg)
#e make note of need to update UI? I doubt we need this or have a good place to see the note.
#e ideally this would be part of some uniform scheme to let things subscribe to changes to that list.
return
def reenable_undo_checkpoints(whycode):
    """
    Remove the given whycode's reason for keeping undo checkpoints disabled.
    """
    global _disable_checkpoints
    _disable_checkpoints = _do_whycode_reenable( _disable_checkpoints, whycode)
    return
def reenable_UndoRedo(whycode):
    """
    Remove the given whycode's reason for keeping Undo/Redo disabled.
    """
    global _disable_UndoRedo
    _disable_UndoRedo = _do_whycode_reenable( _disable_UndoRedo, whycode)
    return
def _do_whycode_disable( reasons_list_val, whycode, whymsg):
"""
[private helper function for maintaining whycode,whymsg lists]
"""
res = filter( lambda (code, msg): code != whycode , reasons_list_val ) # zap items with same whycode
if len(res) < len(reasons_list_val) and env.debug():
print_compact_stack("debug fyi: redundant call of _do_whycode_disable, whycode %r msg %r, preserved reasons %r" % \
( whycode, whymsg, res ) )
res.append( (whycode, whymsg) ) # put the changed one at the end (#k good??)
return res
def _do_whycode_reenable( reasons_list_val, whycode):
"""
[private helper function for maintaining whycode,whymsg lists]
"""
res = filter( lambda (code, msg): code != whycode , reasons_list_val ) # zap items with same whycode
if len(res) == len(reasons_list_val) and env.debug():
print_compact_stack("debug fyi: redundant call of _do_whycode_reenable, whycode %r, remaining reasons %r" % \
( whycode, res ) )
return res
# ==
# TODO: probably make these assy methods. And if it's true
# that there's similar code elsewhere, merge it into them first.
# [bruce 071025 comment]
def external_begin_cmd_checkpoint(assy, cmdname = "command"): #bruce 060324
    """
    Call this with the assy you're modifying, or None.
    Pass whatever it returns to external_end_cmd_checkpoint later.
    """
    # As of 060328 we use other code similar to these funcs in both GLPane.py and TreeWidget.py...
    # worry: those (on mouse press/release) might interleave with these cmenu versions, depending on details of popup menus!
    # But the worry is unfounded: If a click puts up the menu, no interleaving; if mouse stays down until command is done,
    # then the outer press/release wraps (properly) the inner cmenu cps, unless outer release is missing (absorbed by popup menu
    # as it apparently is), and then, releases's end-cp never occurs but cmenu's was enough. This might affect cmdname, but
    # checkpointing should be fine. Note, it doesn't do begin_op / end_op, which could be an issue someday. ###@@@
    if assy is None:
        return False, None
    begin_retval = assy.undo_checkpoint_before_command(cmdname)
    return True, begin_retval # don't bother to include assy -- it doesn't really matter if it corresponds
def external_end_cmd_checkpoint(assy, begin_retval):
    """
    Finish the checkpoint pair begun by external_begin_cmd_checkpoint;
    begin_retval is the value that call returned (or None, as a
    convenience for callers worried about e.g. "release without press").
    """
    if begin_retval is None:
        return
    flag, begin_retval = begin_retval
    if assy is None:
        return
    # seems best to do this even if flag is False... but begin_retval is required to be True or False...
    # which means this had a bug for a few days (passing None as begin_retval then) and was apparently
    # not happening with flag false (or a debug print would have complained about the None)...
    # so I should eliminate begin_retval someday, but for now, I need to fix that bug somehow. How about "or False".
    # warning: this is a poorly-considered kluge. ###@@@ [bruce 060328]
    assy.undo_checkpoint_after_command(begin_retval or False)
    return
# ==
def wrap_callable_for_undo(func, cmdname = "menu command"):
    #bruce 060324; moved from widgets.py to undo_manager.py, bruce 080203
    """
    Wrap a callable object "func" so that begin and end undo checkpoints are
    performed for it; be sure the returned object can safely be called at any
    time in the future (even if various things have changed in the meantime).
    @warning: If a reference needs to be kept to the returned object, that's
    the caller's responsibility.
    """
    # Up to 3 dummy parameters absorb whatever args PyQt may pass to menu
    # callables; func and cmdname are captured as default values so the
    # returned callable is safe to invoke after this scope is gone (the
    # usual defense against late-binding closure bugs).
    def _wrapped(_g1 = None, _g2 = None, _g3 = None,
                 func = func, cmdname = cmdname):
        return _do_callable_for_undo(func, cmdname)
    return _wrapped
def _do_callable_for_undo(func, cmdname): #bruce 060324
    """
    [private helper for wrap_callable_for_undo]
    """
    # this needs review once we support multiple open files;
    # caller might have to pass the assy in
    assy = env.mainwindow().assy
    begin_retval = external_begin_cmd_checkpoint(assy, cmdname = cmdname)
    res = None
    try:
        res = func() # note: I don't know whether res matters to Qt
    except:
        print_compact_traceback("exception in menu command %r ignored: " % cmdname)
        # REVIEW this message -- is func always a menu command?
    assy = env.mainwindow().assy # (since it might have changed! (in theory))
    external_end_cmd_checkpoint(assy, begin_retval)
    return res
# end
| NanoCAD-master | cad/src/foundation/undo_manager.py |
# Copyright 2007 Nanorex, Inc. See LICENSE file for details.
"""
Assembly_API.py -- API for class assembly, also useful for isinstance tests
Note: the API is not filled in yet, but the isinstance usage is
relied on to remove some import cycles.
@author: bruce
@version: $Id$
@copyright: 2007 Nanorex, Inc. See LICENSE file for details.
"""
class Assembly_API:
    """
    Placeholder API class for assembly objects; currently useful mainly
    as an isinstance-test target (which helps remove some import cycles).
    """
    # TODO: declare the methods that clients of any assembly rely on
    pass
| NanoCAD-master | cad/src/foundation/Assembly_API.py |
# Copyright 2007 Nanorex, Inc. See LICENSE file for details.
"""
test_connectWithState_constants.py -- constants for use in
test_connectWithState command and its PM.
This file can be merged into the command class, once it's possible
for the PM code to get these values directly from the staterefs
(an intended improvement).
$Id$
History:
070830 bruce split this out of test_command_PMs.py
"""
# definitions for cylinder height and caps style, stored as preferences values
#
# (Note: in a realistic example, these would be defined using the State macro,
# just as is done for the other state, below. The only reason they're defined
# as preference values here is to illustrate how that kind of state can be used
# interchangeably with State-macro-defined instance variables by connectWithState.)
CYLINDER_HEIGHT_PREFS_KEY = "a9.2 scratch/test_connectWithState_PM/cylinder height"
CYLINDER_HEIGHT_DEFAULT_VALUE = 7.5
CYLINDER_ROUND_CAPS_PREFS_KEY = "a9.2 scratch/test_connectWithState_PM/cylinder round_caps"
CYLINDER_ROUND_CAPS_DEFAULT_VALUE = True
def cylinder_round_caps():
    """
    Return the current round-caps preference value, falling back to the
    default when no preference has been stored.
    """
    import foundation.env as env # imported lazily, at call time
    caps_pref = env.prefs.get(CYLINDER_ROUND_CAPS_PREFS_KEY,
                              CYLINDER_ROUND_CAPS_DEFAULT_VALUE)
    return caps_pref
# The state for cylinder width, cylinder color [nim], and cylinder orientation
# is defined using the State macro in the command object (not in this file).
# It could be referenced by the PM class in this file by:
# - self.command.cylinderWidth
# - self.command.cylinderColor (nim)
# - self.command.cylinderVertical (###NIM)
# but in fact is referenced indirectly using string literals for those attr names.
CYLINDER_VERTICAL_DEFAULT_VALUE = True
CYLINDER_WIDTH_DEFAULT_VALUE = 2.0
### REVISE: the default value should come from the stateref, when using the State macro
# end
| NanoCAD-master | cad/src/prototype/test_connectWithState_constants.py |
# Copyright 2007-2009 Nanorex, Inc. See LICENSE file for details.
"""
example_expr_command.py -- example of how to use an interactive graphics
expr in a command (unfinished, so partly scratch code); command and PM
are each variants of ExampleCommand1's command and PM classes
@author: Bruce
@version: $Id$
@copyright: 2007-2009 Nanorex, Inc. See LICENSE file for details.
History:
070830 bruce split this out of test_commands.py and test_command_PMs.py,
in which it was called ExampleCommand2E
"""
# == PM class
from prototype.test_command_PMs import ExampleCommand1_PM
from PM.PM_LineEdit import PM_LineEdit
from PM.PM_GroupBox import PM_GroupBox
_superclass_PM = ExampleCommand1_PM
class ExampleCommand2E_PM( ExampleCommand1_PM ):
    # NOTE: used to use ExampleCommand2_PM which uses GBC; UNTESTED with this superclass [bruce 080910]
    """
    Property Manager for Example Command 2E.
    """
    title = "Example Command 2E"
    prefix = "Thing2E" # for names created by GBC
    def _addGroupBoxes(self):
        """
        Add the superclass's group boxes, then a second one of our own.
        """
        _superclass_PM._addGroupBoxes(self)
        groupbox2 = PM_GroupBox( self, title = "group box 2" )
        self.pmGroupBox2 = groupbox2
        self._loadGroupBox2(groupbox2)
        return
    def _loadGroupBox2(self, groupbox):
        """
        Populate group box 2 with its widgets.
        """
        lineedit = PM_LineEdit( groupbox,
                                label = "Text:",
                                text = "initial text (pm)",
                                setAsDefault = True,
                                spanWidth = False )
        self.someLineEdit = lineedit
        ### TODO: self.someLineEdit.connectWithState( ... some text state ...)
        # and then connect the TextState text to the same state
        # (or just use that state? no, it needs an address outside that complicated expr)
        return
    pass
# == command class
# these imports are not needed in a minimal example like ExampleCommand1:
from graphics.drawing.CS_draw_primitives import drawline
from utilities.constants import red
from geometry.VQT import V
from exprs.instance_helpers import get_glpane_InstanceHolder
from exprs.draggable import DraggablyBoxed
from exprs.instance_helpers import InstanceMacro
from exprs.attr_decl_macros import State
from exprs.TextRect import TextRect
class TextState(InstanceMacro): # rename?
    # Holds an editable string as exprs State, displayed via a TextRect.
    # NOTE(review): _value is presumably the expr an InstanceMacro instance
    # delegates its drawing to -- confirm against InstanceMacro.
    text = State(str, "initial text", doc = "text")
    _value = TextRect(text) # need size?
    pass
from prototype.test_commands import ExampleCommand1 # NOTE: was ExampleCommand2, this revision UNTESTED [bruce 080910]
##from commands.SelectAtoms.SelectAtoms_Command import SelectAtoms_Command # used indirectly via ExampleCommand1
from commands.SelectAtoms.SelectAtoms_GraphicsMode import SelectAtoms_GraphicsMode
##class ExampleCommand2E_GM( ExampleCommand1.GraphicsMode_class): #bruce 071014 split out _GM class; works, except highlighting
class ExampleCommand2E_GM(SelectAtoms_GraphicsMode):
    def Draw_other(self):
        """
        Draw whatever the superclass draws, then our custom additions
        (a red line segment and the command's expr instance), all in the
        model's absolute coordinate system.
        """
        super(ExampleCommand2E_GM, self).Draw_other()
        # custom drawing beyond what the superclass provides:
        drawline(red, V(1, 0, 1), V(1, 1, 1), width = 2)
        self.command._expr_instance.draw()
        return
    pass
##class ExampleCommand2E_GM_KLUGED( ExampleCommand1.GraphicsMode_class,
## SelectAtoms_Command #### KLUGE, will it work? trying to use it just for its GM aspects...
## ): #bruce 071014 split out _GM class
## # status, 071022: works except for highlighting (tho it looked like it did something on mouseover;
## # i forget if this eg had a good HL color change on that resizer), and on drag on that resizer i got
## # a region selection rubberband lasso/window. Until now it also had an exception in leftUp then,
## ## AttributeError: 'ExampleCommand2E_GM_KLUGED' object has no attribute 'ignore_next_leftUp_event'
## ## [GLPane.py:1845] [selectAtomsMode.py:494]
## # but setting this in __init__ works around that (see comment there).
#@ATTENTION:
#UPDATE 2008-08-22: Above comment is obsolete since SelectAtomsMode class has
#been deprecated . This commented out code needs to be revised if its ever used.
#[-- Ninad comment]
##
## command = None # defeat the property in selectAtomsMode #k needed?
##
## def __init__(self, command):
## ExampleCommand1.GraphicsMode_class.__init__(self, command) # includes self.command = command
## SelectAtoms_Command.__init__(self, self.glpane) ##k??
## self.ignore_next_leftUp_event = True
## # kluge, since we didn't run the GM part of SelectAtoms_Command.Enter,
## # which normally does this.
## return
##
## def Draw_other(self):
## """
## Do some custom drawing (in the model's abs coordsys)...
## """
## glpane = self.glpane
## super(ExampleCommand2E_GM_KLUGED, self).Draw_other()
## drawline(red, V(1,0,1), V(1,1,1), width = 2)
## self.command._expr_instance.draw()
## pass
##KLUGE_USE_SELATOMS_AS_GM = True ####
##
##if KLUGE_USE_SELATOMS_AS_GM:
## ExampleCommand2E_GM = ExampleCommand2E_GM_KLUGED ##### improve
class ExampleCommand2E( ExampleCommand1, object):
    """
    Add things not needed in a minimal example, to try them out.
    (Uses a PM which is the same as ExampleCommand1 except for title.)
    """
    # Note: the object superclass exists only so super(ExampleCommand2E, self)
    # works. It must not come first, since that would override __new__
    # (possibly fixable with def __init__ -- untried, since object-last works).
    commandName = 'ExampleCommand2E-commandName'
    PM_class = ExampleCommand2E_PM
    featurename = "Prototype: ExampleCommand2E"
    GraphicsMode_class = ExampleCommand2E_GM
    def __init__(self, commandSequencer):
        """
        Create an expr instance, to draw in addition to the model.
        """
        super(ExampleCommand2E, self).__init__(commandSequencer)
        glpane = commandSequencer.assy.glpane
        boxed_text = DraggablyBoxed(TextState(), resizable = True)
            ###BUG: resizing projects the mouseray the wrong way when the
            # plane is tilted. The Boxed resizable option was only coded for
            # use in 2D widgets; other constrained-drag code is correct for
            # 3D but not yet directly usable in Boxed. So this is just an
            # example interactive expr, not the best way to resize in 3D
            # (though it could be fixed).
        # note: this code is similar to _expr_instance_for_imagename
        # in confirmation_corner.py
        holder = get_glpane_InstanceHolder(glpane)
        unique_index = (id(self),)
            # WARNING: must be unique -- this InstanceHolder is shared
            # with everything else in NE1
        self._expr_instance = holder.Instance(boxed_text, unique_index,
                                              skip_expr_compare = True)
        return
    pass # end of class ExampleCommand2E
# end
| NanoCAD-master | cad/src/prototype/example_expr_command.py |
NanoCAD-master | cad/src/prototype/__init__.py |
|
# Copyright 2004-2009 Nanorex, Inc. See LICENSE file for details.
"""
test_drawing.py -- replaces or augments GLPane.paintGL() to try various
OpenGL rendering models, determine whether the performance bottleneck is
in OpenGL, Qt, or somewhere else, and find a better alternative.
@author: Russ
@version: $Id$
@copyright: 2004-2009 Nanorex, Inc. See LICENSE file for details.
Initial version simply renders an array of n^2 unit spheres, n by n.
To turn it on, enable TEST_DRAWING at the start of
graphics/widgets/GLPane_rendering_methods.py.
For manual measurements, you can spin them around with
the NE1 rotate control, getting frames per second off the MacOS OpenGL Profiler.
For automatic measurements, set the debug_pref 'startup in Test Graphics command
(next session)?', which invokes the TestGraphics_Command.
With a low number of spheres, the frame rate is bottlenecked by Qt. You'll know
that's the case because as you increase the number of spheres, the frame rate
doesn't go down at first. Eventually, something in the graphics card becomes
the bottleneck. These tests were done as a series of explorations into finding
and avoiding those bottlenecks.
"""
DRAWSPHERE_DETAIL_LEVEL = 2
AVAILABLE_TEST_CASES_DICT = {
# (testCase: description for combobox item text)
1: "",
2: "",
3: "",
3.1: "",
3.2: "",
3.3: "",
3.4: "",
4: "",
5: "",
6: "",
7: "",
8: "",
8.1: "Sphere primitives chunked in a DrawingSet",
8.2: "Sphere primitives chunked with transforms",
8.3: "Cylinder primitives chunked in a DrawingSet",
100: "test_selection_redraw",
}
AVAILABLE_TEST_CASES_ITEMS = AVAILABLE_TEST_CASES_DICT.items()
AVAILABLE_TEST_CASES_ITEMS.sort()
# Used for tests with graphicsMode.Draw_model() from TestGraphics_GraphicsMode.
USE_GRAPHICSMODE_DRAW = False # Changed to True in cases that use the following.
def test_Draw_model(glpane):
    """
    Draw the test model for the currently configured testCase; used for
    tests driven by graphicsMode.Draw_model() (see USE_GRAPHICSMODE_DRAW).
    """
    # WARNING: this duplicates some code with test_drawing().
    if first_time:
        return # Do nothing during the initial setup script.
    if testCase == 1:
        test_csdl.draw()
    elif testCase == 2 or testCase == 5:
        glColor3i(127, 127, 127)
        glCallList(test_dl)
    elif int(testCase) == 3:
        # int() truncation makes this cover sub-cases 3.1 - 3.4 too
        test_spheres.draw()
    elif int(testCase) == 8:
        # covers sub-cases 8.1 - 8.3 as well
        test_Draw_8x(glpane)
        pass
    elif int(testCase) >= 100:
        #bruce 090102
        glpane.configure_enabled_shaders()
        test_Object.draw_complete()
        pass
    return
def test_Draw_8x(glpane): #bruce 090223 refactoring of common code
    """
    Draw for test cases 8, 8.1, 8.2 and 8.3.
    """
    glpane.configure_enabled_shaders()
    if testCase == 8:
        # Case 8: draw each sphere chunk individually, no DrawingSet.
        for sphere_chunk in test_spheres:
            sphere_chunk.draw()
        return
    # Cases 8.1 - 8.3 draw through a single DrawingSet.
    if testCase == 8.2:
        animate_TCs()
    test_DrawingSet.draw(
        ###highlighted=True ### Test halo drawing.
        )
    if testCase == 8.3:
        draw_cylinder_axes()
    return
def animate_TCs():
    """
    Animate the TransformControls, rotating them slowly about the X axis.
    """
    # Note: as of 090223 and before, this works in the DL case but not the
    # shader case, because coordinate updates after TCs are modified are
    # nim in the shader case (I think). [bruce 090223 comment]
    period = 10.0 # seconds per full revolution
    angle = 2 * pi * fmod(time.time(), period) / period
    # TCs[0] stays the identity; the other two rotate in opposite
    # directions around the X axis.
    TCs[1].setRotateTranslate(Q(V(1, 0, 0), 2 * angle), V(0, 0, 0))
    TCs[2].setRotateTranslate(Q(V(1, 0, 0), -angle), V(0, 0, 0))
    return
def draw_cylinder_axes():
    """
    Overdraw the cylinder axes as black-to-white shaded lines, with the
    depth test disabled so they show through the geometry.
    """
    glDisable(GL_DEPTH_TEST)
    glBegin(GL_LINES)
    for axis_start, axis_end in test_endpoints:
        glColor3i(0, 0, 0)
        glVertex(axis_start)
        glColor3i(255, 255, 255)
        glVertex(axis_end)
    glEnd()
    glEnable(GL_DEPTH_TEST)
    return
# Draw an array of nSpheres x nSpheres, with divider gaps every 10 and 100.
# 10, 25, 50, 100, 132, 200, 300, 400, 500...
# safe default values for general use.
# Uncomment one of the overriding assignments below when debugging.
#testCase = 1; nSpheres = 10; chunkLength = 24
#testCase = 1; nSpheres = 10; chunkLength = 24; USE_GRAPHICSMODE_DRAW = True
#testCase = 1; nSpheres = 132
#testCase = 8; nSpheres = 50; chunkLength = 24
#testCase = 8; nSpheres = 132; chunkLength = 24
# Nice, big spheres for debugging.
testCase = 8.1; nSpheres = 10; chunkLength = 24; USE_GRAPHICSMODE_DRAW = True
# 50x50 is okay for either shader spheres, or Display Lists.
#testCase = 8.1; nSpheres = 50; chunkLength = 24
#testCase = 8.1; nSpheres = 50; chunkLength = 24; USE_GRAPHICSMODE_DRAW = True
#testCase = 8.1; nSpheres = 75; chunkLength = 24
#testCase = 8.1; nSpheres = 100; chunkLength = 200
#testCase = 8.1; nSpheres = 100; chunkLength = 50
### Initial DL pass-through version hangs at 8.1/100/24.
##testCase = 8.1; nSpheres = 100; chunkLength = 24
#testCase = 8.1; nSpheres = 100; chunkLength = 8
#testCase = 8.1; nSpheres = 100; chunkLength = 8; USE_GRAPHICSMODE_DRAW = True
#testCase = 8.1; nSpheres = 132; chunkLength = 200
#testCase = 8.1; nSpheres = 132; chunkLength = 8
# 132x132 is like one DNAO tile. Horribly slow on Display Lists.
##testCase = 8.1; nSpheres = 132; chunkLength = 8; USE_GRAPHICSMODE_DRAW = True
#testCase = 8.1; nSpheres = 200; chunkLength = 8
#testCase = 8.1; nSpheres = 300; chunkLength = 8
#testCase = 8.1; nSpheres = 400; chunkLength = 8
#testCase = 8.1; nSpheres = 500; chunkLength = 8
#testCase = 8.1; nSpheres = 600; chunkLength = 8
#testCase = 8.1; nSpheres = 100; chunkLength = 8; USE_GRAPHICSMODE_DRAW = True
#testCase = 8.1; nSpheres = 132; chunkLength = 8; USE_GRAPHICSMODE_DRAW = True
#testCase = 8.1; nSpheres = 200; chunkLength = 8; USE_GRAPHICSMODE_DRAW = True
#testCase = 8.1; nSpheres = 300; chunkLength = 8; USE_GRAPHICSMODE_DRAW = True
#testCase = 8.1; nSpheres = 400; chunkLength = 8; USE_GRAPHICSMODE_DRAW = True
#testCase = 8.1; nSpheres = 500; chunkLength = 8; USE_GRAPHICSMODE_DRAW = True
#testCase = 8.1; nSpheres = 600; chunkLength = 8; USE_GRAPHICSMODE_DRAW = True
#testCase = 8.2; nSpheres = chunkLength = 10
#testCase = 8.2; nSpheres = 50; chunkLength = 250
#testCase = 8.2; nSpheres = 100; chunkLength = 200
#testCase = 8.2; nSpheres = 100; chunkLength = 50
testCase = 8.3; nSpheres = 2; chunkLength = 8
#testCase = 8.3; nSpheres = 5; chunkLength = 8
#testCase = 8.3; nSpheres = 10; chunkLength = 8
#testCase = 8.3; nSpheres = 50; chunkLength = 8
#testCase = 8.3; nSpheres = 100; chunkLength = 8
#testCase = 8.3; nSpheres = 100; chunkLength = 8
# Longish chunks for test case 3.4 (with transforms)
#nSpheres = 132; transformChunkLength = 1000
#
# 16x16 sphere array, chunked by columns for vertex shader debugging display.
#nSpheres = transformChunkLength = 16
#
# Short chunks ok w/ TEXTURE_XFORMS = True, but too many chunks for N_CONST_XFORMS.
#nSpheres = 132; transformChunkLength = 70 # 249 chunks.
#nSpheres = 132; transformChunkLength = 50 # 349 chunks.
#nSpheres = 132; transformChunkLength = 20 # 872 chunks.
#nSpheres = 132; transformChunkLength = 10 # 1743 chunks.
#nSpheres = 132; transformChunkLength = 8 # 2178 chunks.
#
#nSpheres = 300; transformChunkLength = 8 # 11250 chunks.
#nSpheres = 400; transformChunkLength = 8 # 20000 chunks.
#nSpheres = 500; transformChunkLength = 8 # 31250 chunks.
from geometry.VQT import V, Q, A, norm, vlen, angleBetween
import graphics.drawing.drawing_globals as drawing_globals
from graphics.drawing.GLSphereBuffer import GLSphereBuffer
from graphics.drawing.DrawingSet import DrawingSet
from graphics.drawing.TransformControl import TransformControl
from graphics.drawing.ColorSorter import ColorSorter
from graphics.drawing.ColorSortedDisplayList import ColorSortedDisplayList
from graphics.drawing.CS_draw_primitives import drawsphere, drawcylinder
from graphics.drawing.CS_workers import drawsphere_worker_loop
from graphics.drawing.gl_buffers import GLBufferObject
from graphics.drawing.gl_Scale import glScale
import numpy
from OpenGL.GL import GL_ARRAY_BUFFER_ARB
from OpenGL.GL import GL_COLOR_BUFFER_BIT
from OpenGL.GL import GL_COMPILE_AND_EXECUTE
from OpenGL.GL import GL_CULL_FACE
from OpenGL.GL import GL_STENCIL_BUFFER_BIT
from OpenGL.GL import GL_DEPTH_BUFFER_BIT
from OpenGL.GL import GL_DEPTH_TEST
from OpenGL.GL import GL_ELEMENT_ARRAY_BUFFER_ARB
from OpenGL.GL import GL_FLOAT
from OpenGL.GL import GL_LINES
from OpenGL.GL import GL_MODELVIEW
from OpenGL.GL import GL_QUADS
from OpenGL.GL import GL_STATIC_DRAW
from OpenGL.GL import GL_UNSIGNED_INT
from OpenGL.GL import GL_VERTEX_ARRAY
from OpenGL.GL import glBegin
from OpenGL.GL import glCallList
from OpenGL.GL import glClear
from OpenGL.GL import glClearColor
from OpenGL.GL import glColor3i
from OpenGL.GL import glDisable
from OpenGL.GL import glDisableClientState
from OpenGL.GL import glDrawElements
from OpenGL.GL import glEnable
from OpenGL.GL import glEnableClientState
from OpenGL.GL import glEnd
from OpenGL.GL import glEndList
from OpenGL.GL import glFlush
from OpenGL.GL import glGenLists
from OpenGL.GL import glMatrixMode
from OpenGL.GL import glNewList
from OpenGL.GL import glNormalPointer
from OpenGL.GL import glPopMatrix
from OpenGL.GL import glPushMatrix
from OpenGL.GL import glSecondaryColor3fv
from OpenGL.GL import glTranslatef
from OpenGL.GL import glVertex
from OpenGL.GL import glVertexPointer
from OpenGL.GL.ARB.vertex_program import glDisableVertexAttribArrayARB
from OpenGL.GL.ARB.vertex_program import glEnableVertexAttribArrayARB
from OpenGL.GL.ARB.vertex_program import glVertexAttribPointerARB
import time
from math import sin, pi, fmod, floor
test_Object = None
test_DrawingSet = None
def delete_caches():
"""
External code which modifies certain parameters (e.g. testCase, nSpheres)
can call this to remove our caches, so the change takes effect.
"""
#bruce 080930; not sure it contains enough to make runtime change of testCase fully correct;
# should it contain _USE_SHADERS?
# Ideally we'd refactor this whole file so each testCase was its own class,
# with instances containing the cached objects and draw methods.
global test_csdl, test_dl, test_dls, test_ibo, test_vbo, test_spheres, test_DrawingSet, test_Object
global C_array, start_pos, first_time
test_csdl = None
test_dl = None
test_dls = None
test_ibo = None
test_vbo = None
test_spheres = None
if test_DrawingSet is not None:
test_DrawingSet = None
print "set test_DrawingSet = None"
test_endpoints = None
if test_Object:
test_Object.destroy()
test_Object = None
C_array = None
# Start at the lower-left corner, offset so the whole pattern comes
# up centered on the origin.
start_pos = V(-(nSpheres-1)/2.0, -(nSpheres-1)/2.0, 0)
# Enable set-up logic.
first_time = True
return
delete_caches() # initialize globals
# From drawsphere_worker_loop().
def sphereLoc(x, y): # Assume int x, y in the sphere array.
    """
    Return the center position for the sphere at integer grid coords (x, y).

    Spheres are spaced one unit apart, with divider gaps inserted every
    10 and every 100 rows/columns (the x//10 and x//100 terms), offset by
    start_pos so the whole array is centered on the origin.
    """
    # Floor division made explicit: for the assumed int x, y this is
    # identical to the original Python 2 "/" behavior, and stays correct
    # if the file is ever run under true-division semantics.
    return start_pos + V( x + x//10 + x//100, y + y//10 + y//100, 0)
def rainbow(t):
    """
    Map t in [0, 1] to an [r, g, b] color progressing through
    red -> yellow -> green -> cyan -> blue.
    """
    if t < 0.25:
        # red to yellow: ramp green up.
        return [1.0, 4.0 * t, 0.0]
    if t < 0.5:
        # yellow to green: ramp red down.
        return [4.0 * (0.5 - t), 1.0, 0.0]
    if t < 0.75:
        # green to cyan: ramp blue up.
        return [0.0, 1.0, 4.0 * (t - 0.5)]
    # cyan to blue: ramp green down.
    return [0.0, 4.0 * (1.0 - t), 1.0]
_USE_SHADERS = True # change to false if loading them fails the first time
def test_drawing(glpane, initOnly = False):
"""
When TEST_DRAWING is enabled at the start of
graphics/widgets/GLPane_rendering_methods.py,
and when TestGraphics_Command is run (see its documentation
for various ways to do that),
this file is loaded and GLPane.paintGL() calls the
test_drawing() function instead of the usual body of paintGL().
"""
# WARNING: this duplicates some code with test_Draw_model().
# Load the sphere shaders if needed.
global _USE_SHADERS
if _USE_SHADERS:
if not drawing_globals.test_sphereShader:
print "test_drawing: Loading sphere shaders."
try:
from graphics.drawing.gl_shaders import GLSphereShaderObject
drawing_globals.test_sphereShader = GLSphereShaderObject()
##### REVIEW: is this compatible with my refactoring in drawing_globals?
# If not, use of Test Graphics Performance command might cause subsequent
# bugs in other code. Ideally we'd call the new methods that encapsulate
# this, to setup shaders. [bruce 090304 comment]
print "test_drawing: Sphere-shader initialization is complete.\n"
except:
_USE_SHADERS = False
print "test_drawing: Exception while loading sphere shaders, will reraise and not try again"
raise
pass
global start_pos, first_time
if first_time:
# Set up the viewing scale, but then let interactive zooming work.
glpane.scale = nSpheres * .6
pass
# This same function gets called to set up for drawing, and to draw.
if not initOnly:
glpane._setup_modelview()
glpane._setup_projection()
##glpane._compute_frustum_planes()
glClearColor(64.0, 64.0, 64.0, 1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT )
##glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT )
glMatrixMode(GL_MODELVIEW)
pass
global test_csdl, test_dl, test_dls, test_ibo, test_vbo, test_spheres
global test_DrawingSet, test_endpoints, test_Object
# See below for test case descriptions and timings on a MacBook Pro.
# The Qt event toploop in NE1 tops out at about 60 frames-per-second.
# NE1 with test toploop, single CSDL per draw (test case 1)
# . 17,424 spheres (132x132, through the color sorter) 4.8 FPS
# Russ 080919: More recently, 12.2 FPS.
# . Level 2 spheres have 9 triangles x 20 faces, 162 distinct vertices,
# visited on the average 2.3 times, giving 384 tri-strip vertices.
# . 17,424 spheres is 6.7 million tri-strip vertices. (6,690,816)
if testCase == 1:
if test_csdl is None:
print ("Test case 1, %d^2 spheres\n %s." %
(nSpheres, "ColorSorter"))
test_csdl = ColorSortedDisplayList()
ColorSorter.start(None, test_csdl)
drawsphere([0.5, 0.5, 0.5], # color
[0.0, 0.0, 0.0], # pos
.5, # radius
DRAWSPHERE_DETAIL_LEVEL,
testloop = nSpheres )
ColorSorter.finish(draw_now = True)
pass
else:
test_csdl.draw()
pass
# NE1 with test toploop, single display list per draw (test case 2)
# . 10,000 spheres (all drawing modes) 17.5 FPS
# . 17,424 spheres (132x132, manual display list) 11.1 FPS
# . 40,000 spheres (mode 5 - VBO/IBO spheres from DL's) 2.2 FPS
# . 40,000 spheres (mode 6 - Sphere shaders from DL's) 2.5 FPS
# . 90,000 spheres (all drawing modes) 1.1 FPS
elif testCase == 2:
if test_dl is None:
print ("Test case 2, %d^2 spheres\n %s." %
(nSpheres, "One display list calling primitive dl's"))
test_dl = glGenLists(1)
glNewList(test_dl, GL_COMPILE_AND_EXECUTE)
drawsphere_worker_loop(([0.0, 0.0, 0.0], # pos
.5, # radius
DRAWSPHERE_DETAIL_LEVEL,
nSpheres ))
glEndList()
pass
else:
glColor3i(127, 127, 127)
glCallList(test_dl)
pass
# NE1 with test toploop, one big chunk VBO/IBO of box quads (test case 3)
# . 17,424 spheres (1 box/shader draw call) 43.7 FPS
# . 17,424 spheres (1 box/shader draw call w/ rad/ctrpt attrs) 57.7 FPS
# . 40,000 spheres (1 box/shader draw call w/ rad/ctrpt attrs) 56.7 FPS
# . 90,000 spheres (1 box/shader draw call w/ rad/ctrpt attrs) 52.7 FPS
# . 160,000 spheres (1 box/shader draw call w/ rad/ctrpt attrs) 41.4 FPS
# . 250,000 spheres (1 box/shader draw call w/ rad/ctrpt attrs) 27.0 FPS
elif int(testCase) == 3:
doTransforms = False
if test_spheres is None:
print ("Test case 3, %d^2 spheres\n %s." %
(nSpheres, "One big VBO/IBO chunk buffer"))
if testCase == 3.1:
print ("Sub-test 3.1, animate partial updates.")
elif testCase == 3.2:
print ("Sub-test 3.2, animate partial updates" +
" w/ C per-chunk array buffering.")
elif testCase == 3.3:
print ("Sub-test 3.3, animate partial updates" +
" w/ Python array buffering.")
# . 3.4 - Big batch draw, with transforms indexed by IDs added.
# (Second FPS number with debug colors in the vertex shader off.)
# - 90,000 (300x300) spheres, TEXTURE_XFORMS = True, 26(29) FPS
# - 90,000 (300x300) spheres, N_CONST_XFORMS = 250, 26(29) FPS
# - 90,000 (300x300) spheres, N_CONST_XFORMS = 275, 0.3(0.6) FPS
# (What happens after 250? CPU usage goes from 40% to 94%.)
# -160,000 (400x400) spheres, TEXTURE_XFORMS = True, 26 FPS
# -250,000 (500x500) spheres, TEXTURE_XFORMS = True, 26 FPS
elif testCase == 3.4:
print ("Sub-test 3.4, add transforms indexed by IDs.")
from graphics.drawing.gl_shaders import TEXTURE_XFORMS
from graphics.drawing.gl_shaders import N_CONST_XFORMS
from graphics.drawing.gl_shaders import UNIFORM_XFORMS
if TEXTURE_XFORMS:
print "Transforms in texture memory."
elif UNIFORM_XFORMS:
print "%d transforms in uniform memory." % N_CONST_XFORMS
pass
else:
print "transforms not supported, error is likely"
doTransforms = True
pass
centers = []
radius = .5
radii = []
colors = []
if not doTransforms:
transformIDs = None
else:
transformIDs = []
transformChunkID = -1 # Allocate IDs sequentially from 0.
# For this test, allow arbitrarily chunking the primitives.
primCounter = transformChunkLength
transforms = [] # Accumulate transforms as a list of lists.
# Initialize transforms with an identity matrix.
# Transforms here are lists (or Numpy arrays) of 16 numbers.
identity = ([1.0] + 4*[0.0]) * 3 + [1.0]
pass
for x in range(nSpheres):
for y in range(nSpheres):
centers += [sphereLoc(x, y)]
# Sphere radii progress from 3/4 to full size.
t = float(x+y)/(nSpheres+nSpheres) # 0 to 1 fraction.
thisRad = radius * (.75 + t*.25)
radii += [thisRad]
# Colors progress from red to blue.
colors += [rainbow(t)]
# Transforms go into a texture memory image if needed.
# Per-primitive Transform IDs go into an attribute VBO.
if doTransforms:
primCounter = primCounter + 1
if primCounter >= transformChunkLength:
# Start a new chunk, allocating a transform matrix.
primCounter = 0
transformChunkID += 1
if 0: # 1
# Debug hack: Label mat[0,0] with the chunk ID.
# Vertex shader debug code shows these in blue.
# If viewed as geometry, it will be a slight
# stretch of the array in the X direction.
transforms += [
[1.0+transformChunkID/100.0] + identity[1:]]
elif 0: # 1
# Debug hack: Fill mat with mat.element pattern.
transforms += [
[transformChunkID +
i/100.0 for i in range(16)]]
else:
transforms += [identity]
pass
pass
# All of the primitives in a chunk have the same ID.
transformIDs += [transformChunkID]
pass
continue
continue
test_spheres = GLSphereBuffer()
test_spheres.addSpheres(centers, radii, colors, transformIDs, None)
if doTransforms:
print ("%d primitives in %d transform chunks of size <= %d" %
(nSpheres * nSpheres, len(transforms),
transformChunkLength))
shader = drawing_globals.test_sphereShader
shader.setupTransforms(transforms)
pass
else:
shader = drawing_globals.test_sphereShader
shader.configShader(glpane)
# Update portions for animation.
pulse = time.time()
pulse -= floor(pulse) # 0 to 1 in each second
# Test animating updates on 80% of the radii in 45% of the columns.
# . 3.1 - Update radii Python array per-column, send to graphics RAM.
# - 2,500 (50x50) spheres 55 FPS
# - 10,000 (100x100) spheres 35 FPS
# - 17,424 (132x132) spheres 22.2 FPS
# - 40,000 (200x200) spheres 12.4 FPS
# - 90,000 (300x300) spheres 6.0 FPS
if testCase == 3.1:
# Not buffered, send each column change.
radius = .5
margin = nSpheres/10
for x in range(margin, nSpheres, 2):
radii = []
for y in range(margin, nSpheres-margin):
t = float(x+y)/(nSpheres+nSpheres) # 0 to 1 fraction.
# Sphere radii progress from 3/4 to full size.
thisRad = radius * (.75 + t*.25)
phase = pulse + float(x+y)/nSpheres
radii += 8 * [thisRad-.1 + .1*sin(phase * 2*pi)]
continue
C_radii = numpy.array(radii, dtype=numpy.float32)
offset = x*nSpheres + margin
test_spheres.radii_vbo.update(offset * 8, C_radii)
continue
pass
# . 3.2 - Numpy buffered in C array, subscript assignments to C.
# - 2,500 (50x50) spheres 48 FPS
# - 10,000 (100x100) spheres 17.4 FPS
# - 17,424 (132x132) spheres 11.2 FPS
# - 40,000 (200x200) spheres 5.5 FPS
# - 90,000 (300x300) spheres 2.5 FPS
elif testCase == 3.2:
# Buffered per-chunk at the C array level.
radius = .5
margin = nSpheres/10
global C_array
if C_array is None:
# Replicate.
C_array = numpy.zeros((8 * (nSpheres-(2*margin)),),
dtype=numpy.float32)
pass
for x in range(margin, nSpheres, 2):
count = 0
for y in range(margin, nSpheres-margin):
t = float(x+y)/(nSpheres+nSpheres) # 0 to 1 fraction.
# Sphere radii progress from 3/4 to full size.
thisRad = radius * (.75 + t*.25)
phase = pulse + float(x+y)/nSpheres
C_array[count*8:(count+1)*8] = \
thisRad-.1 + .1*sin(phase * 2*pi)
count += 1
continue
offset = x*nSpheres + margin
test_spheres.radii_vbo.update(offset * 8, C_array)
continue
pass
# . 3.3 - updateRadii in Python array, copy via C to graphics RAM.
# - 2,500 (50x50) spheres 57 FPS
# - 10,000 (100x100) spheres 32 FPS
# - 17,424 (132x132) spheres 20 FPS
# - 40,000 (200x200) spheres 10.6 FPS
# - 90,000 (300x300) spheres 4.9 FPS
elif testCase == 3.3:
# Buffered at the Python level, batch the whole-array update.
radius = .5
margin = nSpheres/10
for x in range(margin, nSpheres, 2):
radii = []
for y in range(margin, nSpheres-margin):
t = float(x+y)/(nSpheres+nSpheres) # 0 to 1 fraction.
# Sphere radii progress from 3/4 to full size.
thisRad = radius * (.75 + t*.25)
phase = pulse + float(x+y)/nSpheres
radii += [thisRad-.1 + .1*sin(phase * 2*pi)]
continue
test_spheres.updateRadii( # Update, but don't send yet.
x*nSpheres + margin, radii, send = False)
continue
test_spheres.sendRadii()
pass
# Options: color = [0.0, 1.0, 0.0], transform_id = 1, radius = 1.0
test_spheres.draw()
pass
# NE1 with test toploop, separate sphere VBO/IBO box/shader draws (test case 4)
# . 17,424 spheres (132x132 box/shader draw quads calls) 0.7 FPS
elif testCase == 4:
if test_ibo is None:
print ("Test case 4, %d^2 spheres\n %s." %
(nSpheres,
"Separate VBO/IBO shader/box buffer sphere calls, no DL"))
# Collect transformed bounding box vertices and offset indices.
# Start at the lower-left corner, offset so the whole pattern comes
# up centered on the origin.
cubeVerts = drawing_globals.shaderCubeVerts
cubeIndices = drawing_globals.shaderCubeIndices
C_indices = numpy.array(cubeIndices, dtype=numpy.uint32)
test_ibo = GLBufferObject(
GL_ELEMENT_ARRAY_BUFFER_ARB, C_indices, GL_STATIC_DRAW)
test_ibo.unbind()
C_verts = numpy.array(cubeVerts, dtype=numpy.float32)
test_vbo = GLBufferObject(
GL_ARRAY_BUFFER_ARB, C_verts, GL_STATIC_DRAW)
test_vbo.unbind()
pass
else:
drawing_globals.test_sphereShader.configShader(glpane)
glEnableClientState(GL_VERTEX_ARRAY)
test_vbo.bind() # Vertex data comes from the vbo.
glVertexPointer(3, GL_FLOAT, 0, None)
drawing_globals.test_sphereShader.setActive(True)
glDisable(GL_CULL_FACE)
glColor3i(127, 127, 127)
test_ibo.bind() # Index data comes from the ibo.
for x in range(nSpheres):
for y in range(nSpheres):
# From drawsphere_worker_loop().
pos = start_pos + (x+x/10+x/100) * V(1, 0, 0) + \
(y+y/10+y/100) * V(0, 1, 0)
radius = .5
glPushMatrix()
glTranslatef(pos[0], pos[1], pos[2])
glScale(radius,radius,radius)
glDrawElements(GL_QUADS, 6 * 4, GL_UNSIGNED_INT, None)
glPopMatrix()
continue
continue
drawing_globals.test_sphereShader.setActive(False)
glEnable(GL_CULL_FACE)
test_ibo.unbind()
test_vbo.unbind()
glDisableClientState(GL_VERTEX_ARRAY)
pass
# NE1 with test toploop,
# One DL around separate VBO/IBO shader/box buffer sphere calls (test case 5)
# . 17,424 spheres (1 box/shader DL draw call) 9.2 FPS
elif testCase == 5:
if test_dl is None:
print ("Test case 5, %d^2 spheres\n %s." %
(nSpheres,
"One DL around separate VBO/IBO shader/box buffer sphere calls"))
# Collect transformed bounding box vertices and offset indices.
# Start at the lower-left corner, offset so the whole pattern comes
# up centered on the origin.
cubeVerts = drawing_globals.shaderCubeVerts
cubeIndices = drawing_globals.shaderCubeIndices
C_indices = numpy.array(cubeIndices, dtype=numpy.uint32)
test_ibo = GLBufferObject(
GL_ELEMENT_ARRAY_BUFFER_ARB, C_indices, GL_STATIC_DRAW)
test_ibo.unbind()
C_verts = numpy.array(cubeVerts, dtype=numpy.float32)
test_vbo = GLBufferObject(
GL_ARRAY_BUFFER_ARB, C_verts, GL_STATIC_DRAW)
test_vbo.unbind()
# Wrap a display list around the draws.
test_dl = glGenLists(1)
glNewList(test_dl, GL_COMPILE_AND_EXECUTE)
glEnableClientState(GL_VERTEX_ARRAY)
test_vbo.bind() # Vertex data comes from the vbo.
glVertexPointer(3, GL_FLOAT, 0, None)
drawing_globals.test_sphereShader.setActive(True)
glDisable(GL_CULL_FACE)
glColor3i(127, 127, 127)
test_ibo.bind() # Index data comes from the ibo.
for x in range(nSpheres):
for y in range(nSpheres):
# From drawsphere_worker_loop().
pos = start_pos + (x+x/10+x/100) * V(1, 0, 0) + \
(y+y/10+y/100) * V(0, 1, 0)
radius = .5
glPushMatrix()
glTranslatef(pos[0], pos[1], pos[2])
glScale(radius,radius,radius)
glDrawElements(GL_QUADS, 6 * 4, GL_UNSIGNED_INT, None)
glPopMatrix()
continue
continue
drawing_globals.test_sphereShader.setActive(False)
glEnable(GL_CULL_FACE)
test_ibo.unbind()
test_vbo.unbind()
glDisableClientState(GL_VERTEX_ARRAY)
glEndList()
else:
glColor3i(127, 127, 127)
glCallList(test_dl)
pass
pass
# NE1 with test toploop,
# N column DL's around VBO/IBO shader/box buffer sphere calls (test case 6)
# . 2,500 (50x50) spheres 58 FPS
# . 10,000 (100x100) spheres 57 FPS
# . 17,424 (132x132) spheres 56 FPS
# . 40,000 (200x200) spheres 50 FPS
# . 90,000 (300x300) spheres 28 FPS
# . 160,000 (400x400) spheres 16.5 FPS
# . 250,000 (500x500) spheres 3.2 FPS
elif testCase == 6:
if test_dls is None:
print ("Test case 6, %d^2 spheres\n %s." %
(nSpheres,
"N col DL's around VBO/IBO shader/box buffer sphere calls"))
# Wrap n display lists around the draws (one per column.)
test_dls = glGenLists(nSpheres) # Returns ID of first DL in the set.
test_spheres = []
for x in range(nSpheres):
centers = []
radius = .5
radii = []
colors = []
# Each column is relative to its bottom sphere location. Start
# at the lower-left corner, offset so the whole pattern comes up
# centered on the origin.
start_pos = V(0, 0, 0) # So it doesn't get subtracted twice.
pos = sphereLoc(x, 0) - V(nSpheres/2.0, nSpheres/2.0, 0)
for y in range(nSpheres):
centers += [sphereLoc(0, y)]
# Sphere radii progress from 3/4 to full size.
t = float(x+y)/(nSpheres+nSpheres) # 0 to 1 fraction.
thisRad = radius * (.75 + t*.25)
radii += [thisRad]
# Colors progress from red to blue.
colors += [rainbow(t)]
continue
test_sphere = GLSphereBuffer()
test_sphere.addSpheres(centers, radii, colors, None, None)
test_spheres += [test_sphere]
glNewList(test_dls + x, GL_COMPILE_AND_EXECUTE)
glPushMatrix()
glTranslatef(pos[0], pos[1], pos[2])
test_sphere.draw()
glPopMatrix()
glEndList()
continue
pass
else:
shader = drawing_globals.test_sphereShader
shader.configShader(glpane) # Turn the lights on.
for x in range(nSpheres):
glCallList(test_dls + x)
continue
pass
pass
# NE1 with test toploop,
# N column VBO sets of shader/box buffer sphere calls (test case 7)
# . 2,500 (50x50) spheres 50 FPS
# . 10,000 (100x100) spheres 30.5 FPS
# . 17,424 (132x132) spheres 23.5 FPS
# . 40,000 (200x200) spheres 16.8 FPS
# . 90,000 (300x300) spheres 10.8 FPS
# . 160,000 (400x400) spheres 9.1 FPS
# . 250,000 (500x500) spheres 7.3 FPS
elif testCase == 7:
if test_spheres is None:
print ("Test case 7, %d^2 spheres\n %s." %
(nSpheres, "Per-column VBO/IBO chunk buffers"))
test_spheres = []
for x in range(nSpheres):
centers = []
radius = .5
radii = []
colors = []
for y in range(nSpheres):
centers += [sphereLoc(x, y)]
# Sphere radii progress from 3/4 to full size.
t = float(x+y)/(nSpheres+nSpheres) # 0 to 1 fraction.
thisRad = radius * (.75 + t*.25)
radii += [thisRad]
# Colors progress from red to blue.
colors += [rainbow(t)]
continue
_spheres1 = GLSphereBuffer()
_spheres1.addSpheres(centers, radii, colors, None, None)
test_spheres += [_spheres1]
continue
pass
else:
shader = drawing_globals.test_sphereShader
shader.configShader(glpane)
for chunk in test_spheres:
chunk.draw()
pass
# NE1 with test toploop,
# Short chunk VBO sets of shader/box buffer sphere calls (test case 8)
# . 625 (25x25) spheres 30 FPS, 79 chunk buffers of length 8.
# . 2,500 (50x50) spheres 13.6 FPS, 313 chunk buffers of length 8.
# . 10,000 (100x100) spheres 6.4 FPS, 704 chunk buffers of length 8.
# . 10,000 (100x100) spheres 3.3 FPS, 1250 chunk buffers of length 8.
# . 17,424 (132x132) spheres 2.1 FPS, 2178 chunk buffers of length 8.
# . 2,500 (50x50) spheres 33.5 FPS, 105 chunk buffers of length 24.
# . 17,424 (132x132) spheres 5.5 FPS, 726 chunk buffers of length 24.
#
# Subcase 8.1: CSDLs in a DrawingSet. (Initial pass-through version.)
# . 2,500 (50x50) spheres 36.5 FPS, 105 chunk buffers of length 24.
# . 5,625 (75x75) spheres 16.1 FPS, 235 chunk buffers of length 24.
# . 10,000 (100x100) spheres 0.5 FPS?!, 414 chunk buffers of length 24.
# Has to be <= 250 chunks for constant memory transforms?
# . 10,000 (100x100) spheres 11.8 FPS, 50 chunk buffers of length 200.
# After a minute of startup.
# . 10,000 (100x100) spheres 9.3 FPS, 200 chunk buffers of length 50.
# After a few minutes of startup.
# Subcase 8.2: CSDLs in a DrawingSet with transforms. (Pass-through.)
# . 10,000 (100x100) spheres 11.5 FPS, 50 chunk buffers of length 200.
#
# Subcase 8.1: CSDLs in a DrawingSet. (First HunkBuffer version.)
# Measured with auto-rotate on, ignoring startup and occasional outliers.
# As before, on a 2 core, 2.4 GHz Intel MacBook Pro with GeForce 8600M GT.
# HUNK_SIZE = 10000
# . 2,500 (50x50) spheres 140-200 FPS, 105 chunks of length 24.
# . 5,625 (75x75) spheres 155-175 FPS, 235 chunks of length 24.
# . 10,000 (100x100) spheres 134-145 FPS, 50 chunks of length 200.
# . 10,000 (100x100) spheres 130-143 FPS, 200 chunks of length 50.
# . 10,000 (100x100) spheres 131-140 FPS, 1,250 chunks of length 8.
# Chunks are gathered into hunk buffers, so no chunk size speed diff.
# . 17,424 (132x132) spheres 134-140 FPS, 88 chunks of length 200.
# . 17,424 (132x132) spheres 131-140 FPS, 2,178 chunks of length 8.
# HUNK_SIZE = 20000
# . 17,424 (132x132) spheres 131-140 FPS, 88 chunks of length 200.
# . 17,424 (132x132) spheres 130-141 FPS, 2,178 chunks of length 8.
# HUNK_SIZE = 10000
# . 40,000 (200x200) spheres 77.5-82.8 FPS, 5,000 chunks of length 8.
# . 90,000 (300x300) spheres 34.9-42.6 FPS, 11,2500 chunks of length 8.
# Spheres are getting down to pixel size, causing moire patterns.
# Rotate the sphere-array off-axis 45 degrees to minimize.
# (Try adding multi-sampled anti-aliasing, to the drawing test...)
# . 160,000 (400x400) spheres 26.4-27.1 FPS, 20,000 chunks of length 8.
# . 250,000 (500x500) spheres 16.8-17.1 FPS, 31,250 chunks of length 8.
# The pattern is getting too large, far-clipping is setting in.
# . 360,000 (600x600) spheres 11.6-11.8 FPS, 45,000 chunks of length 8.
# Extreme far-clipping in the drawing test pattern.
# HUNK_SIZE = 20000; no significant speed-up.
# . 40,000 (200x200) spheres 75.9-81.5 FPS, 5,000 chunks of length 8.
# . 90,000 (300x300) spheres 41.2-42.4 FPS, 11,250 chunks of length 8.
# Spheres are getting down to pixel size, causing moire patterns.
# . 160,000 (400x400) spheres 26.5-26.9 FPS, 20,000 chunks of length 8.
# . 250,000 (500x500) spheres 16.5-17.1 FPS, 31,250 chunks of length 8.
# . 360,000 (600x600) spheres 11.8-12.1 FPS, 45,000 chunks of length 8.
# HUNK_SIZE = 5000; no significant slowdown or CPU load difference.
# . 40,000 (200x200) spheres 81.0-83.8 FPS, 5,000 chunks of length 8.
# . 160,000 (400x400) spheres 27.3-29.4 FPS, 20,000 chunks of length 8.
# . 360,000 (600x600) spheres 11.7-12.1 FPS, 45,000 chunks of length 8.
#
# Retest after updating MacOS to 10.5.5, with TestGraphics, HUNK_SIZE = 5000
# . 40,000 (200x200) spheres 68.7-74.4 FPS, 5,000 chunks of length 8.
# . 90,000 (300x300) spheres 39.4-42.0 FPS, 11,250 chunks of length 8.
# . 160,000 (400x400) spheres 24.4-25.2 FPS, 20,000 chunks of length 8.
# Retest with glMultiDrawElements drawing indexes in use, HUNK_SIZE = 5000
# . 40,000 (200x200) spheres 52.8-54.4 FPS, 5,000 chunks of length 8.
# . 90,000 (300x300) spheres 22.8-23.3 FPS, 11,250 chunks of length 8.
# . 160,000 (400x400) spheres 13.5-15.2 FPS, 20,000 chunks of length 8.
#
# Retest with reworked halo/sphere shader, HUNK_SIZE = 5000 [setup time]
# . 17,424 (132x132) spheres 52.8-53.7 FPS, 2,178 chunks of length 8. [60]
# . 40,000 (200x200) spheres 29.3-30.4 FPS, 5,000 chunks of length 8.[156]
# . 90,000 (300x300) spheres 18.2-19.2 FPS, 11,250 chunks of length 8.[381]
# . 160,000 (400x400) spheres 10.2-11.6 FPS, 20,000 chunks of length 8.[747]
# Billboard drawing patterns instead of cubes, HUNK_SIZE = 5000 [setup time]
# . 17,424 (132x132) spheres 49.7-55.7 FPS, 2,178 chunks of length 8. [35]
# . 40,000 (200x200) spheres 39.6-40.8 FPS, 5,000 chunks of length 8. [88]
# . 90,000 (300x300) spheres 18.9-19.5 FPS, 11,250 chunks of length 8.[225]
# . 160,000 (400x400) spheres 11.2-11.7 FPS, 20,000 chunks of length 8.[476]
#
elif int(testCase) == 8:
doTransforms = False
doCylinders = False
if test_spheres is None:
# Setup.
print ("Test case 8, %d^2 primitives\n %s, length %d." %
(nSpheres, "Short VBO/IBO chunk buffers", chunkLength))
if testCase == 8.1:
print ("Sub-test 8.1, sphere chunks are in CSDL's in a DrawingSet.")
test_DrawingSet = DrawingSet()
elif testCase == 8.2:
print ("Sub-test 8.2, spheres, rotate with TransformControls.")
test_DrawingSet = DrawingSet()
doTransforms = True
elif testCase == 8.3:
print ("Sub-test 8.3, cylinder chunks are in CSDL's in a DrawingSet.")
test_DrawingSet = DrawingSet()
doCylinders = True
pass
if test_DrawingSet:
# note: doesn't happen in test 8.0, which causes a bug then. [bruce 090223 comment]
print "constructed test_DrawingSet =", test_DrawingSet
if USE_GRAPHICSMODE_DRAW:
print ("Use graphicsMode.Draw_model for DrawingSet in paintGL.")
pass
t1 = time.time()
if doTransforms:
# Provide several TransformControls to test separate action.
global numTCs, TCs
numTCs = 3
TCs = [TransformControl() for i in range(numTCs)]
pass
            def primCSDL(centers, radii, colors):
                """
                Build one CSDL chunk containing a sphere (or, when
                doCylinders is set, a cylinder) for each entry of the three
                parallel lists, add it to test_DrawingSet, and return it.

                @param centers: list of primitive center points
                @param radii: list of per-primitive radii (parallel to centers)
                @param colors: list of per-primitive colors (parallel to centers)
                """
                if not doTransforms:
                    csdl = ColorSortedDisplayList() # Transformless.
                else:
                    # Test pattern for TransformControl usage - vertical columns
                    # of TC domains, separated by X coord of first center point.
                    # Chunking will be visible when transforms are changed.
                    xCoord = centers[0][0] - start_pos[0] # Negate centering X.
                    xPercent = (xCoord /
                                (nSpheres + nSpheres/10 +
                                 nSpheres/100 - 1 + (nSpheres <= 1)))
                    xTenth = int(xPercent * 10 + .5)
                    csdl = ColorSortedDisplayList(TCs[xTenth % numTCs])
                    pass
                # Test selection using the CSDL glname.
                ColorSorter.pushName(csdl.glname)
                ColorSorter.start(glpane, csdl)
                for (color, center, radius) in zip(colors, centers, radii):
                    if not doCylinders:
                        # Through ColorSorter to the sphere primitive buffer...
                        drawsphere(color, center, radius,
                                   DRAWSPHERE_DETAIL_LEVEL)
                    else:
                        # Through ColorSorter to cylinder primitive buffer...
                        if not drawing_globals.cylinderShader_available():
                            print "warning: not cylinderShader_available(), error is likely:"
                        if (True and # Whether to do tapered shader-cylinders.
                            # Display List cylinders don't support taper.
                            glpane.glprefs.cylinderShader_desired()):
                            ###cylRad = (radius/2.0, (.75-radius)/2.0)
                            cylRad = (radius/1.5 - .167, .3 - radius/1.5)
                        else:
                            cylRad = radius/2.0
                            pass
                        endPt2 = center + V(0.5, 0.0, 0.0) # 0.5, -0.5)
                        drawcylinder(color, center, endPt2, cylRad)
                        # record endpoints for later use by other test code
                        global test_endpoints
                        test_endpoints += [(center, endPt2)]
                        pass
                    continue
                ColorSorter.popName()
                ColorSorter.finish(draw_now = True)
                test_DrawingSet.addCSDL(csdl)
                return csdl
if testCase == 8:
#bruce 090223 revised to try to avoid traceback
                def chunkFn(centers, radii, colors):
                    """
                    Build and return one GLSphereBuffer chunk from the
                    parallel lists of sphere centers, radii, and colors.
                    (Used instead of primCSDL for plain test case 8.)
                    """
                    res = GLSphereBuffer()
                    res.addSpheres(centers, radii, colors, None, None)
                    return res
pass
else:
chunkFn = primCSDL
pass
test_spheres = []
radius = .5
centers = []
radii = []
colors = []
global test_endpoints
test_endpoints = []
for x in range(nSpheres):
for y in range(nSpheres):
centers += [sphereLoc(x, y)]
# Sphere radii progress from 3/4 to full size.
t = float(x+y)/(nSpheres+nSpheres) # 0 to 1 fraction.
thisRad = radius * (.5 + t*.5)
radii += [thisRad]
# Colors progress from red to blue.
colors += [rainbow(t)]
# Put out short chunk buffers.
if len(centers) >= chunkLength:
test_spheres += [
chunkFn(centers, radii, colors) ]
centers = []
radii = []
colors = []
continue
continue
# Remainder fraction buffer.
if len(centers):
test_spheres += [chunkFn(centers, radii, colors)]
pass
print "Setup time", time.time() - t1, "seconds."
print "%d chunk buffers" % len(test_spheres)
pass
elif not initOnly: # Run.
test_Draw_8x(glpane)
pass
elif testCase == 100: #bruce 090102
# before making more of these, modularize it somehow
from commands.TestGraphics.test_selection_redraw import test_selection_redraw
test_class = test_selection_redraw
params = ( nSpheres, )
# note: test size is not directly comparable to other tests with same value of nSpheres
if test_Object is None \
or not isinstance(test_Object, test_class) \
or test_Object.current_params() != params: # review: same_vals?
# Setup.
if test_Object:
test_Object.destroy()
test_Object = test_class(*params)
test_Object.activate()
print test_Object
pass
# review: safe to change elif to if? not sure, GL state is only initialized below
elif not initOnly: # Run.
test_Object.draw_complete()
pass
pass
if not initOnly:
glMatrixMode(GL_MODELVIEW)
glFlush()
pass
first_time = False
return
| NanoCAD-master | cad/src/prototype/test_drawing.py |
# Copyright 2007-2009 Nanorex, Inc. See LICENSE file for details.
"""
test_connectWithState.py -- test the connectWithState features.
Also serves as scratch code for their improvement.
@author: Bruce
@version: $Id$
@copyright: 2007-2009 Nanorex, Inc. See LICENSE file for details.
History:
070830 bruce split this out of test_commands.py
"""
from prototype.test_connectWithState_constants import CYLINDER_HEIGHT_PREFS_KEY, CYLINDER_HEIGHT_DEFAULT_VALUE
from prototype.test_connectWithState_constants import cylinder_round_caps
##from test_connectWithState_constants import CYLINDER_VERTICAL_DEFAULT_VALUE
from prototype.test_connectWithState_constants import CYLINDER_WIDTH_DEFAULT_VALUE
### better to define here... ### REVISE
### REVISE: the default value should come from the stateref, when using the State macro,
# so it can be defined only in this file and not needed via globals by the PM
# REVISE: the following should just be the stateref's get_value and set_value methods.
# And -- to be realistic, we should find some setting that is more sensible to store in prefs,
# and make a prefs stateref for that setting rather than for what ought to be a model
# object attribute.
def cylinder_height():
    """
    Return the current cylinder height setting, read from the
    preferences database (falling back to the standard default).
    """
    import foundation.env as env # late import, matching this file's style
    prefs = env.prefs
    return prefs.get( CYLINDER_HEIGHT_PREFS_KEY, CYLINDER_HEIGHT_DEFAULT_VALUE)
def set_cylinder_height(val):
    """
    Store val as the cylinder height in the preferences database
    (under CYLINDER_HEIGHT_PREFS_KEY).
    """
    import foundation.env as env # late import, matching this file's style
    prefs = env.prefs
    prefs[CYLINDER_HEIGHT_PREFS_KEY] = val
# REVISE: for prefs state, what is defined in what file?
# can we make the PM code not even know whether specific state is defined in prefs or in the mode or in a node?
# (i don't yet know how, esp for state in a node where the choice of nodes depends on other state,
# but it's a good goal -- do we need to get the stateref itself from the command in a standard way? ### REVIEW)
# RELATED ISSUE: are staterefs useful when we don't have a UI to connect widgets to them? guess: yes.
# RELATED: can there be an object with modifiable attrs which refers to prefs values?
# If there was, the attr names would come from where? (the table in preferences.py i guess)
# Or, always define them in your own objs as needed using a State-like macro??
# === PM class
from prototype.test_connectWithState_PM import test_connectWithState_PM
# === GraphicsMode and Command classes
from prototype.test_commands import ExampleCommand
from geometry.VQT import V
from geometry.VQT import cross
from utilities.constants import pink, white
# TODO: import the following from somewhere
DX = V(1,0,0) # unit vector along the model X axis
DY = V(0,1,0) # unit vector along the model Y axis
ORIGIN = V(0,0,0) # model-space origin; the test cylinder is centered here
from graphics.drawing.CS_draw_primitives import drawcylinder
from graphics.drawing.CS_draw_primitives import drawsphere
from exprs.attr_decl_macros import Instance, State
from exprs.__Symbols__ import _self
from exprs.Exprs import call_Expr ## , tuple_Expr ### TODO: USE tuple_Expr
from exprs.Center import Center
from exprs.Rect import Rect # used to make our drag handle appearance
from exprs.DraggableHandle import DraggableHandle_AlongLine
from exprs.If_expr import If_expr
from widgets.prefs_widgets import ObjAttr_StateRef
from exprs.State_preMixin import State_preMixin
class _test_connectWithState_GM(ExampleCommand.GraphicsMode_class):
    """
    Custom GraphicsMode for test_connectWithState.

    Renders the command's cylinder (plus optional round caps and the
    width drag-handle) on top of whatever the superclass draws. All the
    tracked state it reads lives on self.command; the height comes from
    the prefs-backed cylinder_height() accessor.
    """
    def Draw_other(self):
        """
        Draw the cylinder for self.command, using its tracked state
        (color, width, direction) and the prefs-stored height.
        """
        cmd = self.command
        cyl_color = cmd.cylinderColor
        cyl_radius = cmd.cylinderWidth / 2.0
        axis = cmd.direction
        # same arithmetic/association as (direction * length) / 2.0
        half_extent = axis * cylinder_height()/2.0
        endpoint_1 = ORIGIN - half_extent
        endpoint_2 = ORIGIN + half_extent
        drawcylinder(cyl_color, endpoint_1, endpoint_2, cyl_radius, True)
        if cylinder_round_caps():
            # Cap each end with a sphere of the same radius.
            for endpoint in (endpoint_1, endpoint_2):
                drawsphere( cyl_color, endpoint, cyl_radius, 2)
        if cmd.widthHandleEnabled:
            cmd.widthHandle.draw()
        super(_test_connectWithState_GM, self).Draw_other()
        return
    pass
class test_connectWithState(State_preMixin, ExampleCommand):
    """
    Example command demonstrating the connectWithState feature.

    Its tracked state (declared with the State macro below) is edited
    both by PM widgets (see test_connectWithState_PM) and by a draggable
    width handle, and is drawn by _test_connectWithState_GM.
    """
    # class constants needed by mode API for example commands
    commandName = 'test_connectWithState-commandName'
    featurename = "Prototype: Test connectWithState"
    PM_class = test_connectWithState_PM
    # tracked state -- this initializes specially defined instance variables
    # which will track all their uses and changes so that connectWithState
    # works for them:
    cylinderVertical = State(bool, False)
    cylinderWidth = State(float, CYLINDER_WIDTH_DEFAULT_VALUE)
    # TODO: soon this will be the only use of this constant, so it can be inlined
    cylinderColor = State('color-stub', pink) # type should be Color (nim), but type is not yet used
    # note: you can add _e_debug = True to one or more of these State definitions
    # to see debug prints about some accesses to this state.
    GraphicsMode_class = _test_connectWithState_GM
    # init methods
    def __init__(self, commandSequencer):
        """
        Construct the command; passes the glpane to State_preMixin's
        __init__ and the commandSequencer to ExampleCommand's.
        """
        # I don't know why this method is needed. ##### REVIEW (super semantics), FIX or clean up
        # (note: that comment predates commandSequencer != glpane; after that, it's needed
        # due to different init args)
        glpane = commandSequencer.assy.glpane
        super(test_connectWithState, self).__init__(glpane) # State_preMixin.__init__
        ExampleCommand.__init__(self, commandSequencer) # (especially this part)
        return
    ## def __init__(self, glpane):
    ##     super(test_connectWithState, self).__init__(glpane)
    ####         # that only calls some mode's init method,
    ####         # so (for now) call this separately:
    ####         IorE_guest_mixin.__init__(self, glpane)
    ##     return
    # exprs-based formulae (and some compute methods)
    direction = If_expr( cylinderVertical, DY, DX )
    def _C_width_direction(self):
        """
        compute self.width_direction
        """
        # Note: to do this with a formula expr instead
        # would require cross_Expr to be defined,
        # and glpane.lineOfSight to be tracked.
        return cross( self.direction, self.env.glpane.lineOfSight )
    width_direction = _self.width_direction # so it can be used in formulae below
    # stub for handle test code [070912]
    widthHandleEnabled = True # stub
    ## widthHandle = Instance(Rect()) # stub
    h_offset = 0.5 + 0.2 # get it from handle? nah (not good if that changes with time); just make it fit.
    # or we could decide that handles ought to have useful fixed bounding boxes...
    ## widthHandle = Instance(Translate(Center(Rect(0.5)),
    ##                       width_direction * (cylinderWidth / 2.0 + h_offset) )) #stub
    widthHandle = Instance( DraggableHandle_AlongLine(
        appearance = Center(Rect(0.5, 0.5, white)),
        ### REVIEW:
        # Can't we just replace the following with something based on the formula for the position,
        # width_direction * (cylinderWidth / 2.0 + h_offset)
        # ?
        # As it is, I have to manually solve that formula for origin and direction to pass in,
        # i.e. rewrite it as
        # position = origin + direction * cylinderWidth
        ## height_ref = cylinderWidth, ###WRONG
        ## height_ref = ObjAttr_StateRef( _self, 'cylinderWidth'),
        ## ## AssertionError: ObjAttr_StateRef fallback is nim -- needed for S._self
        height_ref = call_Expr( ObjAttr_StateRef, _self, 'cylinderWidth'), # guess at workaround; #e we need a more principled way!
        ### REVIEW: efficient enough? (guess: overhead only happens once, so yes)
        # could we say instead something like: height_ref = Variable(cylinderWidth) ?? Or VariableRef? Or StateRef_to ?
        origin = width_direction * h_offset, # note: also includes cylinder center, but that's hardcoded at ORIGIN
        direction = width_direction / 2.0,
        sbar_text = "cylinder width", ### TODO: make it a formula, include printed value of width?
        range = (0.1, 10),
        ### TODO: DraggableHandle_AlongLine should take values from the stateref if this option is not provided;
        # meanwhile, we ought to pass a consistent value!
        ))
    # Note: the Instance is required; but I'm not sure if it would be
    # if we were using a fuller exprs superclass or init code. [bruce 070912]
    def cmd_Bigger(self):
        """
        Grow the cylinder: width (tracked state) and height (prefs)
        each increase by 0.5. No maximum is enforced here (TODO below).
        """
        self.cylinderWidth += 0.5
        set_cylinder_height( cylinder_height() + 0.5)
        # TODO: enforce maxima
        return
    def cmd_Smaller(self):
        """
        Shrink the cylinder: width and height each decrease by 0.5,
        clamped to a minimum of 0.1.
        """
        self.cylinderWidth -= 0.5
        set_cylinder_height( cylinder_height() - 0.5)
        # enforce minima (###BUG: not the same ones as declared in the PM)
        ### REVISE: min & max should be declared in State macro and (optionally) enforced by it
        if self.cylinderWidth < 0.1:
            self.cylinderWidth = 0.1
        if cylinder_height() < 0.1:
            set_cylinder_height(0.1)
        return
    pass
# end
| NanoCAD-master | cad/src/prototype/test_connectWithState.py |
# Copyright 2007-2008 Nanorex, Inc. See LICENSE file for details.
"""
test_connectWithState_PM.py -- Property Manager for test_connectWithState command.
@author: Bruce
@version: $Id$
@copyright: 2007-2008 Nanorex, Inc. See LICENSE file for details.
History:
070830 bruce split this out of test_command_PMs.py
"""
# This is scratch code for testing and demonstrating the "connectWithState" feature.
# It can be used with various types of state whose uses and changes are tracked in
# a standard way. For now, that kind of state includes only:
# - env.prefs[prefs_key] values (some examples below);
# - instance variables defined with the State macro in suitably defined classes (some examples below);
# - internal state in exprs created by the exprs module.
# Later we will add to that:
# - all state tracked by Undo
# and we'll also optimize the State macro and make it easier to use.
from widgets.prefs_widgets import Preferences_StateRef, Preferences_StateRef_double # TODO: remove these imports, get the refs from the model
from widgets.prefs_widgets import ObjAttr_StateRef # TODO: shorter or clearer name -- attribute_ref ?
from prototype.test_connectWithState_constants import CYLINDER_HEIGHT_PREFS_KEY, CYLINDER_HEIGHT_DEFAULT_VALUE
from prototype.test_connectWithState_constants import CYLINDER_ROUND_CAPS_PREFS_KEY, CYLINDER_ROUND_CAPS_DEFAULT_VALUE # TODO: get prefs refs from model so these are not needed here
##from test_connectWithState_constants import CYLINDER_VERTICAL_DEFAULT_VALUE
##from test_connectWithState_constants import CYLINDER_WIDTH_DEFAULT_VALUE
from prototype.test_command_PMs import ExampleCommand1_PM
from PM.PM_GroupBox import PM_GroupBox
from PM.PM_DoubleSpinBox import PM_DoubleSpinBox
from PM.PM_PushButton import PM_PushButton
from PM.PM_CheckBox import PM_CheckBox
# ===
class test_connectWithState_PM( ExampleCommand1_PM):
    """
    Property Manager for the test_connectWithState command.

    Each widget is connected via connectWithState to a state reference:
    cylinder height and round-caps are preferences values
    (Preferences_StateRef*), while cylinder width and the vertical flag
    are State attributes on self.command (ObjAttr_StateRef).
    """
    # does not use GBC; at least Done & Cancel should work
    title = "test connectWithState"
    def _addGroupBoxes(self):
        """
        Add the groupboxes for this Property Manager.
        """
        self.pmGroupBox1 = PM_GroupBox( self, title = "settings")
        self._loadGroupBox1(self.pmGroupBox1)
        self.pmGroupBox2 = PM_GroupBox( self, title = "commands")
        self._loadGroupBox2(self.pmGroupBox2)
        return
    _sMaxCylinderHeight = 20 ### TODO: ask the stateref for this
    def _loadGroupBox1(self, pmGroupBox):
        """
        Load widgets into groupbox 1 (passed as pmGroupBox).
        """
        # cylinder height (a double, stored as a preferences value)
        cylinderHeight_stateref = Preferences_StateRef_double(
            CYLINDER_HEIGHT_PREFS_KEY,
            CYLINDER_HEIGHT_DEFAULT_VALUE )
            ### TODO: ask model object for this ref; this code should not need to know what kind it is (from prefs or model)
        self.cylinderHeightSpinbox = \
            PM_DoubleSpinBox( pmGroupBox,
                              label = "cylinder height:",
                              ## value = CYLINDER_HEIGHT_DEFAULT_VALUE,
                              ## # guess: default value or initial value (guess they can't be distinguished -- bug -- yes, doc confirms)
                              ## setAsDefault = True,
                              ### TODO: get all the following from the stateref, whenever the connection to state is made
                              minimum = 3,
                              maximum = self._sMaxCylinderHeight,
                              singleStep = 0.25,
                              decimals = self._sCoordinateDecimals,
                              suffix = ' ' + self._sCoordinateUnits )
        # REVIEW: is it ok that the above will set some wrong defaultValue,
        # to be immediately corrected by the following connection with state?
        self.cylinderHeightSpinbox.connectWithState(
            cylinderHeight_stateref,
            debug_metainfo = True
        )
        # ==
        # cylinder width (a double, stored in the command object,
        # defined there using the State macro -- note, this is not yet a good
        # enough example for state stored in a Node)
        cylinderWidth_stateref = ObjAttr_StateRef( self.command, 'cylinderWidth')
        ## TEMPORARY: just make sure it's defined in there
        junk = cylinderWidth_stateref.defaultValue
        self.cylinderWidthSpinbox = \
            PM_DoubleSpinBox( pmGroupBox,
                              label = "cylinder width:",
                              ## value = defaultValue,
                              ## setAsDefault = True,
                              ## ### REVISE: the default value should come from the cylinderWidth_stateref
                              # (and so, probably, should min, max, step, units...)
                              minimum = 0.1,
                              maximum = 15.0,
                              singleStep = 0.1,
                              decimals = self._sCoordinateDecimals,
                              suffix = ' ' + self._sCoordinateUnits )
        self.cylinderWidthSpinbox.connectWithState(
            cylinderWidth_stateref,
            debug_metainfo = True )
        # ==
        # cylinder round caps (boolean)
        cylinderRoundCaps_stateref = Preferences_StateRef( CYLINDER_ROUND_CAPS_PREFS_KEY,
                                                           CYLINDER_ROUND_CAPS_DEFAULT_VALUE ) ### TODO: get from model
        ## TEMPORARY: just make sure it's defined in there
        junk = cylinderRoundCaps_stateref.defaultValue
        self.cylinderRoundCapsCheckbox = PM_CheckBox(pmGroupBox, text = 'round caps on cylinder')
        ## self.cylinderRoundCapsCheckbox.setDefaultValue(CYLINDER_ROUND_CAPS_DEFAULT_VALUE)
        ## # note: setDefaultValue is an extension to the PM_CheckBox API, not yet finalized
        self.cylinderRoundCapsCheckbox.connectWithState(
            cylinderRoundCaps_stateref,
            debug_metainfo = True )
        # ==
        # cylinder vertical or horizontal (boolean)
        cylinderVertical_stateref = ObjAttr_StateRef( self.command, 'cylinderVertical' )
        self.cylinderVerticalCheckbox = PM_CheckBox(pmGroupBox, text = 'cylinder is vertical')
        ## self.cylinderVerticalCheckbox.setDefaultValue(CYLINDER_VERTICAL_DEFAULT_VALUE)
        ## ### REVISE: the default value should come from the stateref
        self.cylinderVerticalCheckbox.connectWithState(
            cylinderVertical_stateref,
            debug_metainfo = True )
        return # from _loadGroupBox1
    def _loadGroupBox2(self, pmGroupBox): ### RENAME button attrs
        """
        Load the Bigger/Smaller command buttons into groupbox 2
        (passed as pmGroupBox).
        """
        self.startButton = \
            PM_PushButton( pmGroupBox,
                           label = "",
                           text = "Bigger",
                           spanWidth = False ) ###BUG: button spans PM width, in spite of this setting
        self.startButton.setAction( self.button_Bigger, cmdname = "Bigger")
        self.stopButton = \
            PM_PushButton( pmGroupBox,
                           label = "",
                           text = "Smaller",
                           spanWidth = False )
        self.stopButton.setAction( self.button_Smaller, cmdname = "Smaller")
        return
    def button_Bigger(self):
        """
        Slot for the Bigger button; delegates to the command object.
        """
        self.command.cmd_Bigger()
    def button_Smaller(self):
        """
        Slot for the Smaller button; delegates to the command object.
        """
        self.command.cmd_Smaller()
    def _addWhatsThisText(self):
        """
        What's This text for some of the widgets in the Property Manager.
        """
        self.cylinderHeightSpinbox.setWhatsThis("cylinder height (stored in prefs)")
        self.cylinderWidthSpinbox.setWhatsThis("cylinder width (stored as State in the command object)")
        return
    pass # end of class
# end
| NanoCAD-master | cad/src/prototype/test_connectWithState_PM.py |
# Copyright 2007-2009 Nanorex, Inc. See LICENSE file for details.
"""
test_commands.py -- try out using mode classes as command classes for a command stack;
find out the minimal needs for a command with PM (and improve them);
prototype a command stack
@author: Bruce
@version: $Id$
@copyright: 2007-2009 Nanorex, Inc. See LICENSE file for details.
How to run these test commands:
- set the debug_pref "test_commands enabled (next session)"
- quit and rerun NE1
- the debug menu's submenu "other" should contain new menu commands
defined in this file, such as ExampleCommand1. (Note: not all of them
are contiguous in that submenu, since the entire submenu is sorted
alphabetically.)
Cosmetic bugs:
- closing groupboxes makes them flash
TODO:
Split into several files in a subdirectory.
When cleaning up PropMgrBaseClass etc, note some other things Mark wants to work on soon:
- add message boxes to users of PropertyManagerMixin, in a way easily moved over to PropMgrBaseClass when they use that
- port more modes over to PropMgrBaseClass, e.g. extrudeMode
- split some PMs/modes into more than one smaller PM (especially MMKit).
Note: See PM_ElementSelector.py. Mark 2007-08-07.
Fix problems with _Example_TemporaryCommand_useParentPM (commented where it's used)
"""
from prototype.test_command_PMs import ExampleCommand1_PM
##from prototype.test_command_PMs import ExampleCommand2_PM
from PM.PM_WidgetsDemoPropertyManager import PM_WidgetsDemoPropertyManager
from command_support.GraphicsMode import GraphicsMode
from command_support.Command import Command
# ==
class _minimalGraphicsMode(GraphicsMode):
    """
    Minimal GraphicsMode: draws the current Part on top of whatever
    the GraphicsMode superclass draws (axes, compass, etc), with no
    highlighting support.
    """
    # [not sure whether this comment is obs, as of 080910:]
    # this is enough to draw the axes, compass, etc, and the model, but not with highlighting (model or expr):
    # What we need is some of what's in SelectAtoms_Command and maybe some of what's in testmode.
    # It's more efficient to refactor those to get a new generally useful GraphicsMode,
    # than to build them up separately here. HOWEVER, for the purpose of testing Command/GraphicsMode split,
    # this one might be enough, if we split it. So do that below.
    def Draw_model(self):
        """
        Superclass model drawing, plus the current Part.
        """
        super(_minimalGraphicsMode, self).Draw_model()
        self.glpane.part.draw(self.glpane) # draw the current Part
    pass
class _minimalCommand(Command):
    """
    Minimal Command subclass, paired with _minimalGraphicsMode.
    (Kept as a possible value for _superclass below.)
    """
    GraphicsMode_class = _minimalGraphicsMode
    pass
## this worked a long time -- _superclass = _minimalCommand
# but time to try SelectAtoms again now that it's split [bruce 080123]
from commands.SelectAtoms.SelectAtoms_Command import SelectAtoms_Command
# common superclass for all the example commands defined below:
_superclass = SelectAtoms_Command
# ==
class ExampleCommand(_superclass):
    """
    Abstract superclass for the example commands in this file.
    Specific command subclasses need to define the following class constants:
    commandName, and PM_class.
    Some of them also need to override mode methods, such as Draw_model and/or Draw_other.
    """
    # whether the init code should enter this command as a temporary command
    test_commands_start_as_temporary_command = False
    PM_class = None # if not overridden, means we need no PM (BUG: we'll still get a PM tab)
    featurename = "Prototype: Undocumented Example Command"
    from utilities.constants import CL_EDIT_GENERIC
    command_level = CL_EDIT_GENERIC
    # name-mangled flag marking this class itself as abstract
    # (presumably checked by command-registration code elsewhere -- TODO confirm)
    __abstract_command_class = True
# ==
##class _Example_TemporaryCommand_useParentPM(ExampleCommand):
## # BUGS:
## # - doesn't call parentGraphicsMode.Draw; maybe should use something like
## # Overdrawing_GraphicsMode_preMixin from TemporaryCommand.py ####
## # [see also Delegating_GraphicsMode]
## #
## # Note: this works if you have your own PM; perhaps untested when you don't.
## # Warning: currently only one level of temporary commands is permitted;
## # if you enter one of these commands and then enter another TemporaryCommand (e.g. Zoom Tool)
## # it exits the first temporary commmand you were in.
## command_should_resume_prevMode = True #bruce 071011, to be revised (replaces need for customized Done method)
## test_commands_start_as_temporary_command = True # enter in different way
## ### maybe todo: set up a similar thing in Command API?
## # it would replace all calls of userEnterCommand.
## # [later, 080730: that idea might be obsolete or invalid in its details.
## # revisit after ongoing command stack refactoring.]
## pass
# Formerly a distinct temporary-command variant (see the commented-out
# class above); currently just an alias for ExampleCommand.
_Example_TemporaryCommand_useParentPM = ExampleCommand
# ==
class ExampleCommand1(ExampleCommand):
    """
    Example command, which uses behavior similar to SelectAtoms_Command
    [but, _superclass is now revised...].
    [Which inherits class Command.]
    """
    commandName = 'ExampleCommand1-commandName' # internal #e fix init code in basicMode to get it from classname?
    featurename = "Prototype: Example Command 1"
    # note: init code in basicMode won't even run now, since superclass defs it i think --
    # actually, not sure abt that, probably it doesn't
    # property manager class used when this command is entered
    PM_class = ExampleCommand1_PM
    # note: ok_btn_clicked, etc, must be defined in our PM class (elsewhere),
    # not in this class.
    pass
##class ExampleCommand2( _Example_TemporaryCommand_useParentPM): # WRONG: this has own PM, so it'll mess up parent one.
## """
## Like ExampleCommand1, but use GBC (GeneratorBaseClass).
## (This difference shows up only in our PM class.)
## """
## commandName = 'ExampleCommand2-commandName'
## featurename = "Prototype: Example Command 2"
## PM_class = ExampleCommand2_PM
## pass
# ==
class PM_WidgetDemo(ExampleCommand):
    """
    Used to demo all the PM widgets.
    @see: PM_WidgetsDemoPropertyManager in PM_WidgetsDemoPropertyManager.py.
    """
    # Note: this is no longer added to the UI. I don't know why it was removed.
    # I know that for awhile it was broken due to a bug. [bruce 071030 comment]
    commandName = 'PM_WidgetDemo-commandName' # internal, not user-visible
    featurename = "Test Command: PM_Widgets Demo"
    # property manager class that actually builds the demo widgets
    PM_class = PM_WidgetsDemoPropertyManager
    pass
# ==
# for init code which makes these available from the UI, see test_commands_init.py
# end
| NanoCAD-master | cad/src/prototype/test_commands.py |
# Copyright 2007-2008 Nanorex, Inc. See LICENSE file for details.
"""
test_command_PMs.py - property manager classes for test_commands.py.
See that file for more info.
@version: $Id$
"""
from PM.PM_Dialog import PM_Dialog
from PM.PM_GroupBox import PM_GroupBox
from PM.PM_DoubleSpinBox import PM_DoubleSpinBox
from PM.PM_ComboBox import PM_ComboBox
##from command_support.GeneratorBaseClass import GeneratorBaseClass
import time
class PM_Dialog_with_example_widgets( PM_Dialog):
    """
    [private] PM_Dialog with some PM widgets common to several examples here.

    Subclasses must define the class constants title, pmName, and iconPath;
    they are deliberately NOT defined here so that forgetting them in a
    subclass causes an error.
    """
    # NOTE: contains some code copied (and perhaps modified) from AtomGeneratorDialog.py

    # title = "title"
    # pmName = "pm" + title
    # iconPath = "path-to-some-icon.png"

    #k all needed?
    _sMinCoordinateValue = -30.0
    _sMaxCoordinateValue = 30.0
    _sStepCoordinateValue = 0.1
    _sCoordinateDecimals = 4
    _sCoordinateUnit = 'Angstrom'
    _sCoordinateUnits = _sCoordinateUnit + 's'
    _sElementSymbolList = ["H","O","C","S"]

    def __init__(self):
        PM_Dialog.__init__( self, self.pmName, self.iconPath, self.title )
        msg = "Example command created at %s" % time.asctime()
        # This causes the "Message" box to be displayed as well.
        self.MessageGroupBox.insertHtmlMessage( msg, setAsDefault = False )
        return

    def _addGroupBoxes(self):
        """
        Add group boxes to this Property Manager.
        """
        self.pmGroupBox1 = PM_GroupBox( self, title = "Atom Parameters" )
        self._loadGroupBox1(self.pmGroupBox1)
        return

    def _loadGroupBox1(self, inPmGroupBox):
        """
        Load widgets into group box 1.

        @param inPmGroupBox: the group box to populate
        """
        # User input to specify what type of element/atom to generate
        elementComboBoxItems = self._sElementSymbolList
        self.elementComboBox = \
            PM_ComboBox( inPmGroupBox,
                         label = "Elements :",
                         choices = elementComboBoxItems,
                         index = 0,
                         setAsDefault = True,
                         spanWidth = False )
        # User input to specify x-coordinate
        # of the generated atom's position.
        self.xCoordinateField = \
            PM_DoubleSpinBox( inPmGroupBox,
                              label = "x :",
                              value = 0.0,
                              setAsDefault = True,
                              minimum = self._sMinCoordinateValue,
                              maximum = self._sMaxCoordinateValue,
                              singleStep = self._sStepCoordinateValue,
                              decimals = self._sCoordinateDecimals,
                              suffix = ' ' + self._sCoordinateUnits )
        return

    def _addWhatsThisText(self):
        """
        What's This text for some of the widgets in the Property Manager.
        """
        # bugfix: the markup used to read
        #   "<b>x</b><p>: The x-coordinate (up to </p>30.0Angstroms) ..."
        # i.e. a stray colon, a misplaced </p>, and no space before the
        # units; corrected to well-formed rich text.
        self.xCoordinateField.setWhatsThis(
            "<b>x</b><p>The x-coordinate (up to "
            + str( self._sMaxCoordinateValue )
            + " " + self._sCoordinateUnits
            + ") of the Atom in "
            + self._sCoordinateUnits + '.</p>')

    pass # end of class PM_Dialog_with_example_widgets
# ==
class ExampleCommand1_PM( PM_Dialog_with_example_widgets): # these supers are needed (but 'object' is evidently not needed)
"""
Property Manager for Example Command 1 -- simplest that doesn't use GBC; buttons are noops
"""
# <title> - the title that appears in the property manager header.
title = "Example Command 1"
# <pmName> - the name of this property manager. This will be set to
# the name of the PropMgr (this) object via setObjectName(). ###k used only for debugging??
pmName = "pm" + title
# <iconPath> - full path to PNG file that appears in the header.
iconPath = "ui/actions/Commands Toolbar/BuildAtoms/InsertAtom.png" ###e REVISE
# bruce added these to make it work w/o GBC.
# (It doesn't need restore_defaults_btn_clicked because PropMgrBaseClass defines that itself.
# So does GBC, but to a noop method. So GBC better be inherited *after* PropMgrBaseClass!)
def ok_btn_clicked(self):
print "ok_btn_clicked, doing Done in", self.command
self.command.command_Done()
pass
def cancel_btn_clicked(self):
print "cancel_btn_clicked, doing Cancel in", self.command
self.command.command_Cancel()
pass
def preview_btn_clicked(self):
print "preview_btn_clicked (noop or nim, I think)", self
pass
def __init__(self, command = None):
# removed win argument, get it from command [bruce 080910]
win = command.win
print "creating", self ####
self.command = command #bruce 080909 renamed commandrun -> command, in all classes in package prototype
PM_Dialog_with_example_widgets.__init__( self ) ## ok before the next line? @@@
if 1: # bruce added these, otherwise various AttributeErrors [still true??]
self.win = win # needed in PropMgrBaseClass.show
self.pw = win.activePartWindow() # same
return
pass # end of class ExampleCommand1_PM
##class ExampleCommand2_PM( PM_Dialog_with_example_widgets, GeneratorBaseClass):
## """
## Property Manager for Example Command 2 -- simplest that uses GBC; generates a comment (ignores widget values)
## """
##
## title = "Example Command 2"
## pmName = "pm" + title
## iconPath = "ui/actions/Command Toolbar/BuildAtoms/InsertAtom.png" #e REVISE
##
## # need these, at least to use Done:
## prefix = "Thing2" # for names created by GBC [required when create_name_from_prefix is true (not sure about otherwise)]
## cmdname = "Generate a Thing2" # Undo/history cmdname used by GBC [optional, but affects history messages]
##
## def __init__(self, win, command = None):
## print "creating", self ####
## self.command = command
##
## PM_Dialog_with_example_widgets.__init__( self )
## GeneratorBaseClass.__init__( self, win)
## return
##
## def gather_parameters(self): ###REVIEW: the exception from this gets printed but not as a traceback...
## return (1,2) ###e not yet grabbed from the widgets
##
## def build_struct(self, name, params, position):
## """
## ... The return value should be the new structure, i.e. some flavor of a Node,
## which has not yet been added to the model. ...
## By convention ... the new node's name should be set to self.name,
## which the caller will have set to self.prefix appended with a serial number.
## """
## print "build_struct(", self, name, params, position, ")"###
## assert self.name == name # true by test and by examining GBC code
## # ignoring params and position for now
## assy = self.win.assy
## from model.Comment import Comment
## return Comment(assy, name, "comment created at " + time.asctime())
##
## #e bugs that remain:
## # - widget values not used for creating the thing
## # - preview for comment is not visible except in MT tab or history
## # - restore defaults does nothing useful
## # - whats this button does nothing
## # - when we leave this PM, the PM tab remains, tho it's empty
##
## def ok_btn_clicked(self):
## print "ok_btn_clicked, doing super then Done (kluge)", self
## GeneratorBaseClass.ok_btn_clicked(self)
## self.command.command_Done() ###k both command and Done -- and, kluge, instead GBC should call a done method in self.command
## pass
## def cancel_btn_clicked(self):
## print "cancel_btn_clicked, doing super then Done (kluge)", self
## GeneratorBaseClass.cancel_btn_clicked(self)
## self.command.command_Done() #update 2008-09-26: should this be command_Cancel()? [-- Ninad]
## pass
##
## pass # end of class ExampleCommand2_PM
# end
| NanoCAD-master | cad/src/prototype/test_command_PMs.py |
# Copyright 2007-2008 Nanorex, Inc. See LICENSE file for details.
"""
test_commands_init.py -- make the commands in test_commands available in the UI.
@author: Bruce
@version: $Id$
@copyright: 2007-2008 Nanorex, Inc. See LICENSE file for details.
How to run these test commands: see test_commands.py docstring.
This is initialized in startup_misc.py as of 071030.
"""
from utilities.debug import register_debug_menu_command
import utilities.EndUser as EndUser, utilities.Initialize as Initialize
# ==
def construct_cmdrun( cmd_class, commandSequencer):
"""
Construct and return a new "command run" object, for use in the given commandSequencer.
Don't start it -- there is no obligation for the caller to ever start it;
and if it does, it's allowed to do that after other user events and model changes
happened in the meantime [REVIEW THAT, it's not good for "potential commands"] --
but it should not be after this commandSequencer or its assy get replaced
(e.g. by Open File).
"""
# (we use same interface as <mode>.__init__ for now,
# though passing assy might be more logical)
cmdrun = cmd_class(commandSequencer)
if not hasattr(cmdrun, 'glpane'):
print "bug: no glpane in cmdrun %r: did it forget to call ExampleCommand.__init__?" % (cmdrun,)
if not hasattr(cmdrun, 'commandSequencer'):
print "bug: no commandSequencer in cmdrun %r: did it forget to call ExampleCommand.__init__?" % (cmdrun,)
###e should also put it somewhere, as needed for a mode ####DOIT
if 'kluge, might prevent malloc errors after removing pm from ui (guess)':
import foundation.changes as changes
changes.keep_forever(cmdrun)
return cmdrun
def start_cmdrun( cmdrun):
## ideally: cmd.Start() #######
commandSequencer = cmdrun.commandSequencer
# bruce 071011
# note: was written for commands with no PM of their own, but was only tested for a command that has one (and works)...
# also do we need the Draw delegation to parentCommand as in TemporaryCommand_Overdrawing? ### REVIEW
#
# update, bruce 080730:
# TODO: print a warning if cmdrun.command_level is not something
# we'd consider "nestable", per the likely intent of starting it
# as a temporary command.
commandSequencer.userEnterCommand( cmdrun, always_update = True)
print "done with start_cmdrun for", cmdrun
# returns as soon as user is in it, doesn't wait for it to "finish" -- so run is not a good name -- use Enter??
# problem: Enter is only meant to be called internally by glue code in CommandSequencer.
# solution: use a new method, maybe Start. note, it's not guaranteed to change to it immediately! it's like Done (newmode arg).
return
def enter_example_command(widget, example_command_classname):
    """
    Debug-menu entry point: enter the example command named by
    example_command_classname, using the given widget (a GLPane).
    """
    ## assert isinstance(widget, GLPane) # assumed by _reinit_modes; assertion disabled to fix an import cycle
    glpane = widget
    # (a large developer-only "reload before use" block, long broken due to
    #  file splitting, was removed here; see VCS history if needed)
    from prototype.test_commands_init import enter_example_command_doit
    enter_example_command_doit(glpane, example_command_classname)
    return
def enter_example_command_doit(glpane, example_command_classname):
    """
    Look up the example command class by name (registered in this module's
    globals by initialize()), construct a command run for it, and enter it.
    """
    cmd_class = globals()[example_command_classname]
    # kluge to defeat the userEnterCommand comparison of commandName --
    # seems to work; pretty sure it's needed for now (and still needed even
    # with the new command api).
    # TODO: replace it with a new option to pass to that method.
    cmd_class.commandName += 'x'
    commandSequencer = glpane.assy.commandSequencer
    start_cmdrun( construct_cmdrun(cmd_class, commandSequencer) )
    return
def initialize():
    """
    Register the example commands in the debug menu. Safe to call more than
    once (later calls are no-ops, via the Initialize module).
    """
    if (Initialize.startInitialization(__name__)):
        return

    # initialization code (note: it's all a kluge, could be cleaned up
    # pretty easily). The global declarations are needed because
    # enter_example_command_doit looks classes up via
    # globals()[example_command_classname].
    global ExampleCommand1, test_connectWithState, ExampleCommand2E

    from prototype.test_commands import ExampleCommand1
    from prototype.test_connectWithState import test_connectWithState
    from prototype.example_expr_command import ExampleCommand2E

    classnames = ["ExampleCommand1",
                  "test_connectWithState",
                  "ExampleCommand2E"]
    for classname in classnames:
        cmdname = classname # for now
        # note: classname is bound as a lambda default so each menu entry
        # captures its own class name (avoids the late-binding-closure trap)
        register_debug_menu_command( cmdname, (lambda widget, classname = classname: enter_example_command(widget, classname)) )

    Initialize.endInitialization(__name__)
    return
# end
| NanoCAD-master | cad/src/prototype/test_commands_init.py |
# Copyright 2007 Nanorex, Inc. See LICENSE file for details.
"""
CommandToolbar.py - controls the main hierarchical toolbar for giving commands
@author: Ninad
@version: $Id$
@copyright: 2007 Nanorex, Inc. See LICENSE file for details.
Module classification: [bruce 071228]
This mainly contains control logic (especially regarding flyout
toolbar behavior and appearance) for any hierarchical
command toolbar which operates like NE1's main Command Toolbar.
Accordingly, after future refactoring, it could be part of a
general-purpose "command toolbar widget" package. But at the
moment, it inherits a hardcoding of NE1's specific Command Toolbar
layout and contents, via its superclass Ui_CommandToolbar,
which is what actually needs to be refactored.
So we have two imperfect choices for classifying it:
(1) put it in ne1_ui just because of its superclass; or
(2) classify it as a widget independent of ne1_ui,
but tolerate an import in it from ne1_ui for now,
until the superclass gets refactored (which might
not happen soon).
I think it's best to classify it "optimisitically"
using scheme (2), and let the wrong-direction import
remind us that the superclass needs refactoring.
I am assuming that will cause no inter-package import cycle,
since no cycle has been reported involving this module.
So for now, the superclass is in "ne1_ui", but this module
can go into its own toplevel "CommandToolbar" package.
(Not just into "widgets" or a subpackage, since it is a major
component of an app, and might still assume it's a singleton
or have client code which assumes that. In future, we can
consider making the general part non-singleton and putting
it into its own subpackage of "widgets".)
After future refactoring of the superclass Ui_CommandToolbar
(described in its module classification comment), its general
part would join this module as part of the same package
for a "general command toolbar widget".
BTW, I think it should be the case, and might already
be the case, that whatever state this class needs to help
maintain (even given its not-yet-refactored superclass)
will actually reside in the Command Sequencer and/or
in running commands, along with the logic for how
to modify that state. This class's object is just the
UI that makes the Command Sequencer state visible
and user-controllable.
BTW2, another needed refactoring (perhaps more related
to Ui_CommandToolbar than to this module) is to make
sure commands don't need to update their own UI elements
in various ways; for more on this see
http://www.nanoengineer-1.net/mediawiki/index.php?title=NE1_module/package_organization#Command_Toolbar
History:
ninad 20070109: created this in QT4 branch and subsequently modified it.
ninad 20070125: moved ui generation code to a new file Ui_CommandManager
ninad 20070403: implemented 'Subcontrol Area' in the command toolbar, related
changes (mainly revised _updateFlyoutToolBar,_setFlyoutDictionary)
and added/modified several docstrings
ninad 20070623: Moved _createFlyoutToolbar from modes to here and related changes
mark 20071226: Renamed CommandManager to CommandToolbar.
ninad 20080715: a) Workaround for Qt4.3 bug 2916 (disabled 'extension indicator'
button ">>" in the flyout toolbar. b) some refactoring to move common code
in classes FlyoutToolbar and NE1_QWidgetAction.
"""
from PyQt4 import QtGui
from PyQt4.Qt import Qt
from PyQt4.Qt import SIGNAL
from PyQt4.Qt import QToolButton
from PyQt4.Qt import QMenu
from PyQt4.Qt import QWidgetAction
from foundation.wiki_help import QToolBar_WikiHelp
from ne1_ui.toolbars.Ui_CommandToolbar import Ui_CommandToolbar
from utilities.debug import print_compact_traceback, print_compact_stack
class CommandToolbar(Ui_CommandToolbar):
    """
    Command Toolbar is the big toolbar above the 3D graphics area and
    the model tree. It is divided into the B{Control Area} and the
    B{Flyout Toolbar Area}.

    The B{Control Area} is a fixed toolbar on the left hand side with a
    purple background color and contains buttons with drop down menus.
    When you click on a button in the Control Area, the Flyout Toolbar Area
    is updated and displays the menu of that button (in most cases).

    The B{Flyout Toolbar Area} is divided into two areas, the
    B{Subcontrol Area} (light olive background color) and the
    B{Command Area} (light gray background color).
    The Command Area shows commands based on the checked
    SubControl Area button. Thus it could be empty in some situations.
    """

    _f_current_flyoutToolbar = None
    # _f_current_flyoutToolbar: the flyout toolbar currently displayed in the
    # 'flyout area' of the command toolbar. Usually changes when the command
    # stack changes, or when the user clicks a control button (exceptions
    # apply). Example: entering Build > Dna sets this to the 'BuildDnaFlyout'
    # object; when that command is exited, BuildDnaFlyout is first
    # 'deactivated' and this attr is assigned the flyout object of the next
    # command entered (which can be None if that command has no flyout
    # toolbar).
    # This value is changed in: self._setControlButtonMenu_in_flyoutToolbar()
    # and self.resetToDefaultState().
    # @see: AbstractFlyout.deActivateFlyoutToolbar()
    # @see: bug 2937
    # @see: self.command_update_flyout()

    _f_previous_flyoutToolbar = None
    # _f_previous_flyoutToolbar: suppose the user is in a command whose custom
    # flyout toolbar is shown, and then clicks another control button in the
    # Command Toolbar, so that button's menu is shown in the flyout area
    # instead of the command-specific custom flyout. At that point
    # _f_current_flyoutToolbar is set to None -- but its old value must be
    # remembered here, so that the custom flyout can still be 'deactivated'
    # properly when the user leaves the command it belongs to.
    # This value is changed in: self._setControlButtonMenu_in_flyoutToolbar()
    # and self.resetToDefaultState().
    # @see: AbstractFlyout.deActivateFlyoutToolbar()
    # @see: bug 2937
    # @see: self.command_update_flyout()
def __init__(self, win):
    """
    Constructor for class CommandToolbar.
    @param win: Mainwindow object
    @type win: L{MWsemantics}
    """
    # mapping from subcontrol-area actions to their command actions;
    # filled in later via _setFlyoutDictionary()
    self.flyoutDictionary = None

    Ui_CommandToolbar.__init__(self, win)
    self.setupUi()
    self._makeConnections()
    # make sure some control-area button is checked, so the flyout area
    # has something to display by default
    if not self.cmdButtonGroup.checkedButton():
        self.cmdButtonGroup.button(0).setChecked(True)

    self.in_a_mode = None     # whether we are inside a command/mode
    self.currentAction = None # action of the command we are in, if any
    self._updateFlyoutToolBar(self.cmdButtonGroup.checkedId())
def _makeConnections(self):
    """
    Connect signals to slots.
    """
    # clicking a control-area button drives what the flyout area displays
    self.win.connect(self.cmdButtonGroup, SIGNAL("buttonClicked(int)"),
                     self.controlButtonChecked)
def controlButtonChecked(self, btnId):
    """
    Slot: updates the flyout toolbar based on the button checked in the
    control area.
    @param btnId: index of the checked control-area button
    """
    self._updateFlyoutToolBar(btnId, self.in_a_mode)
def resetToDefaultState(self):
    """
    Reset the Command Toolbar to the default state, i.e. the state the user
    sees when NE1 is started: the Build button in the control area is
    checked and the flyout area on the right shows the sub-menu items of
    the Build control button.
    @see: baseCommand.command_update_flyout() which calls this while
          entering the NE1 default command SelectChunks_Command
          (fixes bugs like 2682, 2937)
    @see: self._setControlButtonMenu_in_flyoutToolbar()
    """
    # Deactivate both the current and the previous flyout (if any). This
    # fixes bugs like bug 2937: it guarantees that all command-specific
    # 'custom' flyouts are deactivated whenever the current command has no
    # flyout toolbar of its own, so clicking a control-area button always
    # shows that button's menu in the flyout area instead of a stale
    # custom flyout. See also _setControlButtonMenu_in_flyoutToolbar().
    current = self._f_current_flyoutToolbar
    previous = self._f_previous_flyoutToolbar
    if current:
        current.deActivateFlyoutToolbar()
    if previous:
        previous.deActivateFlyoutToolbar()
    self._f_current_flyoutToolbar = None
    self._f_previous_flyoutToolbar = None

    # check the first (Build) control button, then show its sub-menu items
    # in the flyout area
    self.cmdButtonGroup.button(0).setChecked(True)
    self._setControlButtonMenu_in_flyoutToolbar(
        self.cmdButtonGroup.checkedId())
def _updateFlyoutToolBar(self, btnId = 0, in_a_mode = False):
    """
    Update the Flyout toolbar commands based on the checked button
    in the control area and the checked action in the 'subcontrol' area
    of the flyout toolbar.

    @param btnId: index of the checked control-area button
                  (used only when not in a mode)
    @param in_a_mode: whether we are inside a mode / editing a feature etc.
    """
    # 'action' is self.currentAction, which was recorded when
    # updateCommandToolbar was called (e.g. while entering a mode).
    if self.currentAction:
        action = self.currentAction
    else:
        action = None

    if in_a_mode:
        self._showFlyoutToolBar(True)
        self.flyoutToolBar.clear()
        # Menu of the checked button in the Control Area of the
        # command toolbar
        menu = self.cmdButtonGroup.checkedButton().menu()
        if menu:
            for a in menu.actions():
                if a is action:
                    flyoutActionList = []
                    flyoutDictionary = self._getFlyoutDictonary()
                    if not flyoutDictionary:
                        # bugfix: the two string fragments used to be
                        # concatenated without a space ("notcreated")
                        print_compact_traceback(
                            "bug: Flyout toolbar not " \
                            "created as flyoutDictionary doesn't exist")
                        return
                    # A dict does not preserve insertion order, so
                    # getOrderedKeyList recovers the order in which the
                    # subcontrol-area actions were added; that order is
                    # used when adding them to the flyout toolbar.
                    keyList = self.getOrderedKeyList(flyoutDictionary)
                    flyoutActionList.extend(keyList)
                    # Now append the commands corresponding to the checked
                    # action in the sub-control area
                    for k in keyList:
                        if k.isChecked():
                            # flyoutDictionary keys have the form
                            # (counter, key); find the tuple whose key
                            # is this checked subcontrol action
                            ordered_key = [(counter, key)
                                           for (counter, key)
                                           in flyoutDictionary.keys()
                                           if key is k]
                            # Values corresponding to the ordered_key
                            commands = flyoutDictionary[ordered_key[0]]
                            # Add these commands after the subcontrol area.
                            flyoutActionList.extend(commands)
                    # Now add all actions collected in flyoutActionList
                    # to the flyout toolbar.
                    self.flyoutToolBar.addActions(flyoutActionList)
                    return
            # current action not found in the checked button's menu;
            # fall back to showing that button's menu in the flyout area
            self._setControlButtonMenu_in_flyoutToolbar(
                self.cmdButtonGroup.checkedId())
    else:
        self._showFlyoutToolBar(True)
        try:
            if self.cmdButtonGroup.button(btnId).isChecked():
                self._setControlButtonMenu_in_flyoutToolbar(
                    self.cmdButtonGroup.checkedId())
        except:
            # intentionally broad: the button may lack a menu, etc.
            print_compact_traceback(
                "command button doesn't have a menu, or other exception; No actions added " \
                "to the flyout toolbar: ")
        return
def _setControlButtonMenu_in_flyoutToolbar(self, btnId):
    """
    Show, in the flyout toolbar on the right, the menu of the control-area
    button the user clicked on.
    @param btnId: index of that toolbutton in the control area
    @type btnId: int
    @see: AbstractFlyout.activateFlyoutToolbar(), the other place where
          self._f_current_flyoutToolbar is changed
    @see: self.resetToDefaultState()
    """
    # Once a control button's menu is shown in the flyout area, no
    # command-specific flyout is "current" anymore, so reset
    # _f_current_flyoutToolbar to None -- but first remember its value in
    # _f_previous_flyoutToolbar. That way the custom flyout can still be
    # deactivated later when the command stack changes (when the new
    # command has no flyout of its own, both current and previous flyouts
    # get deactivated -- see baseCommand.command_update_flyout() and
    # self.resetToDefaultState()). Without this hand-off the Move flyout,
    # for example, would never get deactivated -- see steps in bug 2937.
    current = self._f_current_flyoutToolbar
    if current:
        self._f_previous_flyoutToolbar = current
    self._f_current_flyoutToolbar = None

    self.flyoutToolBar.clear()
    menu = self.cmdButtonGroup.button(btnId).menu()
    self.flyoutToolBar.addActions(menu.actions())
def check_controlAreaButton_containing_action(self, action = None):
    """
    Check (i.e. depress) the control-area button whose menu lists <action>
    as one of its items. Called once while entering a command.

    This ensures that, upon entering a command, the flyout toolbar shows
    that command's Exit button etc. (The user can still click some other
    control button afterwards to display a different flyout toolbar; this
    only establishes the default the user obviously expects.)
    (Not true for BuildAtoms subclasses such as partlib mode.)
    @see: bug 2600, 2801 (this method fixes these bugs)
    @TODO: After more testing, deprecate code in
        Ui_DnaFlyout.activateFlyoutToolbar and some other files that fixes
        bug 2600.
    """
    if not action:
        return
    for controlAreaButton in self.cmdButtonGroup.buttons():
        menu = controlAreaButton.menu()
        # identity comparison, matching how actions are tracked elsewhere
        if menu and any(a is action for a in menu.actions()):
            controlAreaButton.setChecked(True)
            return
    return
def updateCommandToolbar(self, action, obj, entering = True): #Ninad 070125
    """
    Update the command toolbar (i.e. show the appropriate flyout toolbar)
    depending upon the command button pressed.

    Delegates to a private method that updates the SubcontrolArea and the
    flyout toolbar based on which ControlArea button and which
    SubcontrolArea button are checked.

    @param action: action of the command being entered (or None)
    @param obj: Object that requests its own Command Manager flyout toolbar.
                This can be a B{mode} or a B{generator}.
    @param entering: False means we are leaving the command.
    """
    if not entering:
        self.in_a_mode = False
        self._updateFlyoutToolBar(self.cmdButtonGroup.checkedId(),
                                  in_a_mode = self.in_a_mode)
        return
    self._createFlyoutToolBar(obj)
    self.in_a_mode = entering
    # check the control button whose menu contains <action>
    # (fixes bugs like 2600, 2801)
    self.check_controlAreaButton_containing_action(action)
    self.currentAction = action
    self._updateFlyoutToolBar(in_a_mode = self.in_a_mode)
def _createFlyoutDictionary(self, params):
    """
    Build the dictionary whose keys are subcontrol-area actions and whose
    values are the corresponding command actions.

    @param params: tuple of 3 lists:
        (subControlAreaActionList, commandActionLists, allActionsList);
        allActionsList is extended in place with every command action.
    @return: flyoutDictionary: keys are (counter, subControlAreaAction)
        tuples -- the counter records insertion order, since plain dicts
        don't preserve it -- and values are the command-action lists.
    """
    subControlAreaActionList, commandActionLists, allActionsList = params
    flyoutDictionary = {}
    for counter, subControlAction in enumerate(subControlAreaActionList):
        commands = commandActionLists[counter]
        # the counter lets getOrderedKeyList recover insertion order
        flyoutDictionary[ (counter, subControlAction) ] = commands
        # command actions also belong in the flat list of all actions
        allActionsList.extend(commands)
    return flyoutDictionary
def _createFlyoutToolBar(self, obj):
    """
    Creates the flyout tool bar in the Command Manager.
    @param obj: object that provides getFlyoutActionList()
                (e.g. a mode or a generator)
    @see: NE1_QWidgetAction.setToolButtonPalette()
    """
    #This was earlier defined in each mode needing a flyout toolbar
    params = obj.getFlyoutActionList()
    # XXXXXXXXXXXXXXXX The semantics of these lists need to be clearly defined!
    subControlAreaActionList, commandActionLists, allActionsList = params

    flyoutDictionary = self._createFlyoutDictionary(params)

    #Following counts the total number of actions in the 'command area'.
    # If this is zero, that means our subcontrol area itself is a command
    #area. In this case, no special rendering is needed for this subcontrol
    #area, and so its buttons are rendered as if they were command area
    #buttons (but internally it's still the 'subcontrol area').--ninad20070622
    cmdActionCount = 0
    for l in commandActionLists:
        cmdActionCount += len(l)
    # only QWidgetActions get the custom palette treatment below
    widgetActions = filter(lambda act:
                           isinstance(act, QtGui.QWidgetAction),
                           allActionsList)
    self.flyoutToolBar.clear()
    #========
    #Do this customization BEFORE adding actions to the toolbar.
    #Reason: custom properties for the button that appears can NOT be set
    #AFTER the actions are added to the QToolBar, given that the
    #NE1_QWidgetAction.createWidget() method is implemented (it gets
    #called when we do toolbar.addAction()) -- at least in Qt 4.3.
    # [Ninad 2008-07-15 comment]
    #=========
    for action in widgetActions:
        #Set a different color palette for the 'SubControl' buttons in
        #the command toolbar.
        # (membership test by identity against the dictionary's
        #  subcontrol-area keys)
        if [key for (counter, key) in flyoutDictionary.keys()
            if key is action]:
            if cmdActionCount > 0:
                subControlPalette = self.getCmdMgrSubCtrlAreaPalette()
                action.setToolButtonPalette(subControlPalette)
            else:
                # no separate command area: render subcontrol buttons
                # like command-area buttons
                cmdPalette = self.getCmdMgrCommandAreaPalette()
                action.setToolButtonPalette(cmdPalette)

    self.flyoutToolBar.addActions(allActionsList)
    self._setFlyoutDictionary(flyoutDictionary)
def _showFlyoutToolBar(self, bool_show):
    """
    Show or hide the flyout toolbar, as requested.
    The spacer item display is toggled at the same time: the spacer
    is shown only while the toolbar is hidden.
    @param bool_show: True to show the toolbar, False to hide it.
    """
    if not bool_show:
        # hide the toolbar; the spacer takes its place in the layout
        self.flyoutToolBar.hide()
        self.layout().addItem(self.spacerItem)
        return
    self.flyoutToolBar.show()
    self.layout().removeItem(self.spacerItem)
def _setFlyoutDictionary(self, dictionary):
    """
    Set the flyout dictionary that stores subcontrol area buttons in the
    flyout toolbar as the keys and their corresponding command buttons
    as values.
    @param dictionary: dictionary object.
    Its key is of the type (counter, subcontrolAreaAction). The counter is
    used to sort the keys in the order in which they were created,
    and value is the 'list of commands' corresponding to the
    sub control button.
    """
    # Bug fix: the old code asserted isinstance(dictionary, dict) inside the
    # *else* branch (where it was known to be false, so it always raised),
    # leaving 'self.flyoutDictionary = None' unreachable except under -O.
    # Assert the precondition up front instead; a non-dict argument is a
    # programming error in the caller.
    assert isinstance(dictionary, dict), \
           "flyout dictionary must be a dict, got %r" % (dictionary,)
    self.flyoutDictionary = dictionary
def _getFlyoutDictonary(self):
    """
    Returns the flyout dictonary object.
    @return: self.flyoutDictionary whose
    key : is of the type (counter, subcontrolAreaAction). The counter is
    used to sort the keys in the order in which they were created.
    and value is the 'list of commands' corresponding to the
    sub control button.
    """
    # NOTE(review): the method name misspells 'Dictionary' as 'Dictonary';
    # kept as-is because external callers use this spelling.
    if self.flyoutDictionary:
        return self.flyoutDictionary
    else:
        # falsy covers both None and an empty dict; either way we return it
        print "fyi: flyoutDictionary doesn't exist. Returning None"
        return self.flyoutDictionary
def getOrderedKeyList(self, dictionary):
    """
    Orders the keys of a dictionary and returns them as a list.
    Effective only when each dictionary key is a tuple of format
    (counter, key); the counter determines the ordering.
    @param dictionary: dictionary object whose keys are (counter, key) tuples
    @return: a list object which contains the 'key' parts of the dictionary
             keys, ordered by counter.
    """
    # Use sorted() rather than dict.keys() + list.sort(): equivalent on
    # Python 2 (where keys() returns a list) and also works on Python 3
    # (where keys() returns a view with no .sort()). Tuples sort by their
    # first element (the counter) first, as before.
    return [key for (counter, key) in sorted(dictionary.keys())]
def _getNumberOfVisibleToolButtons(self):
    """
    Estimate how many tool buttons are currently visible in the flyout
    toolbar, based on the visible width of the toolbar.
    @return: estimated number of visible tool buttons
    @rtype: int
    """
    rect = self.flyoutToolBar.visibleRegion().boundingRect()
    visibleWidth = rect.width()
    # 75 = assumed width in pixels of one tool button -- TODO confirm.
    # Use explicit floor division ('//') so the result stays an int:
    # identical to '/' on Python 2 ints, but '/' would yield a float on
    # Python 3. (Also removed a redundant zero-initialization.)
    return int(visibleWidth) // 75
| NanoCAD-master | cad/src/commandToolbar/CommandToolbar.py |
NanoCAD-master | cad/src/commandToolbar/__init__.py |
|
# Copyright 2007 Nanorex, Inc. See LICENSE file for details.
"""
CommandToolbar_Constants.py
@author: Ninad
@version: $Id$
@copyright: 2007 Nanorex, Inc. See LICENSE file for details.
Module classification: [bruce 071228]
This is used only in Ui_CommandToolbar, slated for "ne1_ui" package.
But when we refactor Ui_CommandToolbar as described in its docstring,
this will end up used only by the part that goes into the toplevel
"CommandToolbar" package. So we might as well classify it there now.
History:
ninad 20070622 Created this file that defines various constants (e.g. color)
used in the command toolbar.
"""
from PyQt4.Qt import QColor
# Colors for Command Manager Control Areas
cmdTbarCntrlAreaBtnColor = QColor(204, 204, 255)     # control area buttons: pale blue
cmdTbarSubCntrlAreaBtnColor = QColor(190, 210, 190)  # subcontrol area buttons: pale green
cmdTbarCmdAreaBtnColor = QColor(230, 230, 230)       # command area buttons: light gray
| NanoCAD-master | cad/src/commandToolbar/CommandToolbar_Constants.py |
# Copyright 2004-2009 Nanorex, Inc. See LICENSE file for details.
"""
move_atoms_and_normalize_bondpoints.py -- post-simulation helper function
@author: Josh, Bruce
@version: $Id$
@copyright: 2004-2009 Nanorex, Inc. See LICENSE file for details.
History:
090112 renamed from move_alist_and_snuggle, moved into new file from chem.py
"""
from geometry.VQT import A
def move_atoms_and_normalize_bondpoints(alist, newPositions):
    """
    Move the atoms in alist to the new positions in the given array or
    sequence (which must have the same length); then, for any singlets
    (bondpoints) in alist, correct their positions using Atom.snuggle.
    @warning: it would be wrong to call this on several alists in a row if
              they might overlap or were connected by bonded atoms, for the
              same reason that the snuggle has to be done in a separate loop
              (see snuggle docstring for details, re bug 1239).
    @warning: I'm not sure this does all required invals; doesn't do gl_update.
    """
    #bruce 051221 split this out of class Movie so its bug1239 fix can be used
    # in jig_Gamess. [later: Those callers have duplicated code which should be
    # cleaned up.]
    #bruce 090112 renamed from move_alist_and_snuggle
    #todo: refile into a new file in operations package
    assert len(alist) == len(newPositions)
    # First pass: move every atom, deferring the distance regularization of
    # singlets. Singlets may have been H in the xyz file (so their distance
    # from the base atom can be wrong); regularize the singlet-baseatom
    # distance for ALL singlets rather than tracking which ones were H.
    # setposn fixes the direction; snuggle (done later) fixes the distance.
    # REVIEW: should this also regularize the distance for H itself? Maybe
    # only if the sim value is wildly wrong, and then it should complain.
    deferred_singlets = []
    for atom, position in zip(alist, newPositions):
        atom.setposn(A(position))
        if atom.is_singlet(): # same code as in movend()
            # do all snuggles after all moves (bug 1239);
            # see snuggle docstring warning
            deferred_singlets.append(atom)
    # Second pass: regularize each deferred singlet's position.
    for singlet in deferred_singlets:
        singlet.snuggle() # includes singlet.setposn
    return
# end
| NanoCAD-master | cad/src/operations/move_atoms_and_normalize_bondpoints.py |
# Copyright 2004-2009 Nanorex, Inc. See LICENSE file for details.
"""
ops_select.py -- operations and internal methods for changing what's selected
and maintaining other selection-related state. (Not well-organized.)
@version: $Id$
@copyright: 2004-2009 Nanorex, Inc. See LICENSE file for details.
History:
bruce 050507 made this by collecting appropriate methods from class Part.
Various improvements since then, by various developers.
TODO: probably needs some refactoring, judging by the imports
as of 080414.
"""
from utilities.constants import SELWHAT_CHUNKS, SELWHAT_ATOMS
from utilities.constants import diINVISIBLE, diDEFAULT
from model.global_model_changedicts import _changed_picked_Atoms
from model.chunk import Chunk
from model.elements import Singlet
from geometry.VQT import V, A, norm, cross
from Numeric import dot, transpose
import foundation.env as env
from utilities.Log import redmsg, greenmsg, orangemsg
from utilities.debug import print_compact_traceback
from utilities import debug_flags
from platform_dependent.PlatformDependent import fix_plurals
from utilities.GlobalPreferences import permit_atom_chunk_coselection
from utilities.icon_utilities import geticon
from dna.model.DnaGroup import DnaGroup
from dna.model.DnaStrand import DnaStrand
from foundation.Group import Group
from dna.model.DnaLadderRailChunk import DnaAxisChunk
from dna.model.DnaLadderRailChunk import DnaStrandChunk
from dna.model.DnaMarker import DnaMarker
from dna.model.DnaSegment import DnaSegment
from cnt.model.NanotubeSegment import NanotubeSegment
# Object flags, used by objectSelected() and its callers.
ATOMS = 1
CHUNKS = 2
JIGS = 4
DNASTRANDS = 8
DNASEGMENTS = 16
DNAGROUPS = 32
ALLOBJECTS = ATOMS | CHUNKS | JIGS | DNASTRANDS | DNASEGMENTS | DNAGROUPS

def objectSelected(part, objectFlags = ALLOBJECTS): # Mark 2007-06-24
    """
    Report whether anything is selected in the given part.
    @param part: the part whose selection state is queried.
    @param objectFlags: bitmask of the object kinds to test (ATOMS, CHUNKS,
                        JIGS, DNASTRANDS, DNASEGMENTS, DNAGROUPS); defaults
                        to ALLOBJECTS.
    @return: True if any object of a requested kind is selected,
             False otherwise.
    """
    # Each kind is only queried when its flag bit is set (the per-kind
    # query may be expensive, so it is short-circuited by the flag test).
    if objectFlags & ATOMS and part.selatoms_list():
        return True
    if objectFlags & CHUNKS and part.selmols:
        return True
    if objectFlags & JIGS and part.getSelectedJigs():
        return True
    if objectFlags & DNASTRANDS and part.getSelectedDnaStrands():
        return True
    if objectFlags & DNASEGMENTS and part.getSelectedDnaSegments():
        return True
    if objectFlags & DNAGROUPS and part.getSelectedDnaGroups():
        return True
    return False
def renameableLeafNode(obj, groups_renameable = False): # probably by Mark
    """
    Returns True if obj is a visible, renameable leaf node in the model tree.
    Otherwise, returns False.
    If obj is a Group or DnaGroup and groups_renameable is True,
    return True.
    @param obj: the model tree node to test; must support rename_enabled().
    @param groups_renameable: whether Group/DnaGroup nodes count as renameable.
    """
    # TODO: refactor this so that it doesn't hardcode a lot of classes.
    # (The result is probably deducible from existing class attrs;
    # if not, we should add one. There might already be a method on
    # class Node related to this -- see try_rename or its calls to find out.)
    # [bruce 081124 comment]
    # NOTE: list order matters -- the isinstance tests below run in this
    # order, so subclasses must precede their base classes.
    _nodeList = [[DnaAxisChunk, False], # Chunk subclass
                 [DnaStrandChunk, False], # Chunk subclass
                 [DnaMarker, False], # Jig subclass
                 [DnaSegment, True], # Group subclass
                 [DnaStrand, True], # Group subclass
                 [NanotubeSegment, True], # Group subclass
                 [DnaGroup, groups_renameable], # Group subclass
                 [Group, groups_renameable]] # Group must be last in list.
    if not obj.rename_enabled():
        return False
    for _class, _renameable in _nodeList:
        if isinstance(obj, _class):
            return _renameable
    # not one of the special classes above: renameable by default
    return True
class ops_select_Mixin:
"""
Mixin class for providing selection methods to class L{Part}.
"""
# functions from the "Select" menu
# [these are called to change the set of selected things in this part,
# when it's the current part; these are event handlers which should
# do necessary updates at the end, e.g. win_update, and should print
# history messages, etc]
def selectionContainsAtomsWithOverriddenDisplay(self):
    """
    Check whether the current selection contains any atoms whose display
    mode is not B{diDEFAULT}.
    @return: True if one or more explicitly selected atoms have a display
             mode other than B{diDEFAULT}.
    @rtype: bool
    @note: atoms belonging to a selected chunk or jig are ignored unless
           they are themselves explicitly selected.
    """
    overridden = [atom for atom in self.getOnlyAtomsSelectedByUser()
                  if atom.display != diDEFAULT]
    return len(overridden) > 0
def selectionContainsInvisibleAtoms(self):
    """
    Check whether the current selection contains any atoms whose display
    mode is B{diINVISIBLE}.
    @return: True if one or more explicitly selected atoms have their
             display mode set to B{diINVISIBLE}.
    @rtype: bool
    @note: atoms belonging to a selected chunk or jig are ignored unless
           they are themselves explicitly selected.
    """
    invisible = [atom for atom in self.getOnlyAtomsSelectedByUser()
                 if atom.display == diINVISIBLE]
    return len(invisible) > 0
def getSelectedAtoms(self): #mark 060122
    """
    Return a list of all the selected atoms, including those belonging to
    selected chunks and jigs.
    @return: List of selected atoms.
    @rtype: list
    """
    result = []
    # atoms owned by each selected chunk
    for chunk in self.assy.selmols[:]:
        result.extend(chunk.atoms.values())
    # atoms attached to each selected jig
    for jig in self.assy.getSelectedJigs():
        result.extend(jig.atoms)
    # individually selected atoms
    result.extend(self.assy.selatoms_list())
    return result
def getOnlyAtomsSelectedByUser(self): #ninad 0600818
    """
    Return a list of only the atoms explicitly selected by the user.
    Atoms that merely belong to a selected chunk or jig are NOT included.
    """
    #ninad060818 is using this function to get distance and other info in the DynamiceTooltip class.
    # return a fresh list so callers may mutate it freely
    return list(self.assy.selatoms_list())
def getSelectedJigs(self):
    """
    Return a list of all the currently selected jigs.
    @see: MWsemantics.activateDnaTool
    """
    selectedJigs = []
    jigClass = self.win.assy.Jig
    def collect(node):
        # append picked jig nodes in model-tree traversal order
        if node.picked and isinstance(node, jigClass):
            selectedJigs.append(node)
    self.topnode.apply2all(collect)
    return selectedJigs
def getSelectedPlanes(self):
    """
    Return a list of the currently selected Plane jigs.
    @see: self.getSelectedJigs()
    """
    planeClass = self.win.assy.Plane
    # narrow the selected jigs down to Plane instances
    return [jig for jig in self.getSelectedJigs()
            if isinstance(jig, planeClass)]
def getSelectedDnaGroups(self):
    """
    Return a list of the currently selected DnaGroup(s).
    """
    picked = []
    def collect(node):
        # keep picked DnaGroup nodes, in traversal order
        if node.picked and isinstance(node, DnaGroup):
            picked.append(node)
    self.topnode.apply2all(collect)
    return picked
def getSelectedDnaStrands(self):
    """
    Return a list of the currently selected DnaStrand(s).
    """
    picked = []
    def collect(node):
        # keep picked DnaStrand nodes, in traversal order
        if node.picked and isinstance(node, DnaStrand):
            picked.append(node)
    self.topnode.apply2all(collect)
    return picked
def getSelectedDnaSegments(self):
    """
    Return a list of the currently selected DnaSegment(s).
    """
    picked = []
    segmentClass = self.win.assy.DnaSegment
    def collect(node):
        # keep picked DnaSegment nodes, in traversal order
        if node.picked and isinstance(node, segmentClass):
            picked.append(node)
    self.topnode.apply2all(collect)
    return picked
def getSelectedNanotubeSegments(self):
    """
    @return: a list of the currently selected NanotubeSegments
    """
    picked = []
    ntClass = self.win.assy.NanotubeSegment
    def collect(node):
        # keep picked NanotubeSegment nodes, in traversal order
        if node.picked and isinstance(node, ntClass):
            picked.append(node)
        return
    self.topnode.apply2all(collect)
    return picked
def getSelectedNanotubeSegment(self):
    """
    Return the single currently selected NanotubeSegment, if any.
    @return: the currently selected nanotubeSegment, or None if zero or
             more than one nanotubeSegment is selected.
    @rtype: NanotubeSegment or None
    @note: use L{getSelectedNanotubeSegments()} to get the list of all
           selected nanotubeSegments.
    """
    # (Removed an unreachable trailing 'return' statement -- both branches
    # of the if/else already returned.)
    selected = self.getSelectedNanotubeSegments()
    if len(selected) == 1:
        return selected[0]
    # zero or multiple selected segments: no unique answer
    return None
def getSelectedProteinChunks(self):
    """
    @return: a list of the currently selected Protein chunks
    """
    picked = []
    chunkClass = self.win.assy.Chunk
    def collect(node):
        # keep picked chunks that report themselves as protein chunks
        if node.picked and \
           isinstance(node, chunkClass) and \
           node.isProteinChunk():
            picked.append(node)
        return
    self.topnode.apply2all(collect)
    return picked
def getSelectedProteinChunk(self):
    """
    Return the single currently selected protein chunk, if any.
    @return: the currently selected protein chunk, or None if zero or
             more than one peptide chunk is selected.
    @rtype: L{Chunk} or None
    @note: use L{getSelectedProteinChunks()} to get the list of all
           selected proteins.
    """
    # (Removed an unreachable trailing 'return' statement -- both branches
    # of the if/else already returned.)
    selected = self.getSelectedProteinChunks()
    if len(selected) == 1:
        return selected[0]
    # zero or multiple selections: no unique protein chunk
    return None
def getNumberOfSelectedChunks(self):
    """
    Return the number of currently selected chunks.
    @note: atoms and jigs are not counted.
    """
    selectedChunks = self.assy.selmols
    return len(selectedChunks)
def getNumberOfSelectedJigs(self):
    """
    Return the number of currently selected jigs.
    @note: atoms and chunks are not counted.
    """
    selectedJigs = self.assy.getSelectedJigs()
    return len(selectedJigs)
def getSelectedMovables(self): # Renamed from getMovables(). mark 060124.
    """
    Return the list of all selected nodes that are movable.
    """
    movables = []
    def collect(node):
        # keep picked nodes flagged as movable, in traversal order
        if node.picked and node.is_movable:
            movables.append(node)
    self.topnode.apply2all(collect)
    return movables
def getSelectedRenameables(self):
    """
    Return the list of all selected nodes that can be renamed.
    """
    renameables = []
    def collect(node):
        # keep picked nodes whose renaming is enabled, in traversal order
        if node.picked and node.rename_enabled():
            renameables.append(node)
    self.topnode.apply2all(collect)
    return renameables
def getSelectedNodes(self):
    """
    Return a list of all selected nodes in the model tree.
    """
    picked_nodes = []
    def collect(node):
        # every picked node counts, whatever its type
        if node.picked:
            picked_nodes.append(node)
    self.topnode.apply2all(collect)
    return picked_nodes
def selectAll(self):
    """
    Select all atoms or all chunks, depending on the select mode.
    @note: The selection filter is applied if it is enabled.
    """
    self.begin_select_cmd() #bruce 051031
    if self.selwhat == SELWHAT_CHUNKS:
        # chunk-select mode: pick every chunk in this part
        for m in self.molecules:
            m.pick()
        #Call Graphics mode API method to do any additional selection
        #(example: select an entire DnaGroup if all its contents are selected)
        #@see: basicGraphicsMode.end_selection_from_GLPane()
        currentCommand = self.w.commandSequencer.currentCommand
        currentCommand.graphicsMode.end_selection_from_GLPane()
    else:
        assert self.selwhat == SELWHAT_ATOMS
        # atom-select mode: pick every atom of every chunk
        # (Atom.pick honors the selection filter per the docstring note)
        for m in self.molecules:
            for a in m.atoms.itervalues():
                a.pick()
    self.w.win_update()
def selectNone(self):
    """
    Deselect everything (atoms, chunks, jigs, ...) in this part,
    then update the main window.
    """
    self.begin_select_cmd() #bruce 051031
    self.unpickall_in_win()
    self.w.win_update()
def selectInvert(self):
    """
    If some parts are selected, select the other parts instead.
    If some atoms are selected, select the other atoms instead
    (even in chunks with no atoms selected, which end up with
    all atoms selected). (And unselect all currently selected
    parts or atoms.)
    @note: when atoms are selected, only affects atoms as permitted by the
           selection filter.
    """ #bruce 060331 revised docstring
    #bruce 060721 comments: this is problematic #####@@@@@ as we move to more general selection semantics.
    # E.g. -- can it select atoms inside a CylinderChunk? It probably shouldn't, but now it can.
    # (If some in it are already selected, then maybe, but not if they are not, and maybe that should
    #  never be permitted to occur.)
    # E.g. -- what if there are atoms selected inside chunks that are selected?
    # (Even if not, how do you decide whether unselected stuff gets selected as atoms or chunks?
    #  Now, this depends on the mode (Build -> atoms, Extrude -> Chunks); maybe that makes sense --
    #  select things in the smallest units that could be done by clicking (for CylChunks this will mean chunks).)
    self.begin_select_cmd() #bruce 051031
    cmd = "Invert Selection: "
    env.history.message(greenmsg(cmd))
    # revised by bruce 041217 after discussion with Josh;
    # previous version inverted selatoms only in chunks with
    # some selected atoms.
    if self.selwhat == SELWHAT_CHUNKS:
        # compute the complement first, then clear and re-pick it
        newpicked = filter( lambda m: not m.picked, self.molecules )
        self.unpickparts()
        for m in newpicked:
            m.pick()
        #Call Graphics mode API method to do any additional selection
        #(example: select an entire DnaGroup if all its contents are selected)
        #@see: basicGraphicsMode.end_selection_from_GLPane()
        currentCommand = self.w.commandSequencer.currentCommand
        currentCommand.graphicsMode.end_selection_from_GLPane()
    else:
        assert self.selwhat == SELWHAT_ATOMS
        # toggle the picked state of every atom in every chunk
        for m in self.molecules:
            for a in m.atoms.itervalues():
                if a.picked: a.unpick()
                else: a.pick()
    # Print summary msg to history widget. Always do this before win/gl_update.
    env.history.message("Selection Inverted")
    self.w.win_update()
def expandDnaComponentSelection(self, dnaStrandOrSegment):
    """
    Expand the DnaComponent selection. DnaComponent can be a strand or a
    segment.
    For DnaSegment -- it selects that dna segment and the adjacent segments
    reachable through crossovers.
    For DnaStrand it selects that strand and all the complementary strands.
    @param dnaStrandOrSegment: the DnaStrand or DnaSegment to expand from.
    @see: self._expandDnaSegmentSelection()
    @see: SelectChunks_GraphicsMode.chunkLeftDouble()
    @see: DnaStrand.get_DnaStrandChunks_sharing_basepairs()
    @see: DnaSegment.get_DnaSegments_reachable_thru_crossovers()
    @see: NFR bug 2749 for details.
    """
    if isinstance(dnaStrandOrSegment, self.win.assy.DnaStrand):
        self._expandDnaStrandSelection(dnaStrandOrSegment)
    elif isinstance(dnaStrandOrSegment, self.win.assy.DnaSegment):
        self._expandDnaSegmentSelection(dnaStrandOrSegment)
    # let the current graphics mode do any additional selection
    # (e.g. select a whole DnaGroup when all its contents become selected)
    currentCommand = self.w.commandSequencer.currentCommand
    currentCommand.graphicsMode.end_selection_from_GLPane()
    self.win.win_update()
def _expandDnaSegmentSelection(self, dnaSegment):
    """
    Expand the selection of such that the segment <dnaSegment> and all its
    adjacent DnaSegments reachable through the crossovers, are selected.
    @param dnaSegment: the DnaSegment to expand the selection from.
    @see: self.expandDnaComponentSelection()
    """
    assert isinstance(dnaSegment, self.win.assy.DnaSegment)
    # the segment itself plus every segment reachable via a crossover
    segmentList = [dnaSegment]
    segmentList.extend(dnaSegment.get_DnaSegments_reachable_thru_crossovers())
    for segment in segmentList:
        if not segment.picked:
            segment.pick()
def _expandDnaStrandSelection(self, dnaStrand):
    """
    Expand the selection such that the <dnaStrand> and all its complementary
    strand chunks are selected.
    @param dnaStrand: the DnaStrand to expand the selection from.
    @see: self.expandDnaComponentSelection()
    """
    assert isinstance(dnaStrand, self.win.assy.DnaStrand)
    # the strand's own chunks plus the complementary strands' chunks
    lst = dnaStrand.getStrandChunks()
    lst.extend(dnaStrand.get_DnaStrandChunks_sharing_basepairs())
    for c in lst:
        if not c.picked:
            c.pick()
def contractDnaComponentSelection(self, dnaStrandOrSegment):
    """
    Contract the selection such that:
    If it is a DnaStrand, then that strand and all its complementary
    strand chunks are deselected.
    If it is a DnaSegment, then that segment and its adjacent segments
    reachable through crossovers are deselected.
    @param dnaStrandOrSegment: the DnaStrand or DnaSegment to contract from.
    @see: self._contractDnaStrandSelection()
    @see: self._contractDnaSegmentSelection()
    @see: SelectChunks_GraphicsMode.chunkLeftDouble()
    @see: DnaStrand.get_DnaStrandChunks_sharing_basepairs()
    @see: DnaSegment.get_DnaSegments_reachable_thru_crossovers()
    @see: NFR bug 2749 for details.
    """
    if isinstance(dnaStrandOrSegment, self.win.assy.DnaStrand):
        self._contractDnaStrandSelection(dnaStrandOrSegment)
    elif isinstance(dnaStrandOrSegment, self.win.assy.DnaSegment):
        self._contractDnaSegmentSelection(dnaStrandOrSegment)
def _contractDnaStrandSelection(self, dnaStrand):
    """
    Contract the selection such that the chunks of <dnaStrand> and all its
    complementary strand chunks are deselected.
    @param dnaStrand: the DnaStrand to contract the selection from.
    @see: self.contractDnaComponentSelection()
    """
    # (Fixed: the assert was accidentally duplicated on two consecutive
    #  lines; one copy suffices. Also added the missing docstring.)
    assert isinstance(dnaStrand, self.win.assy.DnaStrand)
    # the strand's own chunks plus the complementary strands' chunks
    lst = dnaStrand.getStrandChunks()
    lst.extend(dnaStrand.get_DnaStrandChunks_sharing_basepairs())
    for c in lst:
        if c.picked:
            c.unpick()
def _contractDnaSegmentSelection(self, dnaSegment):
    """
    Contract the selection of the picked DnaSegments such that the segment
    <dnaSegment> and all its adjacent DnaSegments reachable through the
    crossovers, are deselected.
    @param dnaSegment: the DnaSegment to contract the selection from.
    @see: self.contractDnaComponentSelection()
    """
    assert isinstance(dnaSegment, self.win.assy.DnaSegment)
    # the segment itself plus every segment reachable via a crossover
    segmentList = [dnaSegment]
    segmentList.extend(dnaSegment.get_DnaSegments_reachable_thru_crossovers())
    for segment in segmentList:
        if segment.picked:
            segment.unpick()
def selectExpand(self):
    """
    Select any atom that is bonded to any currently selected atom,
    and whose selection is permitted by the selection filter.
    """ #bruce 060331 revised docstring
    # Eric really needed this. Added by Mark 050923.
    # (See also Selection.expand_atomset method. [bruce 051129])
    self.begin_select_cmd() #bruce 051031
    cmd = "Expand Selection: "
    env.history.message(greenmsg(cmd))
    #bruce 051129 comment: redundancy of greenmsg is bad, but self.selatoms can take time to compute,
    # so I decided against fixing the redundancy by moving this below the "No atoms selected" test.
    if not self.selatoms:
        env.history.message(greenmsg(cmd) + redmsg("No atoms selected."))
        return
    num_picked = 0 # Number of atoms picked in the expand selection.
    for a in self.selatoms.values():
        if a.picked: #bruce 051129 comment: this is presumably always true
            for n in a.neighbors():
                if not n.picked:
                    n.pick()
                    # re-check picked: pick() may have been a noop (see below)
                    if n.picked:
                        #bruce 051129 added condition to fix two predicted miscount bugs (don't know if reported):
                        # - open bonds can't be picked (.pick is always a noop for them)
                        # - some elements can't be picked when selection filter is on (.pick is a noop for them, too)
                        # Note that these bugs might have caused those unselected atoms to be counted more than once,
                        # not merely once (corrected code counts them 0 times).
                        num_picked += 1
    # Print summary msg to history widget. Always do this before win/gl_update.
    msg = fix_plurals(str(num_picked) + " atom(s) selected.")
    env.history.message(msg)
    self.w.win_update()
def selectContract(self):
    """
    Unselects any atom which has a bond to an unselected atom, or which has any open bonds,
    and whose unselection is permitted by the selection filter.
    """ #bruce 060331 revised docstring
    # Added by Mark 050923.
    self.begin_select_cmd() #bruce 051031
    cmd = "Contract Selection: "
    env.history.message(greenmsg(cmd))
    if not self.selatoms:
        env.history.message(greenmsg(cmd) + redmsg("No atoms selected."))
        return
    contract_list = [] # Contains list of atoms to be unselected.
    assert self.selwhat == SELWHAT_ATOMS
    for a in self.selatoms.values():
        if a.picked:
            # If a selected atom has an unpicked neighbor, it gets added to the contract_list
            # Bruce mentions: you can just scan realNeighbors if you want to only scan
            # the non-singlet atoms. Users may desire this behavior - we can switch it on/off
            # via a dashboard checkbox or user pref if we want. Mark 050923.
            for n in a.neighbors():
                if not n.picked:
                    contract_list.append(a)
                    break
    # Unselect the atoms in the contract_list
    #bruce 051129 comment: this appears to contain only unique picked atoms (based on above code),
    # and any atom can be unpicked (regardless of selection filter) [this later became WRONG; see below],
    # so using its len as a count of changed atoms, below, is probably correct.
    #bruce 060331 comment & bugfix: sometime since the above comment, unpick started using selection filter.
    # So I'll fix the atom count for the history message.
    natoms = 0
    for a in contract_list:
        if not a.picked:
            continue #bruce 060331 precaution, for correct count (not needed for current code above)
        a.unpick()
        if not a.picked: # condition is due to selection filter
            natoms += 1
    # Print summary msg to history widget.
    msg = fix_plurals(str(natoms) + " atom(s) unselected.")
    env.history.message(msg)
    self.w.win_update() # Needed? Mark 2008-02-14
def lockSelection(self, lockState):
    """
    Enable/disable the mouse "selection lock". When enabled, selection
    operations using the mouse (i.e. clicks and drags) are disabled in the
    3D graphics area (glpane). All other selection commands via the
    toolbar, menus, model tree and keyboard shortcuts are not affected by
    the selection lock state.
    @param lockState: The selection lock state, where:
                    - True  = selection locked
                    - False = selection unlocked
    @type lockState: boolean
    """
    # swap the toolbar icon to reflect the new lock state
    if lockState:
        self.w.selectLockAction.setIcon(
            geticon("ui/actions/Tools/Select/Selection_Locked.png"))
    else:
        self.w.selectLockAction.setIcon(
            geticon("ui/actions/Tools/Select/Selection_Unlocked.png"))
    self.o.mouse_selection_lock_enabled = lockState
    # Update the cursor and statusbar.
    self.o.setCursor()
    if 0: # debug print, normally disabled
        print "mouse_selection_lock_enabled=", \
              self.o.mouse_selection_lock_enabled
def hideSelection(self):
    """
    Hides the current selection. Selected atoms are made invisible.
    Selected chunks and/or any other object (i.e. jigs, planes, etc.)
    are hidden.
    """
    # Added by Mark 2008-02-14. [slight revisions, bruce 080305]
    cmd = "Hide: "
    env.history.message(greenmsg(cmd))
    # Hide selected objects (nodes such as chunks, jigs, planes).
    self.assy.Hide()
    if self.selatoms:
        # Hide selected atoms by changing their display style to invisible.
        for a in self.selatoms.itervalues():
            a.setDisplayStyle(diINVISIBLE)
    return
def unhideSelection(self):
    """
    Unhides the current selection.
    If the current selection mode is "Select Chunks", the selected nodes
    (i.e. chunks, jigs, planes, etc.) are unhidden. If all the nodes
    were already visible (unhidden), then we unhide any invisble atoms
    inside chunks by changing their display style to default (even if
    their display style before they were hidden was something different).
    If the current selection mode is "Select Atoms (i.e. Build Atoms), then
    the selected atoms are made visible by changing their display style
    to default (even if their display style before they were hidden
    was something different).
    """
    # Added by Mark 2008-02-25. [slight revisions, bruce 080305]
    # [subsequently modified and/or bugfixed by Ninad]
    # TODO: fix possible speed issue: this looks like it might be slow for
    # deep nesting in model tree, since it may unhide selected groups
    # as a whole, as well as each node they contain. [bruce 081124 comment]
    cmd = "Unhide: "
    env.history.message(greenmsg(cmd))
    _node_was_unhidden = False
    selectedNodes = self.getSelectedNodes()
    # Unhide any movables. This includes chunks, jigs, etc. (but not atoms).
    for node in selectedNodes:
        if node.hidden:
            _node_was_unhidden = True
            node.unhide()
    if _node_was_unhidden:
        # something was unhidden at the node level; stop here (see docstring)
        self.w.win_update()
        return
    if not self.selatoms:
        # Unhide any invisible atoms in the selected chunks.
        for chunk in self.assy.selmols[:]:
            for a in chunk.atoms.itervalues():
                a.setDisplayStyle(diDEFAULT)
    else:
        # Unhide selected atoms by changing their display style to default.
        for a in self.selatoms.itervalues():
            a.setDisplayStyle(diDEFAULT)
    self.w.win_update()
    return
# ==
def selectChunksWithSelAtoms(self): #bruce 060721 renamed from selectParts; see also permit_pick_parts
    """
    Change this Part's assy to permit selected chunks, not atoms,
    but select all chunks which contained selected atoms;
    then win_update.
    [warning: not for general use -- doesn't change which select mode is in use]
    """
    # This is called by Move_GraphicsMode.Enter_GraphicsMode.
    # (Why not selectChunksMode? because SelectChunks_GraphicsMode calls it w/o update, instead:
    #  self.o.assy.selectChunksWithSelAtoms_noupdate() # josh 10/7 to avoid race in assy init
    # )
    # BTW, MainWindowUI.{py,ui} has an unused slot with the same name this method used to have [selectParts]
    # [bruce 050517/060721 comment and docstring]
    self.selectChunksWithSelAtoms_noupdate()
    self.w.win_update()
def selectChunksWithSelAtoms_noupdate(self): #bruce 060721 renamed from pickParts; see also permit_pick_parts
    """
    Change this Part's assy to permit selected chunks, not atoms,
    but select all chunks which contained selected atoms; do no updates.
    [warning: not for general use -- doesn't change which select mode is in use]
    """
    #bruce 050517 added docstring
    # capture the selected atoms before unpicking them
    lis = self.selatoms.values()
    self.unpickatoms() # (not sure whether this is still always good, but probably it's ok -- bruce 060721)
    for atm in lis:
        # pick the chunk that owned each formerly-selected atom
        atm.molecule.pick()
    self.assy.set_selwhat(SELWHAT_CHUNKS) #bruce 050517 revised API of this call
        #bruce 050517: do this at the end, to avoid worry about whether
        # it is later given the side effect of unpickatoms.
        # It's redundant only if lis has any atoms.
    return
def permit_pick_parts(self): #bruce 050125; see also selectChunksWithSelAtoms_noupdate, but that can leave some chunks initially selected
    """
    Ensure it's legal to pick chunks using mouse selection, and deselect
    any selected atoms (if picking chunks does so).
    """
    #bruce 060414 revised this to try to fix bug 1819
    # (and perhaps related bugs like 1106, where atoms & chunks are both selected)
    if permit_atom_chunk_coselection(): #bruce 060721
        # atoms and chunks may be selected together: nothing to enforce here
        return
    if self.selatoms and self.assy.selwhat == SELWHAT_CHUNKS and env.debug():
        print "debug: bug: permit_pick_parts sees self.selatoms even though self.assy.selwhat == SELWHAT_CHUNKS; unpicking them"
        # Note: this happens during bug 1819, and indicates a bug in the code that led up to here,
        # probably something about selatoms being per-part, but selwhat (and MT part-switch conventions re selection)
        # being for the assy -- maybe we need to deselect atoms, not only chunks, when switching parts (not yet done).
        # In the meantime, warn only the developer, and try to fix the situation
        # by doing the following anyway (which the pre-060414 code did not).
    if self.selatoms:
        self.unpickatoms()
    self.assy.set_selwhat(SELWHAT_CHUNKS) # not super-fast (could optim using our own test), but that's ok here
    return
def permit_pick_atoms(self): #bruce 050517 added this for use in some mode Enter methods -- but not sure they need it!
    """
    Ensure it's legal to pick atoms using mouse selection, and deselect any
    selected chunks (if picking atoms does so).
    """
    if permit_atom_chunk_coselection(): #bruce 060721
        # atoms and chunks may be selected together: nothing to enforce here
        return
    ## if self.assy.selwhat != SELWHAT_ATOMS:
    if 1: # this matters, to callers who might have jigs selected
        self.unpickchunks() # only unpick chunks, not jigs. mark 060309.
        self.assy.set_selwhat(SELWHAT_ATOMS) #bruce 050517 revised API of this call
    return
# == selection functions using a mouse position
# REVIEW: we should probably move some of these, especially findAtomUnderMouse,
# to GraphicsMode instead (once it's split from basicMode), since they depend
# on model-specific graphical properties. [bruce 071008]
# (Note: some of these are not toplevel event handlers)
# dumb hack: find which atom the cursor is pointing at by
# checking every atom...
# [bruce 041214 comment: findpick is now mostly replaced by findAtomUnderMouse;
# its only remaining call is in depositMode.getcoords, which uses a constant
# radius other than the atoms' radii, and doesn't use iPic or iInv,
# but that too might be replaced in the near future, once bug 269 response
# is fully decided upon.
# Meanwhile, I'll make this one only notice visible atoms, and clean it up.
# BTW it's now the only caller of atom.checkpick().]
def findpick(self, p1, v1, r=None):
    """
    Return the visible atom whose checkpick distance along the ray
    (p1, v1) is smallest, or None if no atom is hit. Hidden chunks and
    atoms invisible in their chunk's display mode are skipped; r, if
    given, overrides the atoms' own radii in checkpick.
    """
    best_dist = 1000000
    best_atom = None
    for chunk in self.molecules:
        if chunk.hidden:
            continue
        dispdef = chunk.get_dispdef()
        for candidate in chunk.atoms.itervalues():
            if not candidate.visible(dispdef):
                continue
            d = candidate.checkpick(p1, v1, dispdef, r, None)
            # note: a false d (None, or an exact 0.0 distance) counts as
            # "no hit" here, matching the longstanding behavior
            if d and d < best_dist:
                best_dist = d
                best_atom = candidate
    return best_atom
def _decide_water_cutoff(self): #bruce 071008 split this out
    """
    Return the water_cutoff flag to pass to self.findAtomUnderMouse:
    True only while the Build Atoms (DEPOSIT) command is current.
    """
    # Whether the water surface is conceptually an aspect of the current
    # command or of the graphics mode is an open design question -- the
    # current rule just imitates the prior code: only DEPOSIT has water.
    # (Note that self.findAtomUnderMouse is not private and has external
    # callers which pass their own water_cutoff flag, so this can't just
    # be inlined into it.) [bruce 071008]
    #UPDATE 2008-08-01: Water surface is currently an aspect of the
    #command class rather than graphicsMode class. The graphicsmode checks
    #it by calling self.command.isWaterSurfaceEnabled() --[ Ninad comment]
    return self.win.commandSequencer.currentCommand.commandName == 'DEPOSIT'
# bruce 041214, for fixing bug 235 and some unreported ones:
def findAtomUnderMouse(self, event, water_cutoff = False, singlet_ok = False):
    """
    Return the atom (if any) whose front surface should be visible at the
    position of the given mouse event, or None if no atom is drawn there.
    This takes into account all known effects that affect drawing, except
    bonds and other non-atom things, which are treated as invisible.
    (Someday we'll fix this by switching to OpenGL-based hit-detection. #e)

    @note: if several atoms are drawn there, the correct one to return is
    the one that obscures the others at that exact point, which is not always
    the one whose center is closest to the screen!

    When water_cutoff is true, also return None if the atom you would
    otherwise return (more precisely, if the place its surface is touched by
    the mouse) is under the "water surface".

    Normally never return a singlet (though it does prevent returning
    whatever is behind it). Optional arg singlet_ok permits returning one.
    """
    # two points along the pick ray through the mouse position
    p1, p2 = self.o.mousepoints(event, 0.0)
    z = norm(p1-p2)
    if 1:
        # Build a rotation matrix whose third column is the pick-ray
        # direction z; x and y are arbitrary perpendiculars derived from
        # the glpane's up vector.
        # This computation of matrix is now doable (roughly) by geometry.matrix_putting_axis_at_z().
        # Once that's tested, these could probably be replaced by a call to it.
        # But this is not confirmed -- the question is whether we cared about this use of self.o.up
        # except as a convenient known perpendicular to z. If it matters, we can't use matrix_putting_axis_at_z here.
        # [bruce 060608 comment]
        x = cross(self.o.up,z)
        y = cross(z,x)
        matrix = transpose(V(x,y,z))
    point = p2
    # project p1 and p2 into ray coordinates; their z components bound the
    # depth range in which hits are accepted
    cutoffs = dot( A([p1,p2]) - point, matrix)[:,2]
    near_cutoff = cutoffs[0]
    if water_cutoff:
        far_cutoff = cutoffs[1]
        # note: this can be 0.0, which is false, so an expression like
        # (water_cutoff and cutoffs[1] or None) doesn't work!
    else:
        far_cutoff = None
    # collect (depth, atom) pairs from every non-hidden chunk
    z_atom_pairs = []
    for mol in self.molecules:
        if mol.hidden:
            continue
        pairs = mol.findAtomUnderMouse(point, matrix, \
            far_cutoff = far_cutoff, near_cutoff = near_cutoff )
        z_atom_pairs.extend( pairs)
    if not z_atom_pairs:
        return None
    z_atom_pairs.sort() # smallest z == farthest first; we want nearest
    res = z_atom_pairs[-1][1] # nearest hit atom
    if res.element == Singlet and not singlet_ok:
        # singlets block whatever is behind them, but are not returned
        return None
    return res
#bruce 041214 renamed and rewrote the following pick_event methods, as part of
# fixing bug 235 (and perhaps some unreported bugs).
# I renamed them to distinguish them from the many other "pick" (etc) methods
# for Node subclasses, with common semantics different than these have.
# I removed some no-longer-used related methods.
# All these methods should be rewritten to be more general;
# for more info, see comment about findAtomUnderMouse and jigGLSelect
# in def end_selection_curve in Select_GraphicsMode.py.
# [bruce 080917 comment]
def pick_at_event(self, event): #renamed from pick; modified
    """
    Add whatever visible atom or chunk (depending on self.selwhat) is
    under the mouse to the current selection; singlets are never picked.
    Emits a history message describing the newly selected item.
    """
    # [bruce 041227 moved the getinfo status messages here, from the Atom
    # and Chunk pick methods, since doing them there was too verbose
    # when many items were selected at the same time. Original message
    # code was by [mark 2004-10-14].]
    self.begin_select_cmd() #bruce 051031
    found = self.findAtomUnderMouse(event, water_cutoff = self._decide_water_cutoff())
    if not found:
        return
    if self.selwhat == SELWHAT_CHUNKS:
        if not self.selmols:
            self.selmols = []
            # bruce 041214 added that, since pickpart used to do it and
            # calls of that now come here; in theory it's never needed.
        found.molecule.pick()
        env.history.message(found.molecule.getinfo())
    else:
        assert self.selwhat == SELWHAT_ATOMS
        found.pick()
        env.history.message(found.getinfo())
    return
def delete_at_event(self, event):
    """
    Delete whatever visible atom or chunk (depending on self.selwhat) is
    under the mouse; singlets can never be deleted. The selection state of
    anything not deleted is left unchanged. Emits a history message about
    what (if anything) was deleted.
    """
    self.begin_select_cmd()
    target = self.findAtomUnderMouse(event, water_cutoff = self._decide_water_cutoff())
    if not target:
        return
    if self.selwhat == SELWHAT_CHUNKS:
        if not self.selmols:
            self.selmols = []
            # bruce 041214 added that, since pickpart used to do it and
            # calls of that now come here; in theory it's never needed.
        env.history.message("Deleted " + target.molecule.name)
        target.molecule.kill()
        return
    assert self.selwhat == SELWHAT_ATOMS
    if target.filtered():
        # Refusing to delete filtered atoms may be questionable UI design
        # (the user clicked a specific atom, and Undo could recover a
        # mistake -- see bruce 060331 comments and the similar code in
        # selectMode.py), but at least warn that nothing was done.
        env.history.message(orangemsg("Cannot delete " + str(target) + " since it is being filtered. "\
            "Hit Escape to clear the selection filter."))
    else:
        env.history.message("Deleted " + str(target) )
        target.kill()
    return
def onlypick_at_event(self, event): #renamed from onlypick; modified
    """
    Clear the glpane selection entirely, then select whatever visible atom
    or chunk (depending on self.selwhat) is under the mouse at event.
    If nothing is under the mouse, the selection simply ends up empty.
    """
    self.begin_select_cmd() #bruce 051031
    # unpickall_in_GLPane replaced the older selwhat-dependent combination
    # of unpickparts/unpickatoms calls [bruce 060721; see also the fix for
    # bug 606 / partial fix for bug 365, mark 050713]
    self.unpickall_in_GLPane()
    self.pick_at_event(event)
def unpick_at_event(self, event): #renamed from unpick; modified
    """
    Deselect whatever visible atom or chunk (depending on self.selwhat) is
    under the mouse at event (subject to the selection filter), leaving
    everything else selected as-is.
    """ #bruce 060331 revised docstring
    self.begin_select_cmd() #bruce 051031
    target = self.findAtomUnderMouse(event, water_cutoff = self._decide_water_cutoff())
    if not target:
        return
    if self.selwhat == SELWHAT_CHUNKS:
        target.molecule.unpick()
    else:
        assert self.selwhat == SELWHAT_ATOMS
        target.unpick() # this is subject to selection filter -- is that ok?? [bruce 060331 question]
    return
# == internal selection-related routines
def unpickatoms(self): #e [should this be private?] [bruce 060721]
    """
    Deselect all currently selected atoms (without changing selwhat or
    doing any updates).
    """ #bruce 050517 added docstring
    if not self.selatoms:
        return
    # Grab the dict and clear the attribute up front, so nothing can see a
    # half-modified selection while we mutate the atoms (bruce 060405
    # precaution; plain iteration over self.selatoms was probably safe but
    # looked fragile).
    old_selection = self.selatoms
    self.selatoms = {}
    touched_mols = {}
    for atom in old_selection.itervalues():
        # this inlines and optims Atom.unpick
        atom.picked = False
        _changed_picked_Atoms[atom.key] = atom #bruce 060321 for Undo (or future general uses)
        touched_mols[atom.molecule] = atom.molecule
    for mol in touched_mols: #bruce 090119 optimized, revised call
        mol.changed_selected_atoms()
    self.selatoms = {}
    return
def unpickparts(self): ##e this is misnamed -- should be unpicknodes #e [should this be private?] [bruce 060721]
    """
    Deselect every selected node (chunk, Jig, Group, ...) in this part,
    without changing selwhat or doing any updates.
    See also unpickchunks.
    """ #bruce 050517 added docstring; 060721 split out unpickchunks
    self.topnode.unpick()
    return
def unpickchunks(self): #bruce 060721 made this to replace the misnamed unpick_jigs = False option of unpickparts
    """
    Deselect every selected chunk in this part, without changing selwhat
    or doing any updates. Other node kinds (jigs, groups) are untouched.
    See also unpickparts.
    """
    # [bruce 060721 comment: the old unpick_jigs option to unpickparts was
    # misnamed, since there are selectable nodes other than jigs and chunks.]
    for chunk in self.molecules:
        if chunk.picked:
            chunk.unpick()
    return
def unpickall_in_GLPane(self): #bruce 060721
    """
    Deselect everything a click in empty GLPane space ought to deselect.
    As of 060721 that means "everything", though MT nodes never drawn in
    the GLPane could arguably remain selected. ###@@@
    """
    self.unpickatoms()
    self.unpickparts()
    return
def unpickall_in_MT(self): #bruce 060721
    """
    Deselect everything a click in empty Model Tree space ought to
    deselect. As of 060721 that means "all nodes"; whether it should also
    deselect atoms is undecided. ###@@@
    """
    self.unpickparts()
    return
def unpickall_in_win(self): #bruce 060721
    """
    Deselect everything that a general "unselect all" tool button or menu
    command should: all selectable things. Equivalent to doing both
    L{unpickall_in_GLPane()} and L{unpickall_in_MT()}.
    """
    self.unpickatoms()
    self.unpickparts()
    return
def begin_select_cmd(self):
    """
    Count one selection command on our assembly (used in pick-time records).
    """
    # Warning: same named method exists in assembly, GLPane, and ops_select,
    # with different implems. More info in comments in assembly version.
    # [bruce 051031]
    self.assy.begin_select_cmd()
    return
# ==
def selection_from_glpane(self): #bruce 050404 experimental feature for initial use in Minimize Selection; renamed 050523
    """
    Return a snapshot object for the current glpane-style selection
    (atoms and chunks), independent of part attrs... how long valid??
    See also selection_from_MT().
    """
    # The snapshot stays meaningful if the selection changes afterwards,
    # but its validity once the part contents themselves change is unclear
    # -- consider that part of the experiment for now...
    return selection_from_glpane( self)
def selection_for_all(self): #bruce 050419 for use in Minimize All; revised 050523
    """
    Return a selection object referring to all our atoms, regardless of
    (and without changing) the current selection.
    """
    return selection_for_entire_part( self)
def selection_from_MT(self): #bruce 050523; not used as of 080414, but might be someday
    """
    Return a snapshot of the Model-Tree-style selection (topmost selected
    nodes, no atoms) via the module-level selection_from_MT helper.
    """
    return selection_from_MT( self)
def selection_from_part(self, *args, **kws): #bruce 051005
    """
    Return the result of the module-level selection_from_part helper,
    passing self as the part along with any given args.
    """
    return selection_from_part(self, *args, **kws)
pass # end of class ops_select_Mixin (used in class Part)
# ==
def topmost_selected_nodes(nodes):
    """
    @param nodes: a list of nodes, to be examined for selected nodes or subnodes
    @type nodes: python sequence of Nodes
    @return: a list of all selected nodes in or under the given list of nodes,
             not including any node which is inside any selected Group
             in or under the given list
    @see: same-named method in class ModelTreeGui_api and class TreeModel_api
    """
    #bruce 050523 split this out from the same-named TreeWidget method,
    # and optimized it
    collected = []
    for node in nodes:
        # apply2picked calls its argument on each topmost picked node
        node.apply2picked( collected.append)
    return collected
# ==
def selection_from_glpane( part): #bruce 050523 split this out as intermediate helper function; revised 050523
    """
    Return a Selection snapshot of part's selected atoms and chunks.
    """
    # note: as of 080414 this is used only in sim_commandruns.py and MinimizeEnergyProp.py
    return Selection( part,
                      atoms = part.selatoms,
                      chunks = part.selmols )
def selection_from_MT( part): #bruce 050523; not used as of 080414, but might be someday
    """
    Return a Selection holding part's topmost selected nodes (no atoms).
    """
    return Selection( part,
                      atoms = {},
                      nodes = topmost_selected_nodes([part.topnode]) )
def selection_from_part( part, use_selatoms = True, expand_chunkset_func = None): #bruce 050523
    """
    Return a Selection of part's topmost selected nodes, optionally
    including its selected atoms (use_selatoms), and optionally expanded
    by expand_chunkset_func (see Selection.expand_chunkset).
    """
    # note: as of 080414 all ultimate uses of this are in ModelTree.py or ops_copy.py
    atoms = part.selatoms if use_selatoms else {}
    res = Selection( part,
                     atoms = atoms,
                     nodes = topmost_selected_nodes([part.topnode]) )
    if expand_chunkset_func:
        res.expand_chunkset(expand_chunkset_func)
    return res
def selection_for_entire_part( part): #bruce 050523 split this out, revised it
    """
    Return a Selection covering every chunk in part (no individual atoms).
    """
    return Selection( part,
                      atoms = {},
                      chunks = part.molecules )
def selection_from_atomlist( part, atomlist): #bruce 051129, for initial use in Local Minimize
    """
    Return a Selection consisting of exactly the given atoms.
    """
    return Selection( part,
                      atoms = atomdict_from_atomlist(atomlist) )
def atomdict_from_atomlist(atomlist): #bruce 051129 [#e should refile -- if I didn't already implement it somewhere else]
    """
    Given a list of atoms, return a dict mapping atom keys to those atoms.
    """
    return dict( (atom.key, atom) for atom in atomlist )
class Selection: #bruce 050404 experimental feature for initial use in Minimize Selection; revised 050523
    """
    Represent a "snapshot-by-reference" of the contents of the current selection,
    or any similar set of objects passed to the constructor.

    @warning: this remains valid (and unchanged in meaning) if the
    selection-state changes, but might become invalid if the Part contents
    themselves change in any way! (Or at least if the objects passed to the
    constructor (like chunks or Groups) change in contents (atoms or child
    nodes).)
    """ #bruce 051129 revised docstring
    def __init__(self, part, atoms = {}, chunks = [], nodes = []):
        """
        Create a snapshot-by-reference of whatever sets or lists of objects
        are passed in the args atoms, chunks, and/or nodes (see details and
        limitations below).

        Objects should not be passed redundantly -- i.e. they should not
        contain atoms or nodes twice, where we define chunks as containing
        their atoms and Group nodes as containing their child nodes.

        Objects must be of the appropriate types (if passed):
        atoms must be a dict mapping atom keys to atoms;
        chunks must be a list of chunks;
        nodes must be a list of nodes, thought of as disjoint node-subtrees
        (e.g. "topmost selected nodes").

        The current implem also prohibits passing both chunks and nodes lists,
        but this limitation is just for its convenience and can be removed
        when needed.

        The object-containing arguments are shallow-copied immediately, but the
        objects they contain are never copied, and in particular, the effect
        of changes to the set of child nodes of Group nodes passed in the nodes
        argument is undefined. (In initial implem, the effect depends on when
        self.selmols is first evaluated.)

        Some methods assume only certain kinds of object arguments were
        passed (see their docstrings for details).
        """
        # NOTE: the mutable default args ({}, []) are safe only because each
        # is shallow-copied below before being stored; don't mutate them.
        #bruce 051129 revised docstring -- need to review code to verify its accuracy. ###k
        # note: topnodes might not always be provided;
        # when provided it should be a list of nodes in the part compatible with selmols
        # but containing groups and jigs as well as chunks, and not containing members of groups it contains
        # (those are implicit)
        self.part = part
        ## I don't think self.topnode is used or needed [bruce 050523]
        ## self.topnode = part.topnode # might change...
        self.selatoms = dict(atoms) # copy the dict; it's ok that this does not store atoms inside chunks or nodes
        # For now, we permit passing chunks or nodes list but not both.
        if nodes:
            # nodes were passed -- store them, but let selmols be computed lazily
            # (self.selmols is then produced on demand by __getattr__)
            assert not chunks, "don't pass both chunks and nodes arguments to Selection"
            self.topnodes = list(nodes)
            # selmols will be computed lazily if needed
            # (to avoid needlessly computing it, we don't assert not (self.selatoms and self.selmols))
        else:
            # chunks (or no chunks and no nodes) were passed -- store as both selmols and topnodes
            self.selmols = list(chunks) # copy the list
            self.topnodes = self.selmols # use the same copy we just made
            if (self.selatoms and self.selmols) and debug_flags.atom_debug: #e could this change? try not to depend on it
                print_compact_traceback( "atom_debug: self.selatoms and self.selmols: " ) #bruce 051129, replacing an assert
        return
    def nonempty(self): #e make this the object's boolean value too?
        # whether this selection contains any atoms or nodes at all
        # assume that each selmol has some real atoms, not just singlets! Should always be true.
        return self.selatoms or self.topnodes #revised 050523
    def atomslist(self):
        """
        Return a list of all selected real atoms, whether selected as atoms or
        in selected chunks; no singlets or jigs.
        """
        #e memoize this!
        # [bruce 050419 comment: memoizing it might matter for correctness
        # if mol contents change, not only for speed. But it's not yet needed,
        # since in the only current use of this, the atomslist is grabbed once
        # and stored elsewhere.]
        if self.selmols:
            res = dict(self.selatoms) # dict from atom keys to atoms
            for mol in self.selmols:
                # we'll add real atoms and singlets, then remove singlets
                # (probably faster than only adding real atoms, since .update is one bytecode
                #  and (for large mols) most atoms are not singlets)
                res.update(mol.atoms)
                for s in mol.singlets:
                    del res[s.key]
        else:
            res = self.selatoms
        items = res.items()
        items.sort() # sort by atom key; might not be needed
        return [atom for key, atom in items]
    def __getattr__(self, attr): # in class Selection
        # lazily compute the attributes not always set by __init__
        if attr == 'selmols':
            # compute from self.topnodes -- can't assume selection state of self.part
            # is same as during our init, or even know whether it was relevant then.
            res = []
            def func(node):
                if isinstance(node, Chunk):
                    res.append(node)
                return # from func
            for node in self.topnodes:
                node.apply2all(func)
            self.selmols = res # cache, so __getattr__ only runs once for this attr
            return res
        elif attr == 'selmols_dict': #bruce 050526
            # id(chunk) -> chunk, for fast membership tests in picks_atom etc.
            res = {}
            for mol in self.selmols:
                res[id(mol)] = mol
            self.selmols_dict = res # cache, as above
            return res
        raise AttributeError, attr
    def picks_atom(self, atom): #bruce 050526
        """
        Does this selection include atom, either directly or via its chunk?
        """
        return atom.key in self.selatoms or id(atom.molecule) in self.selmols_dict
    def picks_chunk(self, chunk): #bruce 080414
        """
        Does this selection include chunk, either directly or via a containing
        Group in topnodes?
        """
        return id(chunk) in self.selmols_dict
    def add_chunk(self, chunk): #bruce 080414
        """
        If not self.picks_chunk(chunk), add chunk to this selection
        (no effect on external selection state i.e. chunk.picked).
        Otherwise do nothing.
        """
        if not self.picks_chunk(chunk):
            self.selmols_dict[id(chunk)] = chunk
            self.selmols.append(chunk)
            # sometimes self.selmols is the same mutable list as self.topnodes.
            # if so, preserve this, otherwise also add to self.topnodes
            if self.topnodes and self.topnodes[-1] is chunk:
                pass
            else:
                self.topnodes.append(chunk)
            # (note: modifying those attrs is only permissible because
            #  __init__ shallow-copies those lists)
            pass
        return
    def describe_objects_for_history(self):
        """
        Return a string like "5 items" but more explicit if possible, for use
        in history messages.
        """
        if self.topnodes:
            res = fix_plurals( "%d item(s)" % len(self.topnodes) )
            #e could figure out their common class if any (esp. useful for Jig and below); for Groups say what they contain; etc
        elif self.selmols:
            res = fix_plurals( "%d chunk(s)" % len(self.selmols) )
        else:
            res = ""
        if self.selatoms:
            if res:
                res += " and "
            res += fix_plurals( "%d atom(s)" % len(self.selatoms) )
            #e could say "other atoms" if the selected nodes contain any atoms
        return res
    def expand_atomset(self, ntimes = 1): #bruce 051129 for use in Local Minimize; compare to selectExpand
    	"""
        Expand self's set of atoms (to include all their real-atom neighbors)
        (repeating this ntimes), much like "Expand Selection" but using no
        element filter, and of course having no influence by or effect on
        "current selection state" (atom.picked attribute).

        Ignore issue of self having selected chunks (as opposed to the atoms
        in them); if this ever becomes possible we can decide how to generalize
        this method for that case (ignore them, turn them to atoms, etc).

        @warning: Current implem [051129] is not optimized for lots of atoms
        and ntimes > 1 (which doesn't matter for its initial use).
        """
        assert not self.selmols and not self.topnodes # (since current implem would be incorrect otherwise)
        atoms = self.selatoms # mutable dict, modified in following loop
        # [name 'selatoms' is historical, but also warns that it doesn't include atoms in selmols --
        #  present implem is only correct on selection objects made only from atoms.]
        for i in range(ntimes):
            for a1 in atoms.values(): # this list remains fixed as atoms dict is modified by this loop
                for a2 in a1.realNeighbors():
                    atoms[a2.key] = a2 # analogous to a2.pick()
        return
    def expand_chunkset(self, func): #bruce 080414
        """
        func can be called on one chunk to return a list of chunks.
        Expand the set of chunks in self (once, not recursively)
        by including all chunks in func(orig) for orig being any chunk
        originally in self (when this is called). Independently protect
        from exceptions in each call of func, and also from the bug
        of func returning None.
        """
        for chunk in list(self.selmols):
            try:
                more_chunks = func(chunk)
                assert more_chunks is not None
            except:
                print_compact_traceback("ignoring exception in, or returning of None by, %r(%r): " % (func, chunk))
                more_chunks = ()
            if more_chunks:
                for chunk2 in more_chunks:
                    self.add_chunk(chunk2)
            continue
        return
    pass # end of class Selection
# end
| NanoCAD-master | cad/src/operations/ops_select.py |
# Copyright 2004-2007 Nanorex, Inc. See LICENSE file for details.
"""
op_select_doubly.py -- workhorse function for the Select Doubly operation.
Needs some comment-cleanup. And some potential optimizations described herein
are worth doing if this op's speed ever matters. And it should now be allowed
to coexist with the Selection Filter. [That is now allowed as of bruce 050629.]
$Id$
History:
bruce 050520 wrote this from scratch, to make Select Doubly fast and correct.
(Before that it was always slow, and was incorrect since at least Alpha1.)
For more detailed history and notes see cvs rev. 1.1 or 1.2 of this file.
(At some point I should edit the comments therein into a new separate text file.)
This algorithm is linear time (in number of bonds 1-connected to initially selected atoms).
For an explanation of why this alg is correct, assuming you already know basically
what it's trying to do and how (which is not documented here, sorry), see a long comment below
(which was shortened in cvs rev 1.3, so see an earlier rev for the proof).
For an overview of how it works, wait until I have more time to document it.
bruce 050629 rewrote two methods to make them non-recursive, to fix bug 725
by not using Python stack when there are lots of atoms traversed.
Some cleanup of this would be useful, and some optims are still possible.
"""
__author__ = "bruce"
class twoconner: #e rename
"""Scan connected parts of a graph to find out which edges are cut-edges for those parts.
Later, use that info to traverse doubly-connected subgraphs from given nodes.
Also keep info which would later let you quickly report whether two nodes are 1-connected, if anyone asked.
(More generally, info which would let you quickly map any node to a fixed representative node in its connected component.)
(But no method for making this query is implemented yet, and not quite enough info is kept,
though comments explain how to fix that, which would be easy. #e)
"""
def __init__(self):
    """
    Set up empty traversal state: no nodes numbered yet (not even N1),
    no edges proven to be cutedges, and 1 as the next node number.
    """
    # node -> assigned visit number (presence also marks node as visited)
    self.nodenums = {}
    # proven cutedges as keys (directed edge tuples), values of 1
    self.cutedges = {}
    #e would it be useful to use returning i as value?
    #k still don't know if one edge can get here in both directions
    self.i = 1 # the first nodenum to assign
    #e or could store a list of i values which separate connected components -- probably useful
def start(self, N1):
    """
    Traverse the connected component containing N1 (unless we already
    did) and record all its cutedges in self.cutedges.
    Return True if that component had not been traversed before, else
    False. [#e could return i - i1]
    """
    # (Do we need to use diff value of i each time? doesn't matter, i think --
    #  i'll use one in case it helps track connected components later)
    if self.nodenums.get(N1): ###e optim: use id(N1) if N1 is a python object (e.g. an atom)
        return False
    i1 = self.i
    # fake prior node (None) -- not anyone's neighbor, so the first
    # directed edge leads "into" N1 from nowhere
    first_diredge = (None, N1)
    num, next_i = self.traverse_nonrec( first_diredge, i1)
    self.i = next_i
    # print "that conncomp had %d nodes" % (next_i - i1)
    assert num == i1
    # next_i - i1 is the number of nodes seen in N1's connected component,
    # but the main result at this point is the dict self.cutedges
    return True #e could return i - i1
def traverse_nonrec(self, diredge, i): #bruce 050629, part of fixing bug 725
    """
    Non-recursive version of the traverse method (see the OLD_RECURSIVE
    version below for the full algorithm docstring). Simulates the
    recursion with an explicit stack so deep structures can't overflow
    the Python stack (the cause of bug 725). Returns (num, i): the lowest
    node number seen during the traversal, and the next number to assign.
    """
    # Each stack entry is a tagged tuple: tag 0 = "start a simulated
    # traverse call on this diredge"; tag 1 = "resume a call's loop over
    # its remaining itinerary". retval carries the simulated return value
    # between entries.
    stack = [(0, diredge)] # this stack of partly-done traverse calls replaces the Python stack.
    del diredge
    retval = None
    # i changes during the loop but need not appear in the stack
    while stack:
        top = stack.pop()
        if top[0] == 0:
            # start of a simulated traverse call
            zerojunk, diredge = top
            priornode, N = diredge
            # setdefault both reads N's number (if visited) and assigns i (if not)
            num = self.nodenums.setdefault(N, i)
            ## print "nodenums[%r] is %r, setdefault arg was %r" % (N, num, i)
            if num < i:
                # N was visited before: bounce back with its number
                retval = num, i
                continue #e optim: could have caller do the part up to here
            assert num == i == self.nodenums[N]
            Ni = i
            i += 1
            itinerary = self.neighbors_except(N, priornode) #e could optim when itinerary is empty
            ###e or better, could optim by not visiting itinerary elts already with nodenums, just grabbing min of those.
            # order is arbitrary, so it doesn't matter that we'll scan itinerary
            # in reverse compared to recursive version; if it did, we'd reverse it here
            todo = (1, itinerary, N, num, Ni, diredge) # some of this might be redundant, e.g. N and diredge
            stack.append(todo)
            retval = None
            continue
        # else, continuation of a simulated traverse call, in the loop on itinerary;
        # if retval is None it's starting the loop, otherwise it just got retval from the recursive traverse call
        onejunk, itinerary, N, num, Ni, diredge = top
        if retval is not None:
            # fold the simulated sub-call's result into our running minimum
            subnum, i = retval
            num = min(num, subnum)
        if itinerary:
            # more to do (or we might be just starting)
            neighbor = itinerary.pop()
            # note: itinerary is mutable, and comes on and off stack, but that should be ok
            # since it's never on there in more than one place at once
            # simulate a recursive call of traverse, but first put our own call's continuation state on the stack
            todo = (1, itinerary, N, num, Ni, diredge)
            stack.append(todo) # our own continuation
            todo = (0, (N, neighbor))
            stack.append(todo) # the recursive call (also requires retval = None)
            retval = None
            continue
        # end of the loop in traverse
        if num == Ni:
            # no path from N's subtree reached above N except via diredge,
            # so diredge is a cutedge
            self.cutedges[diredge] = 1
        retval = num, i
        continue
    # done
    return retval
def traverse_OLD_RECURSIVE_VERSION(self, diredge, i):
    # no longer used, but keep for docstring and comments, for now [bruce 050629]
    # NOTE(review): this body calls self.traverse, which no longer exists
    # under that name (it became traverse_nonrec) -- harmless while this
    # method stays unused, but confirm before ever reviving it.
    # to optimize(??), pass the instvars rather than self (like for i), and split diredge in two
    """
    As this call starts, we're crossing diredge for the first time in either direction,
    but we don't yet know whether we've previously visited the node N at other side or not.
    (When this call ends, we'll be coming back across diredge in the other direction,
    bringing a "lowest number seen during this call" and a new "next number to assign to a node" i.)

    If we have seen that node N we're heading toward,
    make note of its number and bounce back right away (returning that number, and unchanged i).

    If we have never seen it, assign it the number i (which marks it as visited/seen now; do i+=1 for later),
    and for each of its other edges (none of which have been traversed either way, to start with),
    processed sequentially in some order, if by the time we get to that edge we *still* haven't
    traversed it (by coming across it into N from elsewhere, seeing N marked, and bouncing back out across it),
    then traverse it using this method recursively; when that loop is done we can finally return,
    traversing back out of N for the last time, backwards on diredge, and carrying the minimum number
    of all node-numbers seen.

    But one more thing: from all info we have as we return, figure out whether we've just
    noticed a proof that diredge is a cutedge, and record that fact if so. As explained elsewhere,
    all actual cutedges will get noticed this way.
    """
    priornode, N = diredge # why not let diredge be no more than this pair?
    num = self.nodenums.setdefault(N, i) # if visited, num says the number; if not, num is i and N is marked
    if num < i:
        # been there before
        ###k will we notice whether THIS diredge is a cutedge?? Maybe only when we cross it the other way?? (no...)
        return num, i
    # never been there before, so we're here (at N) now, and i is now assigned to it by setdefault
    assert num == i == self.nodenums[N]
    Ni = i # cache the value of self.nodenums[N] (just an optim to avoid looking it up later)
    i += 1
    itinerary = self.neighbors_except(N, priornode) # the neighbors to visit (along the diredges (N, neighbor))
    # As we begin the loop, no edge from N to a neighbor has been traversed either way,
    # but this doesn't mean those neighbors haven't been visited before (using some other edges into and out of them)!
    # And once inside the loop, we can't even say the edges haven't been traversed.
    # Suppose some edge has been traversed (into us and out again) -- will it cause any harm to traverse it again?
    # We'll definitely bounce right back (seeing the node visited), so the Q is, is it correct to record that number then.
    # Not sure yet, but for now assume it is ok! (Since it'll simplify the alg a lot to not record traversed edges.) ###k
    for neighbor in itinerary:
        subnum, i = self.traverse( (N, neighbor), i ) #e optim: faster to pass the pieces of diredge separately
        num = min(num, subnum) # should be ok to start with old value of i in num, returning min node we saw including N ###k
    # == Here is where we record some magic info related to cut edges....
    # for explanation (and proof of this alg's correctness), see the longer version of this comment
    # in cvs rev. 1.1 or 1.2 of this file.
    if num == Ni:
        self.cutedges[diredge] = 1 #k could an edge get entered into this twice (once in each direction)?? ###k [i don't think so]
    return num, i
def neighbors_except(self, N, priornode): # morally this should be an init param or subclass method (but speed > abstractness here)
    """
    Return a list of all N's neighbors other than priornode (all of them
    when priornode is None). Neighbors are compared with 'is'.
    """
    #e assume N is an atom; this justifies using 'is' and also is why we want to (for speed)
    return [neighbor for neighbor in N.realNeighbors() if neighbor is not priornode]
    #e might optim by chopping monovalent neighbors right here?
    # nontrivial to work out all implications of this... but i bet it's significantly faster,
    # esp. if we'd revise our class atom code to actually store the bonds in separate lists
    # to the monovalent and other neighbors! (And maybe to store neighbors rather than storing bonds? Not sure...)
    # Anyway, we *do* need to chop singlets! And we might as well chop monovalent neighbors if we make sure N1 in .start is never one
    # ie specialcase if it is. ####@@@@ DOIT.
def apply_to_2connset_nonrec( self, N, func): #bruce 050629, part of fixing bug 725
    """
    Non-recursive version of apply_to_2connset: apply func to every node
    reachable from N without crossing a cutedge (in either direction).
    (Ought to be rewritten to use a non-recursive transcloser helper function
    made from Select Connected code.)
    """
    seen = {N: 1} # maps each node already queued to 1 ##e could optim by using id(node)?
    frontier = [N]
    # breadth-first sweep, one "level" of nodes at a time
    while frontier:
        next_frontier = []
        for node in frontier:
            assert self.nodenums.get(node), "self.nodenums should contain N (%r) but doesn't, or contains false %r" % (node, self.nodenums.get(node))
            assert seen.get(node)
            func(node)
            for nbr in self.neighbors_except( node, None):
                if seen.get(nbr):
                    continue
                if (node, nbr) in self.cutedges or (nbr, node) in self.cutedges:
                    # don't cross a cutedge in either direction
                    continue
                seen[nbr] = 1
                next_frontier.append(nbr)
        frontier = next_frontier
    return
def apply_to_2connset_OLD_RECURSIVE_VERSION( self, N, func, didem = None):
    # no longer used, but keep for docstring and comments, for now [bruce 050629]
    """Apply func to the 2-connected set including N (or to N alone if all its edges are cutedges).
    Only works if we previously ran self.start(N1) in a way which hits N
    (we assert this, to detect the error).
    [Private: didem can be a dict of N's we already hit, for recursion.
    It would NOT be correct to try to optim select_doubly_f by passing its resdict as our didem dict
    (even if their values were compatible), since we make some assumptions about our didem dict
    which are not true for arbitrary atomsets like (I suspect -- not sure) might be in resdict.
    Or maybe I'm confused and this worry only applies to atomlist and passing resdict would work fine.
    If so, it'd optim this enough to do it sometime, perhaps. ##e]
    """
    assert self.nodenums.get(N), "self.nodenums should contain N (%r) but doesn't, or contains false %r" % (N, self.nodenums.get(N))
    if didem is None:
        didem = {}
    func(N)
    didem[N] = 1
    for neighbor in self.neighbors_except( N, None): #e optim getting that list
        if didem.get(neighbor) or (N, neighbor) in self.cutedges or (neighbor, N) in self.cutedges:
            #e optim(?? or not, if extra memory usage is bad): store it both ways at once (or, sorted way? sounds slow)
            continue #e optim: right here we could check if neighbor was monovalent
        # Bugfix: this used to recurse via self.apply_to_2connset, a name which
        # no longer exists (the method was renamed; the surviving
        # apply_to_2connset_nonrec has a different signature), so any caller
        # reviving this code would hit an AttributeError at the first recursion.
        # Recurse by this method's own (renamed) name instead.
        self.apply_to_2connset_OLD_RECURSIVE_VERSION( neighbor, func, didem)
    return
pass
def select_doubly_transcloser(atomlist):
    """Return a list of atoms for which there is some ring of bonds
    (ok if that ring hits some atoms twice, but it can't include any bonds twice, even in opposite directions)
    containing both that atom and some atom in atomlist.
    """ #obs comments about func
    collected = {}
    analyzer = twoconner()
    # Pass 1: show the analyzer every connected component touched by atomlist.
    # (start() returns whether the atom revealed a new connected component,
    # but that is not useful here.) Afterwards it knows which edges are cutedges.
    for atom in atomlist:
        analyzer.start(atom)
    # Pass 2: transitively close over the non-cutedges. Skipping atoms already
    # collected is the needed optim for highly redundant atomlists.
    def _collect(a2, _d = collected):
        _d.setdefault(a2, a2)
    for atom in atomlist:
        if atom not in collected:
            analyzer.apply_to_2connset_nonrec( atom, _collect)
            # (doesn't matter if this applies _collect to atom when it's isolated)
    # What about the desire to also apply func to monovalent neighbors?
    # Well, nothing prevents caller's func from doing this on its own!
    # No need to analyzer.destroy() (tho it holds strongrefs to atoms)
    # since no one refers into it, except our localvar.
    return collected.values()
def select_doubly_func(atom):
    """Pick atom, and also pick each of its monovalent real neighbors."""
    #e BTW, IMHO picking only monovalent neighbors is not the best thing to do --
    # should pick all "pure trees" hanging off this 2connset.
    atom.pick()
    monovalent_neighbors = [n for n in atom.realNeighbors() if len(n.bonds) == 1]
    for n in monovalent_neighbors:
        n.pick()
    return
def select_doubly(atomlist): #e 1st try is slow if you pass it a highly redundant atomlist. Need to track which ones we picked...
    """
    Pick every atom which shares some ring of bonds with an atom in atomlist,
    plus the monovalent neighbors of those atoms.
    Uses atom.pick() directly (not "real picking") in order to be
    compatible with the Selection Filter.
    """
    # Use an explicit loop rather than map() for the side effects:
    # clearer, and still correct on interpreters where map is lazy.
    for atom in select_doubly_transcloser( atomlist):
        select_doubly_func( atom)
    return
# end
| NanoCAD-master | cad/src/operations/op_select_doubly.py |
# Copyright 2004-2008 Nanorex, Inc. See LICENSE file for details.
"""
ops_modify.py provides modifySlotsMixin for MWsemantics,
with modify slot methods and related helper methods.
@author: Mark
@version: $Id$
@copyright: 2004-2008 Nanorex, Inc. See LICENSE file for details.
Note: many ops_*.py files provide mixin classes for Part,
not for MWsemantics like this one.
History:
mark 2008-02-02 split this out of MWsemantics.py.
"""
from utilities import debug_flags
from utilities.debug import reload_once_per_event
class modifySlotsMixin:
"""
Mixin class to provide modify-related methods for class MWsemantics.
Has slot methods and their helper methods.
"""
def modifyAdjustSel(self):
"""
Adjust the current selection.
"""
if debug_flags.atom_debug:
print "debug: reloading sim_commandruns on each use, for development"
import simulation.sim_commandruns as sim_commandruns
reload_once_per_event(sim_commandruns)
from simulation.sim_commandruns import Minimize_CommandRun
cmdrun = Minimize_CommandRun( self, 'Sel', type = 'Adjust')
cmdrun.run()
return
def modifyAdjustAll(self):
"""
Adjust all atoms.
"""
if debug_flags.atom_debug:
print "debug: reloading sim_commandruns on each use, for development"
import simulation.sim_commandruns as sim_commandruns
reload_once_per_event(sim_commandruns)
from simulation.sim_commandruns import Minimize_CommandRun
cmdrun = Minimize_CommandRun( self, 'All', type = 'Adjust')
cmdrun.run()
return
def modifyCheckAtomTypes(self):
"""
Check Atom Types for all atoms.
"""
from simulation.sim_commandruns import CheckAtomTypes_CommandRun
cmdrun = CheckAtomTypes_CommandRun(self)
cmdrun.run()
return
def modifyHydrogenate(self):
"""
Add hydrogen atoms to each singlet in the selection.
"""
self.assy.modifyHydrogenate()
# remove hydrogen atoms from selected atoms/molecules
def modifyDehydrogenate(self):
"""
Remove all hydrogen atoms from the selection.
"""
self.assy.modifyDehydrogenate()
def modifyPassivate(self):
"""
Passivate the selection by changing surface atoms to eliminate singlets.
"""
self.assy.modifyPassivate()
def modifyDeleteBonds(self):
"""
Delete all bonds between selected and unselected atoms or chunks.
"""
self.assy.modifyDeleteBonds()
def modifyStretch(self):
"""
Stretch/expand the selected chunk(s).
"""
self.assy.Stretch()
def modifySeparate(self):
"""
Form a new chunk from the selected atoms.
"""
self.assy.modifySeparate()
def modifyMerge(self):
"""
Create a single chunk from two of more selected chunks.
"""
self.assy.merge()
self.win_update()
def makeChunkFromAtom(self):
"""
Create a new chunk from the selected atoms.
"""
self.assy.makeChunkFromSelectedAtoms()
self.win_update()
def modifyInvert(self):
"""
Invert the atoms of the selected chunk(s).
"""
self.assy.Invert()
def modifyMirror(self):
"""
Mirrors the selected chunks through a Plane (or Grid Plane).
"""
self.assy.Mirror()
def modifyAlignCommonAxis(self):
"""
Align selected chunks to the computed axis of the first chunk.
"""
self.assy.align()
self.win_update()
def modifyCenterCommonAxis(self):
"""
Same as "Align to Common Axis", except that it moves all the selected
chunks to the center of the first selected chunk after
aligning/rotating the other chunks.
"""
# This is still not fully implemented as intended. Instead of moving all the selected
# chunks to the center of the first selected chunk, I want to have them moved to the closest
# (perpendicular) point of the first chunk's axis. I've studied and understand the math involved;
# I just need to implement the code. I plan to ask Bruce for help since the two of us will get it
# done much more quickly together than me doing it alone.
# Mark 050829.
self.assy.alignmove()
self.win_update()
| NanoCAD-master | cad/src/operations/ops_modify.py |
# Copyright 2004-2008 Nanorex, Inc. See LICENSE file for details.
"""
ops_display.py provides displaySlotsMixin for MWsemantics,
with display slot methods and related helper methods.
@author: Mark
@version: $Id$
@copyright: 2004-2008 Nanorex, Inc. See LICENSE file for details.
Note: most other ops_*.py files provide mixin classes for Part,
not for MWsemantics like this one.
History:
mark 2008-02-02 split this out of MWsemantics.py.
"""
import foundation.env as env
# Keep these in the same order that they are defined in constants.py.
# It really helps since some lists are dependent on the display mode order.
# Mark 2008-02-13.
from utilities.constants import diDEFAULT
from utilities.constants import diINVISIBLE
from utilities.constants import diLINES
from utilities.constants import diTUBES
from utilities.constants import diBALL
from utilities.constants import diTrueCPK
from utilities.constants import diDNACYLINDER
from utilities.constants import diCYLINDER
from utilities.constants import diSURFACE
from PyQt4.Qt import Qt, QColorDialog, QColor
from utilities.Log import greenmsg, redmsg, orangemsg
from commands.ElementColors.elementColors import elementColors
elementColorsWin = None
class displaySlotsMixin:
"""
Mixin class to provide display-related methods for class MWsemantics.
Has slot methods and their helper methods.
"""
# methods to set display style of whatever is selected
def dispDefault(self):
"""
Sets the display style of the selection to I{Default}.
"""
self.setDisplayStyle_of_selection(diDEFAULT)
def dispInvis(self):
"""
Sets the display style of the selection to I{Invisible}.
"""
self.setDisplayStyle_of_selection(diINVISIBLE)
def dispLines(self):
"""
Sets the display style of the selection to I{Lines}.
"""
self.setDisplayStyle_of_selection(diLINES)
def dispTubes(self):
"""
Sets the display style of the selection to I{Tubes}.
"""
self.setDisplayStyle_of_selection(diTUBES)
def dispBall(self): #e this slot method (here and in .ui file) renamed from dispCPK to dispBall [bruce 060607]
"""
Sets the display style of the selection to I{Ball and Stick}.
"""
self.setDisplayStyle_of_selection(diBALL)
def dispCPK(self): #e this slot method (here and in .ui file) renamed from dispVdW to dispCPK [bruce 060607]
"""
Sets the display style of the selection to I{CPK} (space fill).
"""
self.setDisplayStyle_of_selection(diTrueCPK)
def dispDnaCylinder(self):
"""
Sets the display style of the selection to I{DNA Cylinder}.
"""
# This code was copied from dispCylinder(). Mark 2008-02-13.
cmd = greenmsg("Set Display DNA Cylinder: ")
if self.assy and self.assy.selatoms:
env.history.message(cmd + "Selected atoms cannot have their display mode set to DNA Cylinder.")
return
self.setDisplayStyle_of_selection(diDNACYLINDER)
def dispCylinder(self):
"""
Sets the display style of the selection to I{Cylinder}.
@note: I{Cylinder} is an experimental display style. It is disabled
by default. It can be enabled setting the debug (menu) pref
"enable CylinderChunks next session?" to True.
"""
cmd = greenmsg("Set Display Cylinder: ")
if self.assy and self.assy.selatoms:
# Fixes bug 2005. Mark 060702.
env.history.message(cmd + "Selected atoms cannot have their display mode set to Cylinder.")
return #ninad 061003 fixed bug 2286... Note: Once atoms and chunks are allowed to be sel at the same
#time , this fix might need further mods.
self.setDisplayStyle_of_selection(diCYLINDER)
def dispSurface(self):
"""
Sets the display style of the selection to I{Surface}.
@note: I{Surface} is an experimental display style. It is disabled
by default. It can be enabled setting the debug (menu) pref
"enable CylinderChunks next session?" to True.
"""
cmd = greenmsg("Set Display Surface: ")
if self.assy and self.assy.selatoms:
# Fixes bug 2005. Mark 060702.
env.history.message(cmd + "Selected atoms cannot have their display mode set to Surface.")
return #ninad 061003 fixed bug 2286
self.setDisplayStyle_of_selection(diSURFACE)
def dispHybrid(self): #@@ Ninad 070308
print "Hybrid display is Implemented yet"
pass
# Hide/unhide
def dispHide(self):
"""
Slot for "Hide" which hides the current selection.
This operation works on atoms, chunks and/or any other object that
can be hidden.
"""
self.assy.hideSelection()
def dispUnhide(self):
"""
Slot for "Unhide" which unhides the current selection.
This operation works on atoms, chunks and/or any other object that
can be hidden.
"""
self.assy.unhideSelection()
def setDisplayStyle_of_selection(self, display_style): #bruce 080910 revised this
"""
Set the display style of the selection to I{display_style}.
@param display_style: desired display style code
@type display_style: int
"""
if self.assy and self.assy.selatoms:
for ob in self.assy.selatoms.itervalues():
ob.setDisplayStyle(display_style)
elif self.assy and self.assy.selmols:
for ob in self.assy.selmols:
ob.setDisplayStyle(display_style)
elif 0: # Keep in case we decide to offer a user preference. --Mark 2008-03-16
# Nothing is selected, so change the global display style.
self.glpane.setGlobalDisplayStyle(display_style)
else:
cmd = greenmsg("Set display style: ")
msg = "No atoms or chunks selected. Nothing changed."
env.history.message(cmd + msg)
return
self.win_update() # bruce 041206, needed for model tree display mode icons, as well as glpane
return
def dispObjectColor(self, initialColor = None):
"""
Sets the color of the selected chunks and/or jigs to a color the user
chooses.
@param initialColor: the initial color to display in the color chooser
dialog, or None or missing to use the default (white).
Not used if only one chunk or one jig is selected
(in those cases the object's current color is used).
@type initialColor: QColor
@note: Need better method name (i.e. setObjectColor()).
"""
if initialColor is None:
initialColor = Qt.white
else:
assert isinstance(initialColor, QColor)
_cmd = greenmsg("Change Color: ")
from operations.ops_select import objectSelected, ATOMS, CHUNKS, JIGS
if not objectSelected(self.assy, objectFlags = CHUNKS | JIGS):
if objectSelected(self.assy, objectFlags = ATOMS):
_msg = redmsg("Cannot change color of individual atoms.")
else:
_msg = redmsg("Nothing selected.")
env.history.message(_cmd + _msg)
return
_numSelectedObjects = self.assy.getNumberOfSelectedChunks() \
+ self.assy.getNumberOfSelectedJigs()
if _numSelectedObjects == 1 and self.assy.getNumberOfSelectedChunks() == 1:
# If only one object is selected, and it's a chunk,
# assign initialColor its color.
_selectedChunkColor = self.assy.selmols[0].color
if _selectedChunkColor:
from widgets.widget_helpers import RGBf_to_QColor
initialColor = RGBf_to_QColor(_selectedChunkColor)
elif _numSelectedObjects == 1 and self.assy.getNumberOfSelectedJigs() == 1:
# If only one object is selected, and it's a jig,
# assign initialColor its color.
_selectedJig = self.assy.getSelectedJigs()
_selectedJigColor = _selectedJig[0].normcolor
if _selectedJigColor:
from widgets.widget_helpers import RGBf_to_QColor
initialColor = RGBf_to_QColor(_selectedJigColor)
_c = QColorDialog.getColor(initialColor, self)
if _c.isValid():
from widgets.widget_helpers import QColor_to_RGBf
_newColor = QColor_to_RGBf(_c)
list = []
for ob in self.assy.selmols:
ob.setcolor(_newColor)
list.append(ob)
for ob in self.assy.getSelectedJigs():
ob.color = _newColor # Need jig.setColor() method! --mark
ob.normcolor = _newColor
list.append(ob)
# Ninad 070321: Since the chunk is selected as a colored selection,
# it should be unpicked after changing its color.
# The user has most likely selected the chunk to change its color
# and won't like it still shown 'green'(the selection color)
# even after changing the color. so deselect it.
# The chunk is NOT unpicked IF the color is changed via chunk
# property dialog. see ChunkProp.change_chunk_color for details.
# This is intentional.
for ob in list:
ob.unpick()
self.win_update()
def dispResetChunkColor(self):
"""
Resets the color of any selected jigs to their default color
and the color of any selected chunk's to their atom's (default)
element colors.
"""
_selectedChunks = self.assy.selmols
_selectedJigs = self.assy.getSelectedJigs()
if not _selectedChunks and not _selectedJigs:
env.history.message(redmsg("Reset Color: No chunks or jigs selected."))
return
for chunk in _selectedChunks:
chunk.setcolor(None)
for jig in _selectedJigs:
jig.color = jig.normcolor
self.glpane.gl_update()
def dispResetAtomsDisplay(self):
"""
Resets the display setting for each atom in the selected chunks or
atoms to Default display mode.
"""
cmd = greenmsg("Reset Atoms Display: ")
msg = "No atoms or chunks selected."
if self.assy.selmols:
self.assy.resetAtomsDisplay()
msg = "Display setting for all atoms in selected chunk(s) reset" \
" to Default (i.e. their parent chunk's display mode)."
if self.assy.selectionContainsAtomsWithOverriddenDisplay():
for a in self.assy.selatoms.itervalues(): #bruce 060707 itervalues
if a.display != diDEFAULT:
a.setDisplayStyle(diDEFAULT)
msg = "Display setting for all selected atom(s) reset to Default" \
" (i.e. their parent chunk's display mode)."
env.history.message(cmd + msg)
def dispShowInvisAtoms(self):
"""
Resets the display setting for each invisible atom in the selected
chunks or atoms to Default display mode.
"""
cmd = greenmsg("Show Invisible Atoms: ")
if not self.assy.selmols and not self.assy.selatoms:
msg = "No atoms or chunks selected."
env.history.message(cmd + msg)
return
nia = 0 # nia = Number of Invisible Atoms
if self.assy.selmols:
nia = self.assy.showInvisibleAtoms()
if self.assy.selectionContainsInvisibleAtoms():
for a in self.assy.selatoms.itervalues(): #bruce 060707 itervalues
if a.display == diINVISIBLE:
a.setDisplayStyle(diDEFAULT)
nia += 1
msg = cmd + str(nia) + " invisible atoms found."
env.history.message(msg)
def changeBackgroundColor(self):
"""
Let user change the background color of the 3D Graphics Area,
aka "the glpane" to the developers.
"""
self.userPrefs.show(pagename='Color')
# pop up Element Color Selector dialog
def dispElementColorSettings(self):
"""
Slot for 'Display > Element Color Settings...' menu item.
"""
self.showElementColorSettings()
def showElementColorSettings(self, parent = None):
"""
Opens the Element Color Setting dialog, allowing the user to change
default colors of elements and bondpoints, and save them to a file.
@param parent: The parent of the Element Color Setting dialog.
This allows the caller (i.e. Preferences dialog) to
make it modal.
@type parent: U{B{QDialog}<http://doc.trolltech.com/4/qdialog.html>}
"""
global elementColorsWin
# Huaicai 2/24/05: Create a new element selector window each time,
# so it will be easier to always start from the same states.
# Make sure only a single element window is shown
if elementColorsWin and elementColorsWin.isVisible():
return
if not parent:
parent = self
elementColorsWin = elementColors(parent)
elementColorsWin.setDisplay(self.Element)
# Sync the thumbview bg color with the current mode's bg color. Mark 051216.
elementColorsWin.elemGLPane.setBackgroundColor(
self.glpane.backgroundColor,
self.glpane.backgroundGradient
)
elementColorsWin.show()
def dispLighting(self):
"""
Allows user to change lighting brightness.
"""
self.userPrefs.show(pagename = 'Lighting') # Show Preferences | Lighting.
pass # end of class displaySlotsMixin
# end
| NanoCAD-master | cad/src/operations/ops_display.py |
# Copyright 2004-2008 Nanorex, Inc. See LICENSE file for details.
"""
ops_view.py provides viewSlotsMixin for MWsemantics,
with view slot methods and related helper methods.
@version: $Id$
@copyright: 2004-2008 Nanorex, Inc. See LICENSE file for details.
Note: most other ops_*.py files provide mixin classes for Part,
not for MWsemantics like this one.
History:
mark 060120 split this out of MWsemantics.py.
"""
import math
from Numeric import dot
from geometry.geometryUtilities import compute_heuristic_axis
import foundation.env as env
from geometry.VQT import V, Q, A, norm, vlen
from utilities.Log import greenmsg, redmsg, orangemsg
from utilities.prefs_constants import ORTHOGRAPHIC
from utilities.prefs_constants import PERSPECTIVE
from model.NamedView import NamedView
from model.PovrayScene import PovrayScene
class viewSlotsMixin:
"""
Mixin class to provide view-related methods for class MWsemantics.
Has slot methods and their helper methods.
"""
def setViewHome(self):
    """
    Restore the Home view.
    """
    env.history.message(greenmsg("Current View: ") + 'Home')
    self.glpane.setViewHome()
def setViewFullScreen(self, val):
    """
    Full screen mode: maximize the glpane real estate by hiding/collapsing
    other widgets (only the Menu bar and the glpane remain visible).
    The widgets hidden or collapsed include:
    - MainWindow Title bar
    - Command Manager,
    - All toolbars,
    - ModelTree/PM area,
    - History Widget,
    - Statusbar
    @param val: The state of the QAction (checked or unchecked). If True,
                show the main window full screen, otherwise show it with
                its regular size.
    @type val: boolean
    @see: MWsemantics.showSemiFullScreen, MWsemantics.showNormal
    @see: self.setViewSemiFullScreen
    """
    (self.showFullScreen if val else self.showNormal)()
def setViewSemiFullScreen(self, val):
    """
    Semi-Full Screen mode: maximize the glpane real estate by hiding/collapsing
    other widgets. Differs from 'Full Screen mode' in that it hides or
    collapses only the following widgets --
    - MainWindow Title bar
    - ModelTree/PM area,
    - History Widget,
    - Statusbar
    @param val: The state of the QAction (checked or unchecked). If True,
                show the main window semi-full screen, otherwise show it
                with its regular size.
    @type val: boolean
    @see: MWsemantics.showSemiFullScreen, MWsemantics.showNormal
    @see: self.setViewFullScreen
    """
    (self.showSemiFullScreen if val else self.showNormal)()
def setViewFitToWindow(self):
    """
    Fit the model to the 3D graphics area window.
    """
    env.history.message(greenmsg("Fit to Window: ") + '')
    self.glpane.setViewFitToWindow()
def setViewZoomToSelection(self):
    """
    Zoom to the selection (implemented only for selected jigs and chunks).
    """
    env.history.message(greenmsg("Zoom To Selection:") + '')
    self.glpane.setViewZoomToSelection()
def setViewHomeToCurrent(self):
    """
    Change the model's Home view to the current view in the glpane.
    """
    env.history.message(greenmsg("Set Home View to Current View: ") + 'Home')
    self.glpane.setViewHomeToCurrent()
def setViewRecenter(self):
    """
    Recenter the view around the origin of modeling space.
    """
    env.history.message(greenmsg("Recenter View: ") + 'View Recentered')
    self.glpane.setViewRecenter()
def zoomToArea(self, val):
    """
    Zoom to Area Tool: lets the user specify a rectangular area by holding
    down the left button and dragging the mouse, then zooms into that
    specific area of the model.
    @param val: True when the Zoom tool button was toggled on, False when
                it was toggled off.
    """
    self._zoomPanRotateTool(val, commandName = 'ZOOMTOAREA',
                            user_mode_name = "Zoom to Area Tool")
def zoomInOut(self, val):
    """
    Basic Zoom for zooming in and/or out:
    zoom out as the user pushes the mouse away (cursor moves up),
    zoom in as the user pulls the mouse closer (cursor moves down).
    @param val: True when the Zoom in/out button is toggled on, False when
                it is toggled off.
    @type val: boolean
    """
    self._zoomPanRotateTool(val, commandName = 'ZOOMINOUT',
                            user_mode_name = "Zoom In/Out Tool")
def panTool(self, val):
    """
    Pan Tool: allows X-Y panning using the left mouse button.
    @param val: True when the Pan tool button was toggled on, False when
                it was toggled off.
    """
    self._zoomPanRotateTool(val, commandName = 'PAN',
                            user_mode_name = "Pan Tool")
def rotateTool(self, val):
    """
    Rotate Tool: allows free rotation using the left mouse button.
    @param val: True when the Rotate tool button was toggled on, False when
                it was toggled off.
    """
    self._zoomPanRotateTool(val, commandName = 'ROTATE',
                            user_mode_name = "Rotate Tool")
def _zoomPanRotateTool(self, val, commandName, user_mode_name):
    """
    Common code for Zoom, Pan, and Rotate tools.

    @param val: True when the tool's button was toggled on (enter the
                temporary command), False when toggled off (exit it).
    @param commandName: command name of the temporary command,
                        e.g. 'ZOOMTOAREA', 'PAN', 'ROTATE'.
    @param user_mode_name: user-visible tool name for the help message,
                           e.g. "Pan Tool".
    """
    commandSequencer = self.commandSequencer
    ## modes_we_are_called_for = ['ZOOM', 'PAN', 'ROTATE']
    # Note: some logic in here was revised by bruce 070814, especially
    # for the case when entering one of these temporary modes needs to
    # autoexit another one. This has allowed these tools to work properly
    # during Extrude Mode (and presumably other commands with internal state).
    # But all this logic should be replaced by something more principled
    # and general, using the Command Sequencer, when we have that.
    # This fixes bug 1081. mark 060111.
    if not val:
        # The Zoom/Pan/Rotate button was toggled off. We are presumably
        # in the associated temporary command, and the user wants us to
        # exit it. Do so and return to parent command.
        command = commandSequencer.currentCommand
        ## if command.commandName in modes_we_are_called_for:
        if command.commandName == commandName:
            #bruce 071011 change, an educated guess, may increase prints, may cause bugs ### TEST
            # we're now in the command being turned off, as expected.
            if commandSequencer._f_command_stack_is_locked:
                # this is normal when the command is exiting on its own
                # and changes the state of its action programmatically.
                # In this case, redundant exit causes bugs, so skip it.
                # It might be better to avoid sending the signal when
                # programmatically changing the action state.
                # See similar code and comment in Move_Command.py.
                # [bruce 080829]
                ## print "DEBUG fyi: _zoomPanRotateTool skipping Done of %r since command stack locked" % commandName
                ##     # remove when works, or soon after
                pass
            else:
                #Exit this temporary command.
                command.command_Done()
        else:
            if command is not commandSequencer.nullmode:
                # bruce 071009 add condition to fix bug 2512
                # (though the cause remains only guessed at)
                print "bug: _zoomPanRotateTool sees unexpected current command: %r" % (command,)
                # Note: This can happen on nullMode after certain other exceptions occur.
                # [In fact, it seems to happen whenever we exit zoom/pan/rotate normally...
                #  that is now bug 2512, and its cause is not known, but it might relate
                #  to the comment above from 070814 (guess). [bruce 070831 comment]]
                # Don't run Done in this case.
            pass
        pass
    else:
        # The Zoom/Pan/Rotate button was toggled on:
        # enter the associated temporary command.
        commandSequencer.userEnterCommand(commandName, always_update = True)
            #bruce 071011, encapsulating the code that was here before
        # Emit a help message on entering the new temporary command. Ideally this
        # should be done in its command_entered method, but that made it
        # appear before the green "Entering Mode: Zoom" msg. So I put it here.
        # [Mark 050130; comment paraphrased by bruce 070814]
        # TODO: do this in a new postEnter command-specific method which is called
        # late enough to have the desired effect (later: such as command_entered,
        # after the ongoing command stack refactoring).
        env.history.message("You may hit the Esc key to exit %s." % user_mode_name)
        ###REVIEW: put this in statusbar instead?
    return
# GLPane.ortho is checked in GLPane.paintGL
def setViewOrtho(self):
    """
    Set the view projection to orthographic.
    """
    self.glpane.setViewProjection(ORTHOGRAPHIC)
def setViewPerspec(self):
    """
    Set the view projection to perspective.
    """
    self.glpane.setViewProjection(PERSPECTIVE)
def stereoSettings(self):
    """
    Enter the stereo-view properties command.
    """
    self.enterStereoPropertiesCommand()
def viewNormalTo(self): #
    """
    Set view to the normal vector of the plane defined by 3 or more
    selected atoms or a jig's (Motor or RectGadget) axis.
    """
    cmd = greenmsg("Set View Normal To: ")
    chunks = self.assy.selmols
    jigs = self.assy.getSelectedJigs()
    atoms = self.assy.selatoms_list()
    #following fixes bug 1748 ninad 061003.
    if len(chunks) > 0 and len(atoms) == 0:
        # Even though chunks have an axis, it is not necessarily the same
        # axis attr stored in the chunk. Get the chunks atoms and let
        # compute_heuristic_axis() recompute them.
        for c in range(len(chunks)):
            atoms += chunks[c].atoms.values()
    elif len(jigs) == 1 and len(atoms) == 0:
        # Warning: RectGadgets have no atoms. We handle this special case below.
        atoms = jigs[0].atoms
    elif len(atoms) < 3:
        # There is a problem when allowing only 2 selected atoms.
        # Changing requirement to 3 atoms fixes bug 1418. mark 060322
        msg = redmsg("Please select some atoms, jigs, and/or chunks, covering at least 3 atoms")
        print "ops_view.py len(atoms) = ", len(atoms)
        env.history.message(cmd + msg)
        return
    # This check is needed for jigs that have no atoms. Currently, this
    # is the case for RectGadgets (ESP Image and Grid Plane) only.
    if len(atoms):
        pos = A( map( lambda a: a.posn(), atoms ) ) # atom xyz positions
        nears = [ self.glpane.out, self.glpane.up ]
        axis = compute_heuristic_axis( pos, 'normal', already_centered = False, nears = nears, dflt = None )
        # NOTE(review): only the jig branch below explicitly negates an
        # into-screen axis; presumably the 'nears' hint orients this one --
        # confirm against compute_heuristic_axis.
    else: # We have a jig with no atoms.
        axis = jigs[0].getaxis() # Get the jig's axis.
        # If axis is pointing into the screen, negate (reverse) axis.
        if dot(axis, self.glpane.lineOfSight) > 0:
            axis = -axis
    if not axis:
        # NOTE(review): this truth test assumes axis is either None
        # (compute_heuristic_axis's dflt) or a truth-testable vector --
        # confirm it can't be a multi-element array that raises here.
        msg = orangemsg( "Warning: Normal axis could not be determined. No change in view." )
        env.history.message(cmd + msg)
        return
    # Compute the destination quat (q2): rotate the screen normal onto axis.
    q2 = Q(V(0,0,1), axis)
    q2 = q2.conj()
    self.glpane.rotateView(q2)
    info = 'View set to normal vector of the plane defined by the selected atoms.'
    env.history.message(cmd + info)
def viewNormalTo_NEW(self):
    """
    Set view to the normal vector of the plane defined by 3 or more
    selected atoms or a jig's (Motor or RectGadget) axis.
    """
    # This implementation has two serious problems:
    #   1. it selects a normal based on the atoms and not the axis of a jig (e.g. a moved rotary motor).
    #   2. doesn't consider selected jigs that have no atoms.
    # Bruce and I will discuss this and determine the best implem.
    # For A7, I've decide to use the original version. This version will be reinstated in A8
    # after fixing these problems. mark 060322.
    cmd = greenmsg("Set View Normal To: ")
    atoms = self.assy.getSelectedAtoms()
    if len(atoms) < 3:
        # There is a problem when allowing only 2 selected atoms.
        # Changing requirement to 3 atoms fixes bug 1418. mark 060322
        msg = redmsg("Please select some atoms, jigs, and/or chunks, covering at least 3 atoms")
        env.history.message(cmd + msg)
        return
    pos = A( map( lambda a: a.posn(), atoms ) ) # build list of atom xyz positions.
    nears = [ self.glpane.out, self.glpane.up ]
    axis = compute_heuristic_axis( pos, 'normal', already_centered = False, nears = nears, dflt = None )
    if not axis:
        # axis could not be determined (compute_heuristic_axis returned its
        # dflt, None); warn and leave the view unchanged.
        msg = orangemsg( "Warning: Normal axis could not be determined. No change in view." )
        env.history.message(cmd + msg)
        return
    # Compute the destination quat (q2): rotate the screen normal onto axis.
    q2 = Q(V(0,0,1), axis)
    q2 = q2.conj()
    self.glpane.rotateView(q2)
    info = 'View set to normal of the plane defined by the selection.'
    env.history.message(cmd + info)
def viewParallelTo(self):
    """
    Set view parallel to the vector defined by 2 selected atoms.
    """
    cmd = greenmsg("Set View Parallel To: ")
    atoms = self.assy.selatoms_list()
    if len(atoms) != 2:
        msg = redmsg("You must select 2 atoms.")
        env.history.message(cmd + msg)
        return
    # Bugfix: check the raw separation *before* normalizing. The old code
    # tested vlen() of the already-normalized vector, which is always 1
    # except for an exactly zero input, so near-coincident atoms were
    # never detected.
    raw = atoms[0].posn() - atoms[1].posn()
    if vlen(raw) < 0.0001: # Atoms are on top of each other.
        info = 'The selected atoms are on top of each other. No change in view.'
        env.history.message(cmd + info)
        return
    v = norm(raw)
    # If v is pointing into the screen, negate (reverse) it.
    if dot(v, self.glpane.lineOfSight) > 0:
        v = -v
    # Compute the destination quat (q2): rotate the screen normal onto v.
    q2 = Q(V(0,0,1), v)
    q2 = q2.conj()
    self.glpane.rotateView(q2)
    info = 'View set parallel to the vector defined by the 2 selected atoms.'
    env.history.message(cmd + info)
def viewFlipViewVert(self):
    """
    Flip view vertically.
    """
    halfTurn = Q(V(0, 1, 0), math.pi)
    self.glpane.rotateView(self.glpane.quat + halfTurn)
def viewFlipViewHorz(self):
    """
    Flip view horizontally.
    """
    halfTurn = Q(V(1, 0, 0), math.pi)
    self.glpane.rotateView(self.glpane.quat + halfTurn)
def viewRotatePlus90(self): # Added by Mark. 051013.
    """
    Increment the current view by 90 degrees around the vertical axis.
    """
    quarterTurn = Q(V(0, 1, 0), math.pi / 2)
    self.glpane.rotateView(self.glpane.quat + quarterTurn)
def viewRotateMinus90(self): # Added by Mark. 051013.
    """
    Decrement the current view by 90 degrees around the vertical axis.
    """
    quarterTurn = Q(V(0, 1, 0), -math.pi / 2)
    self.glpane.rotateView(self.glpane.quat + quarterTurn)
def viewBack(self):
cmd = greenmsg("Back View: ")
info = 'Current view is Back View'
env.history.message(cmd + info)
self.glpane.rotateView(Q(V(0,1,0),math.pi))
def viewBottom(self):
    """
    Orient the view to the standard Bottom view.
    """
    cmd = greenmsg("Bottom View: ")
    env.history.message(cmd + 'Current view is Bottom View')
    self.glpane.rotateView(Q(V(1, 0, 0), -math.pi / 2))
def viewFront(self):
    """
    Orient the view to the standard Front view (the identity rotation).
    """
    cmd = greenmsg("Front View: ")
    env.history.message(cmd + 'Current view is Front View')
    self.glpane.rotateView(Q(1, 0, 0, 0))
def viewLeft(self):
    """
    Orient the view to the standard Left view.
    """
    cmd = greenmsg("Left View: ")
    env.history.message(cmd + 'Current view is Left View')
    self.glpane.rotateView(Q(V(0, 1, 0), math.pi / 2))
def viewRight(self):
    """
    Orient the view to the standard Right view.
    """
    cmd = greenmsg("Right View: ")
    env.history.message(cmd + 'Current view is Right View')
    self.glpane.rotateView(Q(V(0, 1, 0), -math.pi / 2))
def viewTop(self):
    """
    Orient the view to the standard Top view.
    """
    cmd = greenmsg("Top View: ")
    env.history.message(cmd + 'Current view is Top View')
    self.glpane.rotateView(Q(V(1, 0, 0), math.pi / 2))
def viewIsometric(self):
    """
    Set the view to isometric. For isometric view, it needs rotation
    around the vertical axis by pi/4 *followed* by rotation around the
    horizontal axis by asin(tan(pi/6)) - ninad060810
    """
    # This is not yet called from the MainWindow. Need UI for this.
    # Also need code review -ninad060810
    cmd = greenmsg("Isometric View: ")
    env.history.message(cmd + 'Current view is Isometric View')
    self.quatX = Q(V(1, 0, 0), math.asin(math.tan(math.pi / 6)))
    self.quatY = Q(V(0, 1, 0), -math.pi / 4)
    # Order matters: applying quatX first does not give an isometric
    # view [ninad060810].
    self.glpane.rotateView(self.quatY + self.quatX)
def saveNamedView(self):
    """
    Capture the current view (scale, point of view, zoom factor and
    orientation quat) in a new NamedView node, add it to the assembly,
    and refresh the model tree.
    """
    glpane = self.glpane
    namedView = NamedView(self.assy, None,
                          glpane.scale,
                          glpane.pov,
                          glpane.zoomFactor,
                          glpane.quat)
    self.assy.addnode(namedView)
    self.mt.mt_update()
    return
def getNamedViewList(self):
    """
    Returns a list of all the named view nodes in the MT inside a part.
    """
    found = [] # accumulates the result
    def collect(node):
        # append every NamedView node encountered during the traversal
        if isinstance(node, NamedView):
            found.append(node)
        return
    self.assy.part.topnode.apply2all(collect)
    return found
def showStandardViewsMenu(self):
    """
    When Standard Views button is activated, show its QMenu.
    """
    # By default, nothing happens if you click on the
    # toolbutton with submenus. The menus are displayed only when you click
    # on the small downward arrow of the tool button.
    # Therefore the following slot is added. ninad 070109
    if not self.standardViewsMenu.isVisible():
        self.standardViews_btn.showMenu()
    else:
        self.standardViewsMenu.hide()
def viewQuteMol(self):
    """
    Slot for 'View > QuteMolX'. Opens the QuteMolX Property Manager.

    @note: The QuteMolX PM will not open if there are no atoms in the part.
    """
    cmd = greenmsg("QuteMolX : ")
    if not self.assy.molecules:
        # Nothing to render; tell the user instead of opening the PM.
        env.history.message(cmd + orangemsg("No atoms in the current part."))
        return
    self.enterQuteMolCommand()
def viewRaytraceScene(self):
    """
    Slot for 'View > POV-Ray'.
    Raytraces the current scene. This version does not add a POV-Ray Scene
    node to the model tree. This is preferred since it allows the user to
    preview POV-Ray renderings without having to save the current part
    and/or delete unwanted nodes from the model tree. If the user wants to
    add the node to the model tree, the user must use
    'Insert > POV-Ray Scene'.
    """
    assy = self.assy
    glpane = self.glpane # NOTE: only used by the commented-out call below
    #pov = PovrayScene(assy, None, params = (glpane.width, glpane.height, 'png')) #bruce 060620 revised this
    pov = PovrayScene(assy, None)
    pov.raytrace_scene(tmpscene=True) # this emits whatever history messages are needed [bruce 060710 comment]
## def viewRaytraceScene_ORIG(self):
## """
## Slot for 'View > Raytrace Scene'.
## Raytraces the current scene. This version adds a POV-Ray Scene node to the model tree.
## """
## cmd = greenmsg("Raytrace Scene: ")
##
## assy = self.assy
## glpane = self.glpane
##
## pov = PovrayScene(assy, None, params = (glpane.width, glpane.height, 'png')) #bruce 060620 revised this
## #bruce 060620 comment: I doubt it's correct to render the image before adding the node,
## # in case rendering it takes a long time. Also, if the rendering is aborted, the node
## # should perhaps not be added (or should be removed if it was already added,
## # or should be changed to indicate that the rendering was aborted).
## errorcode, errortext = pov.raytrace_scene() # [note: as of long before 060710 the return value no longer fits this pattern]
## if errorcode:
## env.history.message( cmd + redmsg(errortext) )
## return
## assy.addnode(pov)
## self.mt.mt_update()
##
## msg = "POV-Ray rendering complete."
## env.history.message( cmd + msg )
pass # end of class viewSlotsMixin
# end
| NanoCAD-master | cad/src/operations/ops_view.py |
# Copyright 2007-2008 Nanorex, Inc. See LICENSE file for details.
"""
bond_chains.py -- helper functions related to chains of bonds
(See also: pi_bond_sp_chain.py)
@author: Bruce
@version: $Id$
@copyright: 2007-2008 Nanorex, Inc. See LICENSE file for details.
"""
from model.bond_constants import DIRBOND_CHAIN_MIDDLE
from model.bond_constants import DIRBOND_CHAIN_END
from model.bond_constants import DIRBOND_NONE
from model.bond_constants import DIRBOND_ERROR
from model.bond_constants import find_bond
def grow_bond_chain(bond, atom, next_bond_in_chain): #bruce 070415; generalized from grow_pi_sp_chain
    """
    Extend the bond chain containing bond (chain membership being defined by
    the function next_bond_in_chain, called with a bond and one of its atoms)
    in the direction of atom, collecting each newly reached bond and atom,
    until the chain ends or wraps around to bond and forms a ring (in which
    case return as much of the chain as possible, but not another ref to
    bond or atom).

    @return: (ringQ, listb, lista) where ringQ says whether a ring was
             detected, and len(listb) == len(lista) == number of new
             (bond, atom) pairs found, in discovery order.

    Note that each (bond, atom) pair found (at corresponding positions in the
    lists) has a direction (in bond, from atom to bond.other(atom)) which is
    backwards along the direction of chain growth.

    Note that listb never includes the original bond, so it is never a
    complete list of bonds in the chain. In general, to form a complete
    chain, a caller must piece together a starting bond and two bond lists
    grown in opposite directions, or one bond list if bond is part of a ring.
    (See also the method find_chain_or_ring_from_bond, which calls us twice
    and does this.)

    The function next_bond_in_chain(bond, atom) must return another bond
    containing atom in the same chain or ring, or None if the chain ends at
    bond (on the end of bond which is atom), and must be consistent from
    either direction of traversal through any atom. That consistency rules
    out rings which come back to atom without including bond's other atom;
    if that happens anyway, we raise an exception.
    """
    found_bonds = []
    found_atoms = []
    start_bond = bond # for detecting a ring
    start_atom = atom # for error checking
    while True:
        # the function called here is the main difference from grow_pi_sp_chain
        candidate = next_bond_in_chain(bond, atom)
        if candidate is None:
            # chain ended without closing into a ring
            return False, found_bonds, found_atoms
        if candidate is start_bond:
            # wrapped around: it's a ring; sanity-check before returning
            assert atom is not start_atom, "grow_bond_chain(%r, %r, %r): can't have 3 bonds in chain at atom %r; data: %r" % \
                   (start_bond, start_atom, next_bond_in_chain, atom, (candidate, found_bonds, found_atoms)) #revised to fix bug 2328 [bruce 070424]
            assert candidate.other(atom) is start_atom
            return True, found_bonds, found_atoms
        atom = candidate.other(atom)
        found_bonds.append(candidate)
        found_atoms.append(atom)
        bond = candidate
def grow_directional_bond_chain(bond, atom): #bruce 070415
    """
    Grow a chain of directional bonds. For details, see docstring of grow_bond_chain.

    @param bond: a directional bond to grow from.
    @param atom: one of bond's atoms; growth proceeds in its direction.
    @return: (ringQ, listb, lista), as documented in grow_bond_chain.
    """
    return grow_bond_chain(bond, atom, next_directional_bond_in_chain)
def next_directional_bond_in_chain(bond, atom):
    """
    Assuming bond is in a chain of directional bonds,
    being traversed towards atom (one of bond's atoms),
    return the next bond in the chain if there is one,
    or None if there is not one (due to an error,
    such as the chain branching, or due to the chain ending).

    For some errors, print error messages (this behavior needs REVIEW)
    and return None.
    """
    #bruce 070415, revised 071016; should be ok with or without open bonds being directional
    assert bond.is_directional()
    # note, as of mark 071014, atom might be a Singlet
    # Ask the atom where it sits in a directional-bond chain; bond1/bond2
    # are the (up to two) chain bonds it reports.
    statuscode, bond1, bond2 = atom.directional_bond_chain_status()
    if statuscode == DIRBOND_CHAIN_MIDDLE:
        assert bond1
        assert bond2
        # return whichever of the two chain bonds we did not arrive on
        if bond is bond1:
            return bond2
        elif bond is bond2:
            return bond1
        else:
            # error -- REVIEW: how should we report it? not sure, so print them for now.
            # anyway, stop propagating the chain on any error (ie return None).
            print "error or bug: atom %r has directional bond %r reaching chain containing %r and %r" % \
                  (atom, bond, bond1, bond2)
            return None
        pass
    elif statuscode == DIRBOND_CHAIN_END:
        assert bond1
        assert bond2 is None
        if bond is not bond1:
            print "error or bug: atom %r has directional bond %r different than chain-end bond %r" % \
                  (atom, bond, bond1)
            return None
        # chain legitimately ends here (bond is the chain-end bond);
        # fall through, implicitly returning None
    elif statuscode == DIRBOND_NONE:
        print "bug: atom %r has directional bond %r but directional_bond_chain_status of DIRBOND_NONE" % \
              (atom, bond)
        return None
    elif statuscode == DIRBOND_ERROR:
        print "error: atom %r with directional bond %r has directional_bond_chain_status of DIRBOND_ERROR" % \
              (atom, bond)
        return None
    else:
        assert 0, "bug: atom %r with directional bond %r has unrecognized directional_bond_chain_status %r" % \
                  (atom, bond, statuscode)
        return None
    pass
# ==
# dict utils to be refiled:
def pop_arbitrary_item(dict1):
    """
    Efficiently remove and return an arbitrary item (key, value pair)
    from dict1, which the caller promises is not empty.
    """
    item = arbitrary_item(dict1)
    dict1.pop(item[0])
    return item
def arbitrary_item(dict1):
    """
    If dict1 is not empty, efficiently return an arbitrary item
    (key, value pair) from it. Otherwise return None.
    """
    # iteritems() is lazy (Python 2), so only one item is ever produced,
    # regardless of dict size; the loop body runs at most once.
    for item in dict1.iteritems():
        return item
    return None
# ==
class abstract_bond_chain_analyzer:
    """
    Abstract helper class for testing atoms and bonds as qualified
    to belong to chains or rings, and finding those chains or rings.
    Specific subclasses have specific definitions of qualifying atoms
    and bonds, and methods for creating found chain or ring or lone-atom
    objects, and for handling atoms with more than two qualifying bonds.
    """
    #bruce 071126, synthesizing some existing related code and new code
    # TODO: recode existing helper functions to use this.

    # per-subclass constants
    branches_ok = False # the other value is untested

    def atom_ok(self, atom):
        """
        Subclass-specific primitive for whether an atom qualifies.

        @note: if an atom is only ok if it matches the prior atom in some way,
               use bond_ok instead, for that part of the condition.

        [subclass should override]
        """
        return True

    def bond_ok(self, bond):
        """
        Subclass-specific primitive for whether a bond qualifies,
        if its atoms do.
        The default version (always True) is often good enough.
        This class's methods never call self.bond_ok on a bond
        unless at least one of that bond's atoms satisfies atom_ok.
        And they never pay attention to what bond_ok returns
        unless both its atoms satisfy atom_ok.
        """
        return True

    def atom_list_of_ok_bonds(self, atom):
        """
        Assume self.atom_ok(atom). Return a list of
        atom's bonds for which the bond and its atoms
        satisfy bond_ok and atom_ok respectively.
        This list might have any length (including 0
        or more than 2); we don't assume it's an error
        if the length is more than 2, though callers
        of this method in certain subclasses are free to do so.
        The implementation assumes that self.atom_ok might be
        expensive enough to justify comparing bond's atoms to atom
        to avoid calling self.atom_ok redundantly on atom.
        """
        atom_ok = self.atom_ok # hoisted: may be expensive, called in a loop
        assert atom_ok(atom)
        res = []
        for bond in atom.bonds:
            if self.bond_ok(bond):
                # only check the atom on the far side; atom itself
                # was already verified by the assert above
                other = bond.other(atom)
                if atom_ok(other):
                    res.append(bond)
        return res

    def atom_is_branchpoint(self, atom):
        """
        Report whether atom has more than two qualifying bonds,
        i.e. whether a chain passing through atom would branch there.
        """
        return len(self.atom_list_of_ok_bonds(atom)) > 2

    def next_bond_in_chain(self, bond, atom):
        """
        Assume bond & both its atoms qualify (but not that atom has been checked
        for having too many qualifying bonds); return the next bond on atom
        in a bond chain (never the given bond; always one with both atoms
        and itself qualifying), or None if the chain ends at atom
        due to there being fewer or more than two qualifying bonds on atom.

        (If the caller cares which of those conditions cause the chain
        to end at atom, it should check the last atom separately.
        This method doesn't assume that too many qualifying bonds
        is an error, even though it stops the chain if it finds this
        condition, including that atom at the end of the chain.
        Note that if a caller extends two chains in opposite
        directions from a bond and both of them end on an atom with
        too many qualifying bonds, those two ends might be the same atom.)
        """
        bonds = self.atom_list_of_ok_bonds(atom)
        assert bond in bonds # remove when works? #####
        if len(bonds) != 2:
            # chain ends here: atom is a dead end or a branchpoint
            return None
        for otherbond in bonds:
            if otherbond is not bond:
                return otherbond
        assert 0, "bond %r occurs twice in %r -- should be impossible" % (bond, bonds)
        return None

    def find_chain_or_ring_from_bond(self, bond):
        """
        Return the maximal chain or ring of qualifying atoms
        connected by qualifying bonds, which includes bond.
        (See below for what is returned to represent the result.)
        (Return None if bond or either of its atoms doesn't qualify.) [### REVIEW - compare to lone-atom error return]

        If any qualifying atom has more than two qualifying bonds,
        behave differently depending on the per-subclass constant
        self.branches_ok: if it's False,
        treat this as an error which in some sense disqualifies that atom --
        complain, stop growing the chain, and don't include that atom in it
        (or any of its bonds). This error can lead to a chain with one atom
        and no bonds (which won't include the given bond), or to a longer
        chain which doesn't include bond (if one of its atoms had this error
        but the other one connected to a longer chain than bond).
        But do nothing to modify the offending atom or its bonds in the model
        (except perhaps to set flags in it which affect only future complaints).

        But if self.branches_ok is true, just include the branchpoint atom as one
        end of the returned chain. Note that the other end may or may not also
        be a branchpoint; if both ends are branchpoints, they may be the same
        atom, but the result will still be considered a chain rather than a
        ring.

        @note: The effect of self.branches_ok is implemented in self._found_chain,
               which could be extended in subclasses to change that behavior.

        @note: If any qualifying atom has no qualifying bonds,
               we'll never encounter it, since we encounter atoms
               only via qualifying bonds on them.)

        @return: None, or an object returned by one of the
                 methods make_1atom_chain, make_chain, make_ring (unless the
                 corresponding _found methods which call them are overridden,
                 which is not encouraged). (Note: make_chain can return None if bond's
                 atoms both have too many qualifying chain-bonds; also we can return None
                 directly, as described elsewhere.)
        """
        #bruce 071126 made this from make_pi_bond_obj; TODO: recode that to use this
        atom_ok = self.atom_ok
        atom1 = bond.atom1
        atom2 = bond.atom2
        if not atom_ok(atom1) or not atom_ok(atom2):
            return None
        bond_ok = self.bond_ok
        if not bond_ok(bond):
            return None
        # grow away from atom1 first; if that wraps around, it's a ring
        ringQ, listb1, lista1 = grow_bond_chain(bond, atom1, self.next_bond_in_chain)
        assert len(listb1) == len(lista1) # guess, bruce 080119
        if ringQ:
            # branchpoint atoms can't occur in rings
            assert atom2 is lista1[-1]
            res = self._found_ring( [bond] + listb1 ,
                                    ## [atom2, atom1] + lista1 # wrong, has atom2 twice
                                    [atom1] + lista1 #bruce 080119 bugfix
                                   )
        else:
            # not a ring: also grow away from atom2 and splice the pieces
            ringQ, listb2, lista2 = grow_bond_chain(bond, atom2, self.next_bond_in_chain)
            assert len(listb2) == len(lista2) # guess, bruce 080119
            assert not ringQ
            ### consider: reverse lista2/listb2 instead, concat other way,
            # so as to keep listb1 in same order in ring or chain case
            # [bruce 080119 comment]
            listb1.reverse()
            lista1.reverse()
            # Note: depending on branches_ok, we worry about branchpoint atoms
            # at either or both ends, inside _found_chain.
            res = self._found_chain( listb1 + [bond] + listb2,
                                     lista1 + [atom1, atom2] + lista2 )
        return res

    def _found_ring(self, listb, lista):
        """
        @see: make_ring

        [subclasses should extend make_ring, which we call,
         rather than this method]
        """
        assert len(listb) == len(lista), \
               "%r finds ring but #bonds %r != #atoms %r" % \
               (self, len(listb), len(lista))
        if 0 and 'debug, but REMOVE WHEN WORKS, very slow':
            # exhaustive consistency check, disabled (the "if 0")
            for i in range(len(listb)):
                assert find_bond(lista[i] , lista[(i-1) % len(lista)]) is listb[i]
            print "remove when works! in _found_ring len %d" % len(lista)
        return self.make_ring(listb, lista)

    def make_ring(self, listb, lista):
        """
        Return a representation of the ring of bonds and atoms
        in listb and lista, which have the same length,
        and in which listb[i] is a bond which connects the two atoms
        lista[i] and lista[(i+1) % len(lista)].

        The default implementation just returns (True, listb, lista),
        which has the same format as the grow_bond_chain return value.

        The return value is used by other methods of self in several ways:
        * as a possible return value of find_chain_or_ring_from_bond and
          related methods;
        * therefore, as a value passed to self.found_object_iteratoms
          when calling self.find_chains_or_rings.

        Subclasses can extend this method to return a different representation
        of a ring of bonded atoms, but they will probably also need to extend
        found_object_iteratoms to handle it.
        (Or they could extend it to do something and return None even for a
        real ring, but only if they never needed to call a method like
        self.find_chains_or_rings which needs to use self.found_object_iteratoms
        on the result.)
        """
        return (True, listb, lista)

    def _found_chain(self, listb, lista):
        """
        #doc [similar to _found_ring; usually return the output of make_chain];
        if not self.branches_ok, we worry about branchpoint atoms at either or both ends...
        this can cause us to return the output of make_1atom_chain, or even to return None.

        @see: make_chain

        [subclasses should extend make_chain, which we call,
         rather than this method]
        """
        assert len(lista) - 1 == len(listb) # one more atom than bond
        assert len(listb) > 0 # somewhat arbitrary - could easily be recoded to not assume this
        if not self.branches_ok: # a per-subclass constant
            # trim a branchpoint atom (and its bond) off either end,
            # since branchpoints may not appear in a returned chain
            is_branchpoint = self.atom_is_branchpoint
            if is_branchpoint( lista[0] ):
                del lista[0]
                del listb[0]
            if is_branchpoint( lista[-1] ):
                if not listb:
                    return None # i.e. a 0-atom chain
                del lista[-1]
                del listb[-1]
            if not listb:
                # note: this can only happen (given not self.branches_ok and the
                # initial assumption that len(listb) > 0) when both end atoms
                # were branchpoints and the trimming above emptied listb.
                return self._found_1atom_chain(lista)
            # recheck these, in case things changed
            assert len(lista) - 1 == len(listb)
            assert len(listb) > 0
            assert not is_branchpoint( lista[0] )
            assert not is_branchpoint( lista[-1] )
        return self.make_chain(listb, lista)

    def make_chain(self, listb, lista): # TODO: doc similar to make_ring
        """
        Return a representation of the (non-ring) chain of bonds and atoms
        in listb and lista, where len(lista) == len(listb) + 1 and listb[i]
        connects lista[i] and lista[i+1]. The default implementation returns
        (False, listb, lista), analogous to make_ring; see make_ring's
        docstring for how the return value is used and how subclasses may
        extend this.
        """
        return (False, listb, lista)

    def _found_1atom_chain(self, lista):
        # degenerate case: a "chain" consisting of a single atom, no bonds
        assert len(lista) == 1
        return self.make_1atom_chain(lista[0])

    def make_1atom_chain(self, atom):
        """
        [subclasses may need to override this method]
        """
        return self.make_chain([], [atom])

    def found_object_iteratoms(self, chain_or_ring):
        """
        For anything returnable by one of the methods
        make_1atom_chain, make_chain, make_ring
        (or our methods which return their results, if overridden),
        return an iterator over that thing's contained atoms
        (or a sequence of them).

        This method must be extended (or replaced)
        to handle objects returnable by those methods,
        if they are extended (if the use of a method which
        calls this one, like self.find_chains_or_rings,
        is desired).
        """
        if chain_or_ring is None:
            return ()
        assert type(chain_or_ring) is type((False,[],[])) # ringQ, listb, lista
        # todo: define an API for those found/made objects,
        # so this default implem can handle them when they are instance objects
        return chain_or_ring[2]

    def find_chains_or_rings(self, atoms_dict):
        """
        Take ownership of the atom.key -> atom dict, atoms_dict,
        and find all chains, rings, or lone atoms that contain any atoms
        in that dict. (The search along bond chains ignores the dict,
        except to remove found atoms that reside in it -- typically
        the found objects contain many atoms besides those in the dict.)

        Remove atoms from atoms_dict as we search from them.
        Found objects are returned only once even if several of their
        atoms are initially in the dict. Upon normal return (anything
        except raising an exception), atoms_dict will be empty.

        @note: We treat all atoms equally (even if killed, or bondpoints);
               caller may want to filter them out before passing us atoms_dict.

        @return: a list of found objects, each returned by one of the
                 methods make_1atom_chain, make_chain, make_ring (unless the
                 corresponding _found methods which call them are overridden,
                 which is not encouraged), with results of None filtered out.
                 (Note that these methods are permitted to be overridden to
                 have side effects and return None, so in some subclasses,
                 the side effects for found objects may be the main result.)
        """
        assert not self.branches_ok # see review comment below for why
        res = []
        while atoms_dict:
            key_unused, atom = pop_arbitrary_item(atoms_dict)
            subres = self.find_chain_or_ring_from_atom(atom)
            if subres is not None:
                res.append(subres)
            # remove found object's atoms from atoms_dict, if present
            # (REVIEW: this removal might be wrong if self.branches_ok)
            # (note: found_object_iteratoms returns () for a None subres)
            for atom in self.found_object_iteratoms(subres):
                atoms_dict.pop(atom.key, None)
            continue
        return res

    def find_chain_or_ring_from_atom(self, atom):
        """
        Like find_chain_or_ring_from_bond, but starting from an atom:
        return the found chain, ring or lone-atom object containing atom,
        or None if atom doesn't qualify.
        """
        assert not self.branches_ok # until review; see comment below for why
        if not self.atom_ok(atom):
            return None
        bonds = self.atom_list_of_ok_bonds(atom)
        if bonds:
            bond = bonds[0]
            # We'll search from bond, but from no other bond of atom.
            # The result should not be affected by which bond we choose
            # except perhaps in ways like ring or chain index-origins,
            # if those are arbitrary.
            #
            # Warning: doing at most one search from atom is only
            # correct when not self.branches_ok. Otherwise, one atom
            # can be in more than one ring or chain (though not if
            # "in" means "in interior"). If that case is ever used,
            # we'll need to revise this API to return a list, or just
            # inline this (modified) into find_chains_or_rings, or outlaw
            # branchpoint atoms in this method and have callers replace them
            # with all their non-branchpoint neighbors.
            #
            # (Note: we can ignore len(bonds), since the following
            # will check whether it's too long, finding it again
            # from atom.)
            subres = self.find_chain_or_ring_from_bond(bond)
        else:
            subres = self._found_1atom_chain([atom])
        return subres

    pass # end of class abstract_bond_chain_analyzer
# end
| NanoCAD-master | cad/src/operations/bond_chains.py |
# Copyright 2005-2007 Nanorex, Inc. See LICENSE file for details.
"""
bond_utils.py -- helper functions for bond-related UI code and its operations
(should be renamed, maybe to bond_menu_helpers.py)
@author: bruce
@version: $Id$
@copyright: 2005-2007 Nanorex, Inc. See LICENSE file for details.
History:
created by bruce 050705 to help with higher-order bonds for Alpha6.
"""
from geometry.VQT import Q
from utilities.constants import noop
import foundation.env as env
from utilities.Log import greenmsg, redmsg, orangemsg, quote_html
from utilities.debug import print_compact_stack
from model.bond_constants import bonded_atoms_summary
from model.bond_constants import btype_from_v6
from model.bond_constants import v6_from_btype
from model.bond_constants import bond_left_atom
def intersect_sequences(s1, s2):
    """
    Return the intersection of two sequences. If they are sorted
    in a compatible way, so will be the result.
    (Order and duplicates come from s1; membership is tested against s2.)
    """
    return [element for element in s1 if element in s2]
def complement_sequences(big, little):
    """
    Return the elements of big which do not occur in little,
    preserving big's order.
    """
    return [element for element in big if element not in little]
def possible_bond_types(bond):
    """
    Return a list of names of possible bond types for the given bond,
    in order of increasing bond order,
    based on its atoms' current elements and atomtypes.

    This list is never empty since single bonds are always deemed possible
    (even if they always induce valence errors, e.g. for H bonded to O(sp2) or in O2).

    For warnings about some choices of bond type (e.g. S=S), see the function bond_type_warning.

    [If you want to permit bonds requiring other atomtypes, when those are reachable
    by altering only open bonds on this bond's actual atoms, see possible_bond_types_for_elements
    (related to that goal, but might not do exactly that).]

    Warning: this ignores geometric issues, so it permits double bonds even if they
    would be excessively twisted, and it ignores bond length, bond arrangement in space
    around each atom, etc.
    """
    s1 = bond.atom1.atomtype.permitted_v6_list # in order of v6
    s2 = bond.atom2.atomtype.permitted_v6_list
    s12 = intersect_sequences( s1, s2 ) # order comes from s1; we depend on its coming from one of them or the other
        #e could be faster (since these lists are prefixes of a standard order), but doesn't need to be
    return map( btype_from_v6, s12)
def possible_bond_types_for_elements(bond):
    """
    Like possible_bond_types, but based on what each atom's element could
    permit (presumably via a change of atomtype affecting only open bonds --
    see Atom.permitted_btypes_for_bond for the exact rule; TODO confirm).

    @return: (poss2, permitted1, permitted2), where poss2 is a list of
             bond type names sorted by increasing bond order, and
             permitted1/permitted2 are dicts from permitted v6 values to
             the atomtypes of bond.atom1/bond.atom2 which permit them.
    """
    permitted1 = bond.atom1.permitted_btypes_for_bond(bond) # dict from v6 to atomtypes which permit it
    permitted2 = bond.atom2.permitted_btypes_for_bond(bond)
    poss_v6 = intersect_sequences(permitted1.keys(), permitted2.keys()) # arbitrary order
    poss_v6.sort() # smallest bond order first
    poss2 = map( btype_from_v6, poss_v6)
    return poss2, permitted1, permitted2
#partly-obs comment:
#e should we put element rules into the above possible_bond_types, or do them separately?
# and should bonds they disallow be shown disabled, or not even included in the list?
# and should "unknown" be explicitly in the list?
def bond_type_warning(bond, btype): # 050707
    """
    Return a warning (short text suitable to be added to menu item text), or "" for no warning,
    about the use of btype (bond type name) for bond.
    This can be based on its atomtypes or perhaps on more info about the surroundings
    (#e we might need to add arguments to pass such info).

    Presently, this only warns about S=S being unstable, and about bonds whose type could not
    permit both atoms (using their current atomtypes) to have the right valence
    regardless of their other bonds (which only happens now when they have no other bonds).

    This might return warnings for illegal btypes, even though it's not presently called
    for illegal btypes for the given bond. It doesn't need to return any warning for illegal btypes.
    """
    atype1 = bond.atom1.atomtype
    atype2 = bond.atom2.atomtype
    # S=S double bonds are known-unstable
    if btype == 'double' and atype1.is_S_sp2 and atype2.is_S_sp2:
        return "unstable"
    # a single bond to an atomtype whose order-1 bonds are bad
    if btype == 'single' and (atype1.bond1_is_bad or atype2.bond1_is_bad):
        return "bad valence"
    # sp nitrogen needs a triple bond for correct valence
    if btype != 'triple' and (atype1.is_N_sp or atype2.is_N_sp):
        return "bad valence"
    # if there are any other numbonds=1 atoms which show up here, they should be valence-checked too (generalizing the above)
    # (which might be easiest if atomtype stores a "valence-permitted btypes" when numbonds is 1), but I don't think there are others
    return ""
# Display strings for the three bond_direction values (-1, 0, 1),
# used in the strand-direction submenu text built by bond_menu_section.
_BOND_DIR_TEXTS = {0:"unset", 1:" --->", -1:"<--- "} # see also _BOND_DIR_NAMES #e refile
def bond_menu_section(bond, quat = Q(1,0,0,0)):
    # note: the default quat object is created once at import time;
    # this is safe as long as callees never mutate it (TODO confirm).
    """
    Return a menu_spec subsection for displaying info about a highlighted bond,
    changing its bond_type, offering commands about it, etc.
    If given, use the quat describing the rotation used for displaying it
    to order the atoms in the bond left-to-right (e.g. in text strings).
    """
    res = []
    # first item: a non-clickable (disabled) summary of the bond
    res.append(( bonded_atoms_summary(bond, quat = quat), noop, 'disabled' ))
    # then the bond-type checkmark items (and Delete Bond, for real bonds)
    res.extend( _bond_type_menu_section(bond) )
    if bond.is_directional():
        ### REVIEW: Do we want to do this for open bonds, after mark's 071014 change
        # which allows them here? Or do we want "and not bond.is_open_bond()"?
        # (BTW, I'm not sure this gets called at all, for open bonds.)
        # Guess: open bonds would be safe here, so allow them, though I'm
        # not sure it's always a good idea. Caveat: if we treat them as non-directional
        # when the base atom has three or more directional bonds, we should probably
        # make that exception here too -- probably using a higher-level method in place
        # of is_directional, namely directional_bond_chain_status for both atoms in bond.
        # We'd want a new method on bond to call that for both atoms and look at the
        # results (perhaps there's already code like that elsewhere). Without this,
        # we may get a bug if a user can try to change direction on an open bond
        # that hits a strand but is not in it. But since I suspect the UI never allows
        # an open bond here, I won't bother to write that code just yet. [bruce 071016]
        submenu_contents = bond_direction_submenu_contents(bond, quat)
        left_atom = bond_left_atom(bond, quat) # same one that comes first in bonded_atoms_summary
        #e ideally, for mostly vertical bonds, we'd switch to an up/down distinction for the menu text about directions
        #e and whatever the direction names, maybe we should explore farther along the strand to see what they are...
        # unless it hairpins or crosses over... hmm.
        current_dir = bond.bond_direction_from(left_atom)
        current_dir_str = _BOND_DIR_TEXTS[current_dir]
        text = "strand direction (%s)" % current_dir_str
        item = (text, submenu_contents)
        res.append(item)
    return res
def _bond_type_menu_section(bond): #bruce 050716; replaces bond_type_submenu_spec for Alpha6
    """
    Return a menu_spec for changing the bond_type of this bond
    (as one or more checkmark items, one per permitted bond-type given the atomtypes),
    or if the bond-type is unchangeable, a disabled menu item for displaying the type
    (which looks the same as when the bond type is changeable, except for being disabled).
    (If the current bond type is not permitted, it's still present and checked, but disabled,
    and it might have a warning saying it's illegal.)
    """
    # this assert is true, but it would cause an import loop:
    ## assert isinstance(bond, Bond)
    btype_now = btype_from_v6(bond.v6)
    poss1 = possible_bond_types(bond) # a list of strings which are bond-type names, in order of increasing bond order
    poss, permitted1, permitted2 = possible_bond_types_for_elements(bond) # new feature 060703
    ##e could put weird ones (graphitic, carbomeric) last and/or in parens, in subtext below
    # merge poss1 into poss (keeping poss's order first); anything in poss1
    # but not poss indicates a bug, reported below
    types = list(poss)
    for btype in poss1:
        if btype not in types:
            print "should never happen: %r not in %r" % (btype, poss) # intentional: "not in types" above, "not in poss" here
            types.append(btype)
    if btype_now not in types:
        types.append(btype_now) # put this one last, since it's illegal; warning for it is computed later
    assert len(types) > 0
    # types is the list of bond types for which to make menu items, in order;
    # now make them, and figure out which ones are checked and/or disabled;
    # we disable even legal ones iff there is only one bond type in types
    # (which means, if current type is illegal, it is disabled and the sole legal type is enabled).
    disable_legal_types = (len(types) == 1)
    res = []
    for btype in types: # include current value even if it's illegal
        subtext = "%s bond" % btype # this string might be extended below
        checked = (btype == btype_now)
        # note: btype and bond are bound as lambda defaults so each menu item
        # captures its own values, not the loop's last values
        command = ( lambda arg1=None, arg2=None, btype=btype, bond=bond: apply_btype_to_bond(btype, bond) )
        warning = warning2 = ""
        if btype not in poss:
            # illegal btype (note: it will be the current one, and thus be the only checked one)
            warning = "illegal"
            disabled = True
        else:
            # legal btype
            warning = bond_type_warning(bond, btype) # might be "" (or None??) for no warning
            if btype not in poss1:
                # new feature 060703
                # try1: too long and boring (when in most menu entries):
                ## warning2 = "would change atomtypes"
                # try2: say which atomtypes we'd change to, in same order of atoms as the bond name
                v6 = v6_from_btype(btype)
                atype1 = best_atype(bond.atom1, permitted1[v6])
                atype2 = best_atype(bond.atom2, permitted2[v6])
                in_order = [atype1, atype2] ##e stub; see code in Bond.__str__
                warning2 = "%s<->%s" % tuple([atype.name for atype in in_order])
            disabled = disable_legal_types
                # might change this if some neighbor bonds are locked (nim), or if we want to show non-possible choices
        if warning2:
            subtext += " (%s)" % warning2
        if warning:
            subtext += " (%s)" % warning
        res.append(( subtext, command,
                     disabled and 'disabled' or None,
                     checked and 'checked' or None ))
    ##e if >1 legal value, maybe we should add a toggleable checkmark item to permit "locking" the bond to its current bond type;
    # this won't be needed until we have better bond inference (except maybe for bondpoints),
    # since right now [still true 060703] we never alter real bond types except when the user does an action on that specific bond.
    if not bond.is_open_bond():
        ## command = ( lambda arg1 = None, arg2 = None, bond = bond: bond.bust() )
        command = ( lambda bond = bond: delete_bond(bond) )
        res.append(None) # separator
        res.append(("Delete Bond", command))
    return res
def delete_bond(bond): #bruce 080228 to fix update bug reported by EricM
    """
    Delete (bust) the given bond, first clearing it from the glpane's
    selobj (hover-highlight) slot if it is currently there, then marking
    the assembly as changed and requesting a redraw.
    """
    # see also: SelectAtoms_GraphicsMode.bondDelete
    # (should we print to history like it does?)
    assembly = bond.atom1.molecule.assy
    glpane = assembly.glpane
    if glpane.selobj is bond:
        # drop the highlight reference before the bond object goes away
        glpane.selobj = None
    bond.bust()
    assembly.changed()
    glpane.gl_update()
    return
##def bond_type_submenu_spec(bond): #bruce 050705 (#e add options??); probably not used in Alpha6
## """Return a menu_spec for changing the bond_type of this bond,
## or if that is unchangeable, a disabled menu item for displaying the type.
## """
## v6 = bond.v6
## btype0 = btype_from_v6(v6)
## poss = possible_bond_types(bond) # a list of strings which are bond-type names
## ##e could put weird ones (graphitic, carbomeric) last and/or in parens, in subtext below
## maintext = 'Bond Type: %s' % btype0
## if btype0 not in poss or len(poss) > 1:
## # use the menu
## submenu = []
## for btype in poss: # don't include current value if it's illegal
## subtext = btype
## warning = bond_type_warning(bond, btype)
## if warning:
## subtext += " (%s)" % warning
## command = ( lambda arg1=None, arg2=None, btype=btype, bond=bond: apply_btype_to_bond(btype, bond) )
## checked = (btype == btype0)
## disabled = False # might change this if some neighbor bonds are locked, or if we want to show non-possible choices
## submenu.append(( subtext, command,
## disabled and 'disabled' or None,
## checked and 'checked' or None ))
## ##e if >1 legal value could add checkmark item to permit "locking" this bond type
## return ( maintext, submenu)
## else:
## # only one value is possible, and it's the current value -- just show it
## return ( maintext, noop, 'disabled' )
## pass
#bruce 060523 unfinished aspects of new more permissive bondtype changing: ####@@@@
# - verify it can't be applied to open bonds from dashboard tools (since not safe yet)
# - make sure changing atomtypes doesn't remove bond (if open)
# (possible implem of that: maybe remove it, set_atomtype, then add it back, then remake singlets?)
# - then it's safe to let bond cmenu have more entries (since they might be open bonds)
def apply_btype_to_bond(btype,
bond,
allow_remake_bondpoints = True,
suppress_history_message = False): #bruce 060703 added allow_remake_bondpoints for bug 833-1
"""
Apply the given bond-type name (e.g. 'single') to the given bond, iff this is permitted by its atomtypes
(or, new feature 060523, if it's permitted by its real atoms' possible atomtypes and their number of real bonds),
and do whatever inferences are presently allowed [none are implemented as of 050727].
Emit an appropriate history message. Do appropriate invals/updates.
[#e should the inference policy and/or some controlling object be another argument? Maybe even a new first arg 'self'?]
@param suppress_history_message: If True, it quietly converts the bondtypes
without printing any history message.
"""
# Note: this can be called either from a bond's context menu, or by using a Build mode dashboard tool to click on bonds
# (or bondpoints as of 060702) and immediately change their types.
#This flag will be returned by this function to tell the caller whether the
#bond type of the given bond was changed
bond_type_changed = True
v6 = v6_from_btype(btype)
oldname = quote_html( str(bond) )
def changeit(also_atypes = None):
if v6 == bond.v6:
bond_type_changed = False
if not suppress_history_message:
env.history.message( "bond type of %s is already %s" % (oldname, btype))
else:
if also_atypes:
# change atomtypes first (not sure if doing this first matters)
atype1, atype2 = also_atypes
def changeatomtype(atom, atype):
if atom.atomtype is not atype:
if not suppress_history_message:
msg = "changed %r from %s to %s" % (atom,
atom.atomtype.name,
atype.name )
env.history.message(msg)
atom.set_atomtype(atype)
### note[ probably 060523]:
# if we're an open bond, we have to prevent this process from removing us!
# (this is nim, so we're not yet safe to offer on open bonds.
# Thus in fix for 833-1 [060703], atomtype changes are not allowed.)
pass
return # from changeatomtype
changeatomtype(bond.atom1, atype1)
changeatomtype(bond.atom2, atype2)
bond.set_v6(v6) # this doesn't affect anything else or do any checks ####k #####@@@@@ check that
##e now do inferences on other bonds
bond.changed() ###k needed?? maybe it's now done by set_v6??
if not suppress_history_message:
env.history.message( "changed bond type of %s to %s" % (oldname,
btype))
###k not sure if it does gl_update when needed... how does menu use of this do that?? ###@@@
return # from changeit
poss = poss1 = possible_bond_types(bond) # only includes the ones which don't change the atomtypes -- try these first
if btype in poss1:
changeit()
return bond_type_changed
# otherwise figure out if we can change the atomtypes to make this work.
# (The following code is predicted to work for either real or open bonds,
# but it is not safe to offer on open bonds for other reasons (commented above in changeatomtype).
# But we'll still figure out the situation, so the history message can be more useful.)
if 1:
# this is needed for allow_remake_bondpoints,
# or for history advice about what that could have permitted:
poss2, permitted1, permitted2 = possible_bond_types_for_elements(bond)
# the only purpose of having the whole sequence poss2
# (not just one element of it, equal to btype) is the error message
if btype in poss2:
atype1 = best_atype(bond.atom1, permitted1[v6])
atype2 = best_atype(bond.atom2, permitted2[v6])
if allow_remake_bondpoints:
poss = poss2 # poss is whichever of poss1 or poss2 was actually allowed
if btype in poss2:
changeit((atype1, atype2))
return bond_type_changed
# It failed, but a variety of situations should be handled in the error message.
# For error messages, sort them all the same way.
poss1.sort()
poss2.sort()
poss.sort() #k not really needed, it's same mutable list, but keep this in case someone changes that
if poss2 == poss : # note, this happens if poss2 == poss1, or if they differ but allow_remake_bondpoints is true
# permitting changing of atomtypes wouldn't make any difference
if not suppress_history_message:
msg = "can't change bond type of %s to %s" % (oldname, btype)
msg2 = " -- permitted types are %s" % (poss)
#e improve message -- %s of list looks like repr (for strings too)
env.history.message( orangemsg( msg) + msg2 )
bond_type_changed = False
elif btype in poss2:
if allow_remake_bondpoints:
print_compact_stack( "bug: allow_remake_bondpoints should not be true here: " )
# the only reason we refused is that the UI won't allow remaking of bondpoints;
# explain what the user would have to do to make it work (using the things computed above as if it had been permitted)
# (as of 060703 this happens only when you click a bond type changing tool on a bondpoint,
# but following code will try to cover this for a real bond as well)
unless = ""
for atom, atype in [(bond.atom1, atype1), (bond.atom2, atype2)]: ##e ideally, in same order as printed in bond name
if atype != atom.atomtype:
if atom.is_singlet():
# should never happen
if env.debug:
print "debug: bug: %r is bondpoint but user is advised to change its atomtype" % atom
if not unless:
unless = "change atomtype of %s to %s" % (atom, atype.name)
else:
# this is not expected to ever happen, when called from UI as of 060703; it's untested ##@@
unless += ", and of %s to %s" % (atom, atype.name)
msg = "can't change bond type of %s to %s, " % (oldname, btype,)
bond_type_changed = False
if unless:
unless_msg = greenmsg( "unless you %s" % (unless,) )
else:
unless_msg = redmsg( "due to a bug")
if not suppress_history_message:
env.history.message( orangemsg( msg) + ( unless_msg) )
else:
# changing atomtypes makes a difference, but either way you're not allowed to change to this bond type
if allow_remake_bondpoints:
print_compact_stack( "bug: allow_remake_bondpoints should not be true here: " )
extra = complement_sequences(poss2, poss1)
if not extra:
print_compact_stack( "bug: extra should not be empty here: " )
msg = "can't change bond type of %s to %s" % (oldname, btype)
msg2 = " -- permitted types are %s, or %s if you change atomtypes" % (poss1, extra)
#e improve message -- %s of list looks like repr (for strings too)
bond_type_changed = False
if not suppress_history_message:
env.history.message( orangemsg( msg) + msg2 )
return bond_type_changed # from apply_btype_to_bond
def best_atype(atom, atomtypes = None): #bruce 060523
    """
    Return the best atomtype for atom among the given candidates
    (default: all atomtypes of atom's element), where "best" means the
    fewest bondpoints need removing to reach it. Candidates which would
    require *adding* bondpoints are a last resort, ranked by how many
    would be added. Ties favor atom's current atomtype, then candidates
    earlier in the list.
    """
    # Note: this is related to Atom.best_atomtype_for_numbonds, but that has
    # a quite different cost function, since it assumes it's not allowed to
    # change the number of bondpoints, only to compare severity of valence errors.
    candidates = atomtypes or atom.element.atomtypes
    nbonds = len(atom.bonds)
    def rank(candidate):
        # primary key: bondpoints removed, or 100 + bondpoints added;
        # secondary key: current atomtype first (-1), then list position.
        removed = nbonds - candidate.numbonds
        if removed >= 0:
            primary = removed
        else:
            primary = 100 - removed # i.e. 100 + number of bondpoints added
        if candidate is atom.atomtype:
            secondary = -1
        else:
            secondary = candidates.index(candidate)
        return (primary, secondary)
    best = candidates[0]
    for candidate in candidates[1:]:
        if rank(candidate) < rank(best):
            best = candidate
    return best
_BOND_DIR_NAMES = {0:"unset", 1:"right", -1:"left"} # maps bond-direction value (-1, 0, 1) to its menu label; see also _BOND_DIR_TEXTS #e refile
def bond_direction_submenu_contents(bond, quat): #bruce 070415
    """
    Build and return menu-spec items for setting this bond's direction,
    described relative to the atom which appears on the left for the
    given view orientation (quat); the current direction gets a checkmark.
    """
    left_atom = bond_left_atom(bond, quat)
    current = bond.bond_direction_from(left_atom)
    # fixed order right/left/unset, so the current direction is NOT
    # necessarily listed first (listing it first was tried and rejected)
    items = []
    for direction in (1, -1, 0):
        label = "make it %s" % _BOND_DIR_NAMES[direction]
        # how do we say concisely:
        # "make the bond dirs all one way along entire strand,
        #  so they're going (eg) right, when they pass thru this bond"?
        is_current = (direction == current)
        if is_current:
            label += " (like this bond)" #e also use checkmark, or is that confusing since it's not a noop? for now, use it.
        def _command(_guard = None, bond = bond, left_atom = left_atom, dir = direction):
            # default args bind the loop variables at definition time
            bond.set_bond_direction_from(left_atom, dir, propogate = True)
        items.append(( label, _command, is_current and 'checked' or None ))
    items.append(('set to fit minor groove (not implemented)', noop, 'disabled'))
    return items
# end
| NanoCAD-master | cad/src/operations/bond_utils.py |
NanoCAD-master | cad/src/operations/__init__.py |
|
# Copyright 2004-2007 Nanorex, Inc. See LICENSE file for details.
"""
ops_motion.py -- various ways of moving or spatially distorting
selected atoms or chunks (and someday, attached jigs).
These operations don't create or destroy atoms or bonds.
@version: $Id$
@copyright: 2004-2007 Nanorex, Inc. See LICENSE file for details.
History:
bruce 050507 made this by collecting appropriate methods from class Part.
"""
from utilities.Log import greenmsg, redmsg
from platform_dependent.PlatformDependent import fix_plurals
from geometry.VQT import V, norm, Q, vlen, orthodist
import foundation.env as env
from math import pi
from utilities.debug import print_compact_traceback
from model.chunk import Chunk
from model.jigs import Jig
from model.jigs_motors import Motor
from analysis.ESP.ESPImage import ESPImage
from foundation.Group import Group
class ops_motion_Mixin:
"""
Mixin class for providing these methods to class Part
"""
###@@@ move/rot should be extended to apply to jigs too (and fit into some naming convention)
def movesel(self, offset):
"""
move selected chunks and jigs in space
"""
movables = self.getSelectedMovables()
self.translateSpecifiedMovables(offset, movables = movables)
def translateSpecifiedMovables(self, offset, movables =()):
"""
Translate the specified movables.
@param movables: a list of movables (default value is empty tuple)
@type movables: list
"""
for m in movables:
self.changed()
m.move(offset)
def rotsel(self, quat):
"""
Rotate selected chunks/jigs in space. [Huaicai 8/30/05: Fixed the problem of each rotating
around its own center, they will now rotate around their common center]
"""
movables = self.getSelectedMovables()
self.rotateSpecifiedMovables(quat, movables = movables)
return
def rotateSpecifiedMovables(self, quat, movables =(), commonCenter = None):
"""
Rotate the movables specified in the 'movables' list.
(Rotated as a unit)
@param quat: Quaternion for the rotation.
@param movables: A list of movables. These movables will be
rotated around a common axis
@type movables: list
"""
numMovables = len(movables)
if commonCenter is None:
# Find the common center of all selected chunks to fix bug 594
#--Huaicai 8/30/05
comCenter = V(0.0, 0.0, 0.0)
if numMovables:
for m in movables:
comCenter += m.center
comCenter /= numMovables
else:
comCenter = commonCenter
# Move the selected chunks
for m in movables:
self.changed() #Not sure if this can be combined into one call
# Get the moving offset because of the rotation around each
# movable's own center
rotOff = quat.rot(m.center - comCenter)
rotOff = comCenter - m.center + rotOff
m.move(rotOff)
m.rot(quat)
def Stretch(self):
"""
stretch a Chunk
"""
mc = env.begin_op("Stretch")
try:
cmd = greenmsg("Stretch: ")
if not self.selmols:
msg = redmsg("No selected chunks to stretch")
env.history.message(cmd + msg)
else:
self.changed()
for m in self.selmols:
m.stretch(1.1)
self.o.gl_update()
# Added history message. Mark 050413.
info = fix_plurals( "Stretched %d chunk(s)" % len(self.selmols))
env.history.message( cmd + info)
finally:
env.end_op(mc)
return
def Invert(self):
"""
Invert the atoms of the selected chunk(s) around the chunk centers
"""
mc = env.begin_op("Invert")
cmd = greenmsg("Invert: ")
if not self.selmols:
msg = redmsg("No selected chunks to invert")
env.history.message(cmd + msg)
return
self.changed()
for m in self.selmols:
m.stretch(-1.0)
self.o.gl_update()
info = fix_plurals( "Inverted %d chunk(s)" % len(self.selmols))
env.history.message( cmd + info)
env.end_op(mc) #e try/finally?
def Mirror(self):
"""
Mirror the selected chunk(s) about a selected grid plane.
"""
cmd = greenmsg("Mirror: ")
#ninad060814 this is necessary to fix a bug. Otherwise program will
#crash if you try to mirror when the top node of the part
#(main part of clipboard) is selected
if self.topnode.picked:
self.topnode.unpick_top()
self.mirrorJigs = self.getQualifiedMirrorJigs()
jigCounter = len(self.mirrorJigs)
if jigCounter < 1:
msg1 = "No mirror plane selected."
msg2 = " Please select a Reference Plane or a Grid Plane first."
msg = redmsg(msg1+msg2)
instr1 = "(If it doesn't exist, create it using"
instr2 = "<b>Insert > Reference Geometry menu </b> )"
instruction = instr1 + instr2
env.history.message(cmd + msg + instruction)
return
elif jigCounter >1:
msg = redmsg("More than one plane selected. Please select only one plane and try again")
env.history.message(cmd + msg )
return
for j in self.mirrorJigs:
j.unpick()
copiedObject = self.o.assy.part.copy_sel_in_same_part()
# ninad060812 Get the axis vector of the Grid Plane. Then you need to
#rotate the inverted chunk by pi around this axis vector
self.mirrorAxis = self.mirrorJigs[0].getaxis()
if isinstance(copiedObject, Chunk):
copiedObject.name = copiedObject.name + "-Mirror"
self._mirrorChunk(copiedObject)
return
elif isinstance(copiedObject, Group):
copiedObject.name = "Mirrored Items"
def mirrorChild(obj):
if isinstance(obj, Chunk):
self._mirrorChunk(obj)
elif isinstance(obj, Jig):
self._mirrorJig(obj)
copiedObject.apply2all(mirrorChild)
return
def _mirrorChunk(self, chunkToMirror):
"""
Converts the given chunk into its own mirror.
@param chunkToMirror: The chunk that needs to be converted into its own
mirror chunk.
@type chunkToMirror: instance of class Chunk
@see: self.Mirror
"""
m = chunkToMirror
# ninad060813 Following gives an orthogonal distance between the
#chunk center and mirror plane.
self.mirrorDistance, self.wid = orthodist(m.center,
self.mirrorAxis,
self.mirrorJigs[0].center)
# @@@@ ninad060813 This moves the mirror chunk on the other side of
# the mirror plane. It surely moves the chunk along the axis of the
# mirror plane but I am still unsure if this *always* moves the
# chunk on the other side of the mirror.
#Probably the 'orthodist' function has helped me here??
m.move(2*(self.mirrorDistance)*self.mirrorAxis)
m.stretch(-1.0)
m.rot(Q(self.mirrorAxis, pi))
return
def _mirrorJig(self, jigToMirror):
"""
Converts the given jig into its own mirror. If the jig is a motor,
it also reverses its direction.
@param jigToMirror: The jig that needs to be converted into its own
mirror jig.
@type jigToMirror: instance of class Jig
@see: self.Mirror
"""
j = jigToMirror
# ninad060813 This gives an orthogonal distance between the chunk
# center and mirror plane.
#Fixed bug 2503.
if not (isinstance(j, Motor) or isinstance(j, ESPImage)):
return
self.mirrorDistance, self.wid = orthodist(j.center, self.mirrorAxis,
self.mirrorJigs[0].center)
j.move(2*(self.mirrorDistance)*self.mirrorAxis)
j.rot(Q(self.mirrorAxis, pi))
#Reverse the direction of Linear and Rotary motor for correct
#mirror operation
if isinstance(j, Motor):
j.reverse_direction()
return
def getQualifiedMirrorJigs(self):
"""
Returns a list of objects that can be used as a
reference in Mirror Feature. (referece plane and grid planes are valid
objects). Only the first object in this list is used for mirror.
See Mirror method for details
"""
jigs = self.assy.getSelectedJigs()
mirrorJigs = []
for j in jigs:
if j.mmp_record_name is "gridplane" or j.mmp_record_name is "plane":
mirrorJigs.append(j)
return mirrorJigs
def align_NEW(self):
"""
Align the axes of the selected movables to the axis of the movable
that is placed at the highest order in the Model Tree
"""
#@@This is not called yet.
#method *always* uses the MT order to align chunks or jigs
#This supports jigs (including reference planes) but it has following
#bug -- It always uses the selected movable that is placed highest
#in the Model Tree, as the reference axis for alignment. (Instead
#it should align to the 'first selected movable'
#(this doesn't happen (or very rarely happens) in old align method where
#'selmols' is used.)
cmd = greenmsg("Align to Common Axis: ")
movables = self.assy.getSelectedMovables()
for m in movables:
print "movable =", m.name
numMovables = len(movables)
if len(movables) < 2:
msg = redmsg("Need two or more selected chunks to align")
env.history.message(cmd + msg)
return
self.changed()
try:
firstAxis = movables[0].getaxis()
for m in movables[1:]:
m.rot(Q(m.getaxis(),firstAxis))
self.o.gl_update()
except:
print_compact_traceback ("bug: selected movable object doesn't have an \
axis")
msg = redmsg("bug: selected movable object doesn't have an axis")
env.history.message(cmd + msg)
return
self.o.gl_update()
info = fix_plurals( "Aligned %d item(s)" % (len(movables) - 1) ) \
+ " to axis of %s" % movables[0].name
env.history.message( cmd + info)
return
def align(self):
"""
"""
cmd = greenmsg("Align to Common Axis: ")
if len(self.selmols) < 2:
msg = redmsg("Need two or more selected chunks to align")
env.history.message(cmd + msg)
return
self.changed() #bruce 050131 bugfix or precaution
#ax = V(0,0,0)
#for m in self.selmols:
# ax += m.getaxis()
#ax = norm(ax)
ax = self.selmols[0].getaxis() # Axis of first selected chunk
for m in self.selmols[1:]:
m.rot(Q(m.getaxis(),ax))
self.o.gl_update()
info = fix_plurals( "Aligned %d chunk(s)" % (len(self.selmols) - 1) ) \
+ " to chunk %s" % self.selmols[0].name
env.history.message( cmd + info)
#Ninad 060904 The following is not called from UI. Need to see if this is useful to the user.
def alignPerpendicularToPlane(self):
"""
Aligns the axes of selected jigs or chunks perpendicular to a reference plane
"""
cmd = greenmsg("Align to Plane:")
referencePlaneList = self.getQualifiedReferencePlanes()
jigCounter = len(referencePlaneList)
self.changed()
if jigCounter:
referencePlane = referencePlaneList[0] #ninad060904 If more than 1 ref planes are selected, it selectes the first in the order in the mmp file
if jigCounter < 1:
msg = redmsg("Please select a plane first.")
instruction = " Planes can also be created using the <b>Insert > Plane</b> command."
env.history.message(cmd + msg + instruction)
return
movables = self.assy.getSelectedMovables()
numMovables = len(movables)
#print len(movables)
if numMovables >1:
for obj in movables:
if obj is referencePlane:
pass
refAxis = referencePlane.getaxis()
obj.rot(Q(obj.getaxis(),refAxis))
self.o.gl_update()
else:
msg = redmsg("No chunks or movable jigs selected to align perpendicular to the reference plane.")
env.history.message(cmd + msg + instruction)
return
def getQualifiedReferencePlanes(self, jigs): #Ninad 060904
"""
Returns a list of jigs that can be used a reference plane in align to plane feature.
"""
referencePlaneList = []
for j in jigs:
if j.mmp_record_name is "gridplane":
referencePlaneList += [j]
return referencePlaneList
def alignmove(self):
cmd = greenmsg("Move to Axis: ")
if len(self.selmols) < 2:
msg = redmsg("Need two or more selected chunks to align")
env.history.message(cmd + msg)
return
self.changed()
#ax = V(0,0,0)
#for m in self.selmols:
# ax += m.getaxis()
#ax = norm(ax)
ax = self.selmols[0].getaxis() # Axis of first selected chunk
ctr = self.selmols[0].center # Center of first selected chunk
for m in self.selmols[1:]:
m.rot(Q(m.getaxis(),ax))
m.move(ctr-m.center) # offset
self.o.gl_update()
info = fix_plurals( "Aligned %d chunk(s)" % (len(self.selmols) - 1) ) \
+ " to chunk %s" % self.selmols[0].name
env.history.message( cmd + info)
pass # end of class ops_motion_Mixin
# end
| NanoCAD-master | cad/src/operations/ops_motion.py |
# Copyright 2004-2008 Nanorex, Inc. See LICENSE file for details.
"""
jigmakers_Mixin.py -- provides a mixin class to be inherited by class Part,
for providing operations for making specific kinds of Jigs, and associated
public helper functions.
@version: $Id$
@copyright: 2004-2008 Nanorex, Inc. See LICENSE file for details.
History:
071023 bruce split this out of jigs.py, to remove some needless
or misleading import dependencies on jigs.py.
"""
import sys
from PyQt4.Qt import QMessageBox
from utilities.Log import redmsg, greenmsg, orangemsg
import foundation.env as env
from model.jigs import Anchor
from model.jigs import Stat
from model.jigs import Thermo
from model.jigs import AtomSet
class jigmakers_Mixin:
    """
    Provide Jig-making methods to class Part.
    These should be refactored into some common code
    and new methods in the specific Jig subclasses.
    """

    def makeRotaryMotor(self):
        """
        Creates a Rotary Motor edit controller, which in turn creates a
        rotary motor connected to the selected atoms.
        """
        atoms = self.win.assy.selatoms_list()
        #This check fixes bug 2697. Simply don't enter the command (to create
        #a new motor), if there aren't enough atoms selected.
        if len(atoms) < 2:
            logMessage = "To create a rotary motor, you must select at least" \
                         " two atoms. Rotary motor not created"
            env.history.message(redmsg(logMessage))
            return
        commandSequencer = self.assy.w.commandSequencer
        commandSequencer.userEnterCommand('ROTARY_MOTOR')
        assert commandSequencer.currentCommand.commandName == 'ROTARY_MOTOR'
        commandSequencer.currentCommand.runCommand()

    def makeLinearMotor(self):
        """
        Creates a Linear Motor edit controller, which in turn creates a
        linear motor connected to the selected atoms.
        """
        atoms = self.win.assy.selatoms_list()
        #This check fixes bug 2697. Simply don't enter the command (to create
        #a new motor), if there aren't enough atoms selected.
        if len(atoms) < 1:
            logMessage = "To create a linear motor, you must select at least" \
                         " one atom. Linear motor not created"
            env.history.message(redmsg(logMessage))
            return
        commandSequencer = self.assy.w.commandSequencer
        commandSequencer.userEnterCommand('LINEAR_MOTOR')
        assert commandSequencer.currentCommand.commandName == 'LINEAR_MOTOR'
        commandSequencer.currentCommand.runCommand()

    def makegamess(self):
        """
        Makes a GAMESS jig from the selected chunks or atoms.
        """
        # [mark 2007-05-07 modified docstring]
        if sys.platform == "win32":
            gms_str = "PC GAMESS"
        else:
            gms_str = "GAMESS"
        cmd = greenmsg(gms_str + ": ")
        # Get a list of atoms from the selected chunks or atoms.
        # (removed a dead "atoms = []" store which was immediately overwritten)
        atoms = self.assy.selected_atoms_list(
            include_atoms_in_selected_chunks = True)
        if not atoms:
            msg = "At least one atom must be selected to create a " + \
                  gms_str + " jig."
            env.history.message(cmd + redmsg(msg))
            return
        # Make sure that no more than 200 atoms are selected.
        nsa = len(atoms)
        if nsa > 200:
            msg = str(nsa) + " atoms selected. The limit is 200."
            env.history.message(cmd + redmsg(msg))
            return
        # Bug 742. Mark 050731.
        if nsa > 50:
            ret = QMessageBox.warning( self.assy.w, "Too many atoms?",
                gms_str + " jigs with more than 50 atoms may take an\n"
                "excessively long time to compute (days or weeks).\n"
                "Are you sure you want to continue?",
                "&Continue", "Cancel", "",
                0, 1 )
            if ret == 1: # Cancel
                return
        from analysis.GAMESS.jig_Gamess import Gamess
        m = Gamess(self.assy, atoms)
        m.edit()
            #bruce 050701 split edit method out of the constructor, so the
            # dialog doesn't show up when the jig is read from an mmp file
        if m.cancelled: # User hit 'Cancel' button in the jig dialog.
            env.history.message(cmd + "Cancelled")
            return
        self.unpickall_in_GLPane()
        self.place_new_jig(m)
        env.history.message(cmd + gms_str + " jig created")
        self.assy.w.win_update()

    def makeAnchor(self):
        """
        Anchors the selected atoms so that they will not move
        during a minimization or simulation run.
        """
        cmd = greenmsg("Anchor: ")
        atoms = self.assy.selatoms_list()
        if not atoms:
            msg = "You must select at least one atom to create an Anchor."
            env.history.message(cmd + redmsg(msg))
            return
        # Print warning if over 200 atoms are selected.
        if atom_limit_exceeded_and_confirmed(self.assy.w,
                                             len(atoms),
                                             limit = 200):
            return
        m = Anchor(self.assy, atoms)
        self.unpickall_in_GLPane()
        self.place_new_jig(m)
        env.history.message(cmd + "Anchor created")
        self.assy.w.win_update()

    def makestat(self):
        """
        Attaches a Langevin thermostat to the single atom selected.
        """
        cmd = greenmsg("Thermostat: ")
        atoms = self.assy.selatoms_list()
        if not atoms:
            msg = "You must select an atom on the chunk you want to " \
                  "associate with a Thermostat."
            env.history.message(cmd + redmsg(msg))
            return
        # Make sure only one atom is selected.
        if len(atoms) != 1:
            msg = "To create a Thermostat, only one atom may be selected."
            env.history.message(cmd + redmsg(msg))
            return
        m = Stat(self.assy, atoms)
        self.unpickall_in_GLPane()
        self.place_new_jig(m)
        env.history.message(cmd + "Thermostat created")
        self.assy.w.win_update()

    def makethermo(self):
        """
        Attaches a thermometer to the single atom selected.
        """
        cmd = greenmsg("Thermometer: ")
        atoms = self.assy.selatoms_list()
        if not atoms:
            msg = "You must select an atom on the chunk you want to " \
                  "associate with a Thermometer."
            env.history.message(cmd + redmsg(msg))
            return
        # Make sure only one atom is selected.
        if len(atoms) != 1:
            msg = "To create a Thermometer, only one atom may be selected."
            env.history.message(cmd + redmsg(msg))
            return
        m = Thermo(self.assy, atoms)
        self.unpickall_in_GLPane()
        self.place_new_jig(m)
        env.history.message(cmd + "Thermometer created")
        self.assy.w.win_update()

    def makeGridPlane(self):
        """
        Creates a Grid Plane jig from 3 or more selected atoms.
        """
        cmd = greenmsg("Grid Plane: ")
        atoms = self.assy.selatoms_list()
        if not atoms:
            msg = "You must select 3 or more atoms to create a Grid Plane."
            env.history.message(cmd + redmsg(msg))
            return
        # Make sure at least 3 atoms are selected.
        # (the old comment here claimed "only one atom", which was wrong)
        if len(atoms) < 3:
            msg = "To create a Grid Plane, at least 3 atoms must be selected."
            env.history.message(cmd + redmsg(msg))
            return
        from model.jigs_planes import GridPlane
        m = GridPlane(self.assy, atoms)
        m.edit()
        if m.cancelled: # User hit 'Cancel' button in the jig dialog.
            env.history.message(cmd + "Cancelled")
            return
        self.unpickall_in_GLPane()
        self.place_new_jig(m)
        #After placing the jig, remove the atom list from the jig.
        m.atoms = []
        env.history.message(cmd + "Grid Plane created")
        self.assy.w.win_update()
        return

    def makeESPImage(self):
        """
        Creates an ESP Image jig from 3 or more selected atoms.
        """
        cmd = greenmsg("ESP Image: ")
        atoms = self.assy.selatoms_list()
        if len(atoms) < 3:
            msg = "You must select at least 3 atoms to create an ESP Image."
            env.history.message(cmd + redmsg(msg))
            return
        from analysis.ESP.ESPImage import ESPImage
        m = ESPImage(self.assy, atoms)
        m.edit()
        if m.cancelled: # User hit 'Cancel' button in the jig dialog.
            env.history.message(cmd + "Cancelled")
            return
        self.unpickall_in_GLPane()
        self.place_new_jig(m)
        # After placing the jig, remove the atom list from the jig.
        m.atoms = []
        env.history.message(cmd + "ESP Image created.")
        self.assy.w.win_update()
        return

    def makeAtomSet(self):
        """
        Creates an Atom Set from the selected atoms.
        """
        cmd = greenmsg("Atom Set: ")
        atoms = self.assy.selatoms_list()
        if not atoms:
            msg = "You must select at least one atom to create an Atom Set."
            env.history.message(cmd + redmsg(msg))
            return
        # Print warning if over 200 atoms are selected.
        if atom_limit_exceeded_and_confirmed(self.assy.w,
                                             len(atoms),
                                             limit = 200):
            return
        m = AtomSet(self.assy, atoms)
        self.place_new_jig(m)
        m.pick() # This is required to display the Atom Set wireframe boxes.
        env.history.message(cmd + "Atom Set created.")
        self.assy.w.win_update()
        return

    def makeMeasureDistance(self):
        """
        Creates a Measure Distance jig between two selected atoms.
        """
        cmd = greenmsg("Measure Distance Jig: ")
        atoms = self.assy.selatoms_list()
        if len(atoms) != 2:
            msg = "You must select 2 atoms to create a Distance jig."
            env.history.message(cmd + redmsg(msg))
            return
        from model.jigs_measurements import MeasureDistance
        d = MeasureDistance(self.assy, atoms)
        self.unpickall_in_GLPane()
        self.place_new_jig(d)
        env.history.message(cmd + "Measure Distance jig created")
        self.assy.w.win_update()

    def makeMeasureAngle(self):
        """
        Creates a Measure Angle jig connected to three selected atoms.
        """
        cmd = greenmsg("Measure Angle Jig: ")
        atoms = self.assy.selatoms_list()
        if len(atoms) != 3:
            msg = "You must select 3 atoms to create an Angle jig."
            env.history.message(cmd + redmsg(msg))
            return
        from model.jigs_measurements import MeasureAngle
        d = MeasureAngle(self.assy, atoms)
        self.unpickall_in_GLPane()
        self.place_new_jig(d)
        env.history.message(cmd + "Measure Angle jig created")
        self.assy.w.win_update()

    def makeMeasureDihedral(self):
        """
        Creates a Measure Dihedral jig connected to four selected atoms.
        """
        cmd = greenmsg("Measure Dihedral Jig: ")
        atoms = self.assy.selatoms_list()
        if len(atoms) != 4:
            msg = "You must select 4 atoms to create a Dihedral jig."
            env.history.message(cmd + redmsg(msg))
            return
        from model.jigs_measurements import MeasureDihedral
        d = MeasureDihedral(self.assy, atoms)
        self.unpickall_in_GLPane()
        self.place_new_jig(d)
        env.history.message(cmd + "Measure Dihedral jig created")
        self.assy.w.win_update()

    pass # end of class jigmakers_Mixin
# ==
def atom_limit_exceeded_and_confirmed(parent, natoms, limit = 200):
    """
    Display a warning dialog when 'natoms' reaches 'limit'.
    Returns False if the number of atoms does not reach the limit, or if the
    user confirms that the jig should still be created even though the limit
    was reached; returns True only when the user cancels.
    If parent is 0, the message box becomes an application-global modal dialog
    box.
    If parent is a widget, the message box becomes modal relative to parent.
    """
    if natoms < limit:
        # Under the threshold -- no dialog needed.
        return False
    question = ("Warning: Creating a jig with %s atoms may degrade performance."
                "\nDo you still want to add the jig?" % natoms)
    dialog = QMessageBox("Warning", question,
                         QMessageBox.Warning,
                         QMessageBox.Yes,
                         QMessageBox.No,
                         QMessageBox.NoButton,
                         parent)
    # We want to add a "Do not show this message again." checkbox to the dialog
    # like this:
    #   checkbox = QCheckBox("Do not show this message again.", dialog)
    # That works, but places the checkbox in the upper-left corner of the
    # dialog, obscuring important text. To be fixed later. Mark 051122.
    if dialog.exec_() != QMessageBox.Yes:
        # User cancelled the jig creation.
        return True
    # User confirmed despite the warning; note it in the history widget.
    warning = ("Warning: %s atoms selected. A jig with more than %s atoms"
               " may degrade performance." % (natoms, limit))
    env.history.message(orangemsg(warning))
    return False # from atom_limit_exceeded_and_confirmed
# end
| NanoCAD-master | cad/src/operations/jigmakers_Mixin.py |
# Copyright 2006-2008 Nanorex, Inc. See LICENSE file for details.
"""
chem_patterns.py -- finding simple patterns of bonded atoms
see also ND-1's pattern matching facility, which is faster and more general
@author: Bruce
@version: $Id$
@copyright: 2006-2008 Nanorex, Inc. See LICENSE file for details.
Todo later:
- really turn O into 8 and reverse rules as needed
- might need to also check bondtypes or atomtypes someday... problematic since they might be wrong to start with.
- might want to also select bad-valence atoms, or have another command for that.
- might need a few more kinds of patterns, like one for just 2 atoms... wait and see what's suggested.
"""
import foundation.env as env
import utilities.debug as debug
from platform_dependent.PlatformDependent import fix_plurals
cmdname = "Select Bad Atoms"
def compile_patterns():
    """
    Build the compiled form of the hardcoded bad-pattern list.

    @return: (bad_patterns_dict, root_eltnums, other_eltnums), where
             bad_patterns_dict maps each bad (other, root, other) element
             number triple to itself for fast lookup, root_eltnums lists
             element numbers that may sit at the center of a bad pattern,
             and other_eltnums lists those that may be a neighbor.
    """
    # Currently just O-O-O. Symbol triples like ('O','O','O') would be
    # mapped to numbers via PeriodicTable.getElement(sym).eltnum; any
    # asymmetric pattern would need compiling in both directions
    # (e.g. OON would also require NOO).
    bad_patterns_compiled = [(8, 8, 8)]
    bad_patterns_dict = {}
    roots = {}
    others = {}
    for triple in bad_patterns_compiled:
        bad_patterns_dict[triple] = triple # dict form for fast lookup
        o1e, ae, o2e = triple
        roots[ae] = ae
        others[o1e] = o1e
        others[o2e] = o2e
    # These lists are tiny, so linear search on them is cheap.
    return bad_patterns_dict, roots.values(), others.values()
def select_bad_atoms_cmd(widget): #bruce 060615 demo of simple "spelling checker" with hardcoded rules
    """
    Out of the selected atoms or chunks, select the atoms which have "bad spelling".

    If nothing is selected, the entire model is checked instead. Results
    are reported in the history widget, and only the bad atoms are left
    selected (subject to the selection filter).
    """
    from utilities.Log import orangemsg, redmsg, greenmsg
    greencmd = greenmsg("%s: " % cmdname)
    orangecmd = orangemsg("%s: " % cmdname) # used when bad atoms are found, even though no error occurred in the command itself
    win = env.mainwindow()
    assy = win.assy
    # 1. compile the patterns to search for. This could be done only once at init time, but it's fast so it doesn't matter.
    bad_patterns_dict, root_eltnums, other_eltnums = compile_patterns()
    # 2. Find the atoms to search from (all selected atoms, or atoms in selected chunks, are potential root atoms)
    checked_in_what = "selected atoms or chunks"
    contained = "contained"
    # atoms maps atom key -> atom, uniquifying atoms selected both directly
    # and via their chunk.
    atoms = {}
    for m in assy.selmols:
        atoms.update(m.atoms)
    atoms.update(assy.selatoms)
    if 0:
        # do this if you don't like the feature of checking the entire model when nothing is selected.
        if not atoms:
            env.history.message(redmsg("%s: nothing selected to check." % cmdname))
            return
    else:
        # if nothing is selected, work on the entire model.
        if not atoms:
            checked_in_what = "model"
            contained = "contains"
            for m in assy.molecules:
                atoms.update(m.atoms)
            if not atoms:
                env.history.message(redmsg("%s: model contains no atoms." % cmdname))
                return
        pass
    # 3. Do the search.
    bad_triples = [] # list of bad triples of atoms (perhaps with overlap)
    for a in atoms.itervalues():
        ae = a.element.eltnum
        if ae not in root_eltnums:
            # a cannot be the center atom of any bad pattern
            continue
        # collect neighbor atoms which could be the outer atoms of a pattern
        checkbonds = []
        for b in a.bonds:
            o = b.other(a)
            oe = o.element.eltnum
            if oe in other_eltnums:
                checkbonds.append((o,oe))
        nbonds = len(checkbonds)
        if nbonds > 1: #e we could easily optimize the following loop for fixed nbonds like 2,3,4... or code it in pyrex.
            # check every unordered pair of candidate neighbors around a
            for i in xrange(nbonds-1):
                for j in xrange(i+1,nbonds):
                    if (checkbonds[i][1], ae, checkbonds[j][1]) in bad_patterns_dict:
                        # gotcha!
                        bad_triples.append((checkbonds[i][0], a, checkbonds[j][0]))
    if not bad_triples:
        env.history.message(greencmd + "no bad patterns found in %s." % checked_in_what)
        return
    # done - deselect all, then select bad atoms if any. (Should we also deselect if we found no bad atoms, above??)
    win.glpane.gl_update()
    assy.unpickall_in_GLPane() #bruce 060721; was unpickatoms and unpickparts
    # bad_atoms maps atom key -> atom; uniquifies atoms shared by triples.
    bad_atoms = {}
    for a1,a2,a3 in bad_triples:
        bad_atoms[a1.key] = a1
        bad_atoms[a2.key] = a2
        bad_atoms[a3.key] = a3
    reallypicked = 0
    for a in bad_atoms.itervalues():
        a.pick()
        reallypicked += (not not a.picked) # check for selection filter effect
    env.history.message(orangecmd + fix_plurals(
        "%s %s %d bad atom(s), in %d bad pattern(s)." % \
        (checked_in_what, contained, len(bad_atoms), len(bad_triples)) ))
    if reallypicked < len(bad_atoms):
        # Some picks were vetoed by the selection filter; warn the user.
        env.history.message( orangemsg("Warning: ") + fix_plurals(
            "%d bad atom(s) were/was not selected due to the selection filter." % \
            (len(bad_atoms) - reallypicked) ))
    win.update_select_mode()
    return
def initialize():
    """
    Register this command in the debug menu under its command name.
    """
    debug.register_debug_menu_command(cmdname, select_bad_atoms_cmd)
# end
| NanoCAD-master | cad/src/operations/chem_patterns.py |
# Copyright 2004-2008 Nanorex, Inc. See LICENSE file for details.
"""
ops_files.py - provides fileSlotsMixin for MWsemantics,
with file slot methods and related helper methods.
@version: $Id$
@copyright: 2004-2008 Nanorex, Inc. See LICENSE file for details.
Note: most other ops_*.py files provide mixin classes for Part,
not for MWsemantics like this one.
History:
bruce 050907 split this out of MWsemantics.py.
[But it still needs major cleanup and generalization.]
mark 060730 removed unsupported slot method fileNew();
refined and added missing docstrings
"""
import re
import sys
import os
import shutil
import time
from PyQt4.Qt import Qt
from PyQt4.Qt import QFileDialog, QMessageBox, QString, QSettings
from PyQt4.Qt import QApplication
from PyQt4.Qt import QCursor
from PyQt4.Qt import QProcess
from PyQt4.Qt import QStringList
import foundation.env as env
from utilities import debug_flags
from platform_dependent.PlatformDependent import find_or_make_Nanorex_subdir
from model.assembly import Assembly
from operations.move_atoms_and_normalize_bondpoints import move_atoms_and_normalize_bondpoints
from simulation.runSim import readGromacsCoordinates
from files.pdb.files_pdb import insertpdb, writepdb
from files.pdb.files_pdb import EXCLUDE_BONDPOINTS, EXCLUDE_HIDDEN_ATOMS
from files.mmp.files_mmp import readmmp, insertmmp, fix_assy_and_glpane_views_after_readmmp
from files.amber_in.files_in import insertin
from files.ios.files_ios import exportToIOSFormat,importFromIOSFile
from graphics.rendering.povray.writepovfile import writepovfile
from graphics.rendering.mdl.writemdlfile import writemdlfile
from graphics.rendering.qutemol.qutemol import write_qutemol_pdb_file
from utilities.debug import print_compact_traceback
from utilities.debug import linenum
from utilities.debug import begin_timing, end_timing
from utilities.Log import greenmsg, redmsg, orangemsg, _graymsg
from utilities.prefs_constants import getDefaultWorkingDirectory
from utilities.prefs_constants import workingDirectory_prefs_key
from utilities.prefs_constants import toolbar_state_prefs_key
from utilities.debug_prefs import Choice_boolean_False
from utilities.debug_prefs import debug_pref
from utilities.constants import SUCCESS, ABORTED, READ_ERROR
from utilities.constants import str_or_unicode
from ne1_ui.FetchPDBDialog import FetchPDBDialog
from PyQt4.Qt import SIGNAL
from urllib import urlopen
debug_babel = False # DO NOT COMMIT with True
def set_waitcursor(on_or_off):
    """
    Turn the main window wait cursor on or off.

    @param on_or_off: True to show the wait cursor, False to revert to
                      the prior cursor.
    [It might be necessary to always call it in matched pairs,
    I don't know [bruce 050401].]
    """
    if on_or_off:
        # Push the wait cursor onto Qt's override-cursor stack.
        QApplication.setOverrideCursor( QCursor(Qt.WaitCursor) )
    else:
        # Pop it, restoring whatever cursor was active before.
        QApplication.restoreOverrideCursor()
    return
debug_part_files = False #&&& Debug prints to history. Change to False after QA. [Mark 060703]
def _fileparse(name):
"""
DEPRECATED; in new code use filesplit (not compatible) instead.
Breaks name into directory, main name, and extension in a tuple.
Example:
_fileparse('~/foo/bar/gorp.xam') ==> ('~/foo/bar/', 'gorp', '.xam')
"""
# bruce 050413 comment: deprecated in favor of filesplit
# wware 060811: clean things up using os.path functions
# [REVIEW: did that change behavior in edge cases like "/"?
# bruce 071030 question]
# bruce 071030: renamed to make it private.
dir, x = os.path.split(name)
if not dir:
dir = '.'
fil, ext = os.path.splitext(x)
return dir + os.path.sep, fil, ext
def _convertFiletypesForMacFileDialog(filetypes):
    """
    Return a QString file type list that includes "- suffix"
    in the name of each file type so that the extension (suffix)
    will appear in the file dialog file type menu.

    @param filetypes: a ";;"-separated Qt file-dialog filter string, e.g.
                      "Alchemy (*.alc);;Ball and Stick (*.bs);;"
    @note: Mac only; on other platforms I{filetypes} is returned unchanged.
    @see: QFileDialog
    """
    if sys.platform != "darwin":
        return filetypes

    def munge_ext(filetype):
        """
        Return filetype with "- suffix " just before "(*.ext)".
        """
        if filetype.find("(*.*)") != -1:
            return filetype # "All files" entry; leave it alone.
        # rsplit based on the last open paren
        _tmpstr = filetype.rsplit("(", 1)
        # Save the front part as the type description; also replace "." in
        # the descriptor with " ", as extra "."s can cause display problems
        # on Mac computers.
        type_descriptor = _tmpstr[0].strip().replace(".", " ")
        # split based on the matching close paren
        _tmpstr = _tmpstr[1].rsplit(")", 1)
        # save the end of the string for later
        type_end = _tmpstr[1]
        filter_string = _tmpstr[0]
        # If the filter is empty or itself contains parens, pass the
        # filetype through unmodified rather than risk mangling it.
        if len(filter_string.strip()) < 1 or filter_string.count("(") > 0 or \
           filter_string.count(")") > 0:
            return filetype
        # Replace all occurrences of ";" inside because we don't care about
        # that for the purposes of splitting up the file types, then split
        # on " ".
        typelist = filter_string.replace(";", " ").strip().split(" ")
        # Append the separate suffix strings, removing "*" and ".".
        type_filter = "".join(
            [" " + x.replace('*', '').replace('.', '') for x in typelist]).strip()
        # Reassemble the string in the new format.
        if type_descriptor != "":
            filetype = "%s - %s (%s)%s" % \
                     (type_descriptor, type_filter, filter_string, type_end)
        else:
            filetype = "%s (%s)%s" % \
                     (type_filter, filter_string, type_end)
        return filetype

    separator = ";;"
    filetypes = str(filetypes)
    if filetypes.endswith(separator):
        filetypeList = filetypes.split(separator)
    else:
        filetypeList = [filetypes, ""]
    _newFileTypes = ""
    # Rebuild and return the file type list string.
    for ftype in filetypeList[:-1]:
        _newFileTypes += munge_ext(ftype) + separator
    # Bug fix: str.rstrip() returns a new string; the original discarded its
    # result, so the intended removal of the trailing separator never
    # happened. Assign it so the trailing ";" characters are stripped.
    _newFileTypes = _newFileTypes.rstrip(";")
    return QString(_newFileTypes)
class fileSlotsMixin: #bruce 050907 moved these methods out of class MWsemantics
    """
    Mixin class to provide file-related methods for class MWsemantics.
    May not be safe to mix in to any other class, as it creates an
    Assembly(self), and Assembly expects an MWsemantics. Has slot
    methods and their helper methods.
    """
    #UM 20080702: required for fetching pdb files from the internet;
    # holds the PDB ID entered in the Fetch PDB dialog (see setPDBCode).
    _pdbCode = ''
    # Most-recently-used directories for the various import/save/insert/open
    # dialogs, so each dialog reopens where the user last left off.
    # None means "not used yet"; the methods then fall back to
    # self.currentWorkingDirectory.
    currentOpenBabelImportDirectory = None
    currentImportDirectory = None
    currentPDBSaveDirectory = None
    currentFileInsertDirectory = None
    currentFileOpenDirectory = None
def getCurrentFilename(self, extension = False):
"""
Returns the filename of the current part.
@param extension: If True, include the filename extension (i.e. .mmp).
The default is False.
@type extension: boolean
@return: the fullpath of the current part. If the part hasn't been
saved by the user yet, the fullpath returned will be
'$CURRENT_WORKING_DIRECTORY/Untitled'.
@rtype: string
@note: Callers typically call this method to get a fullpath to supply as
an argument to QFileDialog, which displays the basename in the
filename field. Normally, we'd like to include the extension
so that it is included in the filename field of the QFileDialog,
but when the user changes the filter (i.e. *.mmp to *.pdb),
the extension in the filename field does not get updated to
match the selected filter. This is a Qt bug and is why we do
not return the extension by default.
"""
if self.assy.filename:
fullpath, ext = os.path.splitext(self.assy.filename)
else:
# User hasn't saved the current part yet.
fullpath = \
os.path.join(env.prefs[workingDirectory_prefs_key],
"Untitled" )
if extension:
return fullpath + ".mmp" # Only MMP format is supported now.
else:
return fullpath
def fileOpenBabelImport(self): # Code copied from fileInsert() slot method. Mark 060731.
    """
    Slot method for 'File > Import'.

    Lets the user pick a file in any of the formats below. MMP files are
    inserted into the current part directly; every other format is first
    translated to a temporary MMP file via NE1's bundled Open Babel
    (launch_ne1_openbabel) and then inserted.
    """
    cmd = greenmsg("Import File: ")
    # This format list generated from the Open Babel wiki page:
    # http://openbabel.sourceforge.net/wiki/Babel#File_Formats
    formats = _convertFiletypesForMacFileDialog(\
        "All Files (*.*);;"\
        "Molecular Machine Part (*.mmp);;"\
        "Accelrys/MSI Biosym/Insight II CAR (*.car);;"\
        "Alchemy (*.alc);;"\
        "Amber Prep (*.prep);;"\
        "Ball and Stick (*.bs);;"\
        "Cacao Cartesian (*.caccrt);;"\
        "CCC (*.ccc);;"\
        "Chem3D Cartesian 1 (*.c3d1);;"\
        "Chem3D Cartesian 2 (*.c3d2);;"\
        "ChemDraw Connection Table (*.ct);;"\
        "Chemical Markup Language (*.cml);;"\
        "Chemical Resource Kit 2D diagram (*.crk2d);;"\
        "Chemical Resource Kit 3D (*.crk3d);;"\
        "CML Reaction (*.cmlr);;"\
        "DMol3 coordinates (*.dmol);;"\
        "Dock 3.5 Box (*.box);;"\
        "FastSearching Index (*.fs);;"\
        "Feature (*.feat);;"\
        "Free Form Fractional (*.fract);;"\
        "GAMESS Output (*.gam);;"\
        "GAMESS Output (*.gamout);;"\
        "Gaussian98/03 Output (*.g03);;"\
        "Gaussian98/03 Output (*.g98);;"\
        "General XML (*.xml);;"\
        "Ghemical (*.gpr);;"\
        "HyperChem HIN (*.hin);;"\
        "Jaguar output (*.jout);;"\
        "MacroModel (*.mmd);;"\
        "MacroModel (*.mmod);;"\
        "MDL MOL (*.mdl);;"\
        "MDL MOL (*.mol);;"\
        "MDL MOL (*.sd);;"\
        "MDL MOL (*.sdf);;"\
        "MDL RXN (*.rxn);;"\
        "MOPAC Cartesian (*.mopcrt);;"\
        "MOPAC Output (*.mopout);;"\
        "MPQC output (*.mpqc);;"\
        "MSI BGF (*.bgf);;"\
        "NWChem output (*.nwo);;"\
        "Parallel Quantum Solutions (*.pqs);;"\
        "PCModel (*.pcm);;"\
        "Protein Data Bank (*.ent);;"\
        "Protein Data Bank (*.pdb);;"\
        "PubChem (*.pc);;"\
        "Q-Chem output (*.qcout);;"\
        "ShelX (*.ins);;"\
        "ShelX (*.res);;"\
        "SMILES (*.smi);;"\
        "Sybyl Mol2 (*.mol2);;"\
        "TurboMole Coordinate (*.tmol);;"\
        "UniChem XYZ (*.unixyz);;"\
        "ViewMol (*.vmol);;"\
        "XYZ cartesian coordinates (*.xyz);;"\
        "YASARA YOB (*.yob);;")
    # Reopen the dialog in the directory the user last imported from.
    if (self.currentOpenBabelImportDirectory == None):
        self.currentOpenBabelImportDirectory = self.currentWorkingDirectory
    import_filename = QFileDialog.getOpenFileName(self,
                                                  "Open Babel Import",
                                                  self.currentOpenBabelImportDirectory,
                                                  formats
                                                  )
    if not import_filename:
        env.history.message(cmd + "Cancelled")
        return
    if import_filename:
        import_filename = str(import_filename)
        if not os.path.exists(import_filename):
            #bruce 050415: I think this should never happen;
            # in case it does, I added a history message (to existing if/return code).
            env.history.message( cmd + redmsg( "File not found: [ " + import_filename + " ]") )
            return
        # Anything that isn't an MMP file, we will import with Open Babel.
        # Its coverage of MMP files is imperfect so it makes mistakes, but
        # it would be good to use it enough to find those mistakes.
        if import_filename[-3:] == "mmp":
            try:
                success_code = insertmmp(self.assy, import_filename)
            except:
                print_compact_traceback( "MWsemantics.py: fileInsert(): error inserting MMP file [%s]: " % import_filename )
                env.history.message( cmd + redmsg( "Internal error while inserting MMP file: [ " + import_filename +" ]") )
            else:
                ###TODO: needs history message to depend on success_code
                # (since Insert can be cancelled or see a syntax error or
                # read error). [bruce 080606 comment]
                self.assy.changed() # The file and the part are not the same.
                env.history.message( cmd + "MMP file inserted: [ " + os.path.normpath(import_filename) + " ]" ) # fix bug 453 item. ninad060721
        # Is Open Babel better than our own? Someone should test it someday.
        # Mark 2007-06-05
        # elif import_filename[-3:] in ["pdb","PDB"]:
        #     try:
        #         insertpdb(self.assy, import_filename)
        #     except:
        #         print_compact_traceback( "MWsemantics.py: fileInsert(): error inserting PDB file [%s]: " % import_filename )
        #         env.history.message( redmsg( "Internal error while inserting PDB file: [ " + import_filename + " ]") )
        #     else:
        #         self.assy.changed() # The file and the part are not the same.
        #         env.history.message( cmd + "PDB file inserted: [ " + os.path.normpath(import_filename) + " ]" )
        else: # All other filetypes, which will be translated to MMP and inserted into the part.
            dir, fil, ext = _fileparse(import_filename)
            # Translate into a temporary MMP file, then insert that.
            tmpdir = find_or_make_Nanorex_subdir('temp')
            mmpfile = os.path.join(tmpdir, fil + ".mmp")
            result = self.launch_ne1_openbabel(in_format = ext[1:], infile = import_filename,
                                               out_format = "mmp", outfile = mmpfile)
            if result:
                success_code = insertmmp(self.assy, mmpfile)
                # Theoretically, we have successfully imported the file at this point.
                # But there might be a warning from insertmmp.
                # We'll assume it went well. Mark 2007-06-05
                ###TODO: needs history message to depend on success_code
                # (since Insert can be cancelled or see a syntax error or
                # read error). [bruce 080606 comment]
                msg = cmd + "File imported: [ " + os.path.normpath(import_filename) + " ]"
                env.history.message(msg)
            else:
                print "Open Babel had problem converting ", import_filename, "->", mmpfile
                env.history.message(cmd + redmsg("File translation failed."))
        # Refresh the 3D view and model tree, and remember this directory.
        self.glpane.scale = self.assy.bbox.scale()
        self.glpane.gl_update()
        self.mt.mt_update()
        dir, fil = os.path.split(import_filename)
        self.currentOpenBabelImportDirectory = dir
        self.setCurrentWorkingDirectory(dir)
def fileIOSImport(self): #Urmi 20080618
"""
Slot method for 'File > Import'.
Imports IOS file outputted by Parabon Computation Inc
Optimizer into the NE-1 model.
"""
#IOS files do not have positional info, hence a structure has to be existing
# in the screen for this to work.
# Note that the optimized sequences only get assigned if the structure on
# the NE-1 window matches the structure in the IOS file
cmd = greenmsg("IOS Import: ")
#check if screen is empty
if hasattr(self.assy.part.topnode, 'members'):
numberOfMembers = len(self.assy.part.topnode.members)
else:
#Its a clipboard part, probably a chunk or a jig not contained in
#a group.
print "No support for clipboard at this time"
return
if numberOfMembers == 0:
msg = "IOS import aborted since there aren't any DNA strands in "\
"the current model."
from PyQt4.Qt import QMessageBox
QMessageBox.warning(self.assy.win, "Warning!", msg)
return
formats = \
"Extensive Markup Language (*.xml);;"
if (self.currentImportDirectory == None) :
self.currentImportDirectory = currentWorkingDirectory
import_filename = QFileDialog.getOpenFileName(self,
"IOS Import",
self.currentImportDirectory,
formats
)
if not import_filename:
env.history.message(cmd + "Cancelled")
return
success = importFromIOSFile(self.assy, import_filename)
if success:
env.history.message(cmd + "Successfully imported optimized strands from " + import_filename)
dir, fil = os.path.split(import_filename)
self.currentImportDirectory = dir
self.setCurrentWorkingDirectory(dir)
else:
env.history.message(cmd + redmsg("Cannot import " + import_filename))
return
def fileIOSExport(self): #Urmi 20080610
"""
Slot method for 'File > Export'.
Creates File in IOS format to be used by Parabon Computation Inc
Optimizer from the NE-1 model.
"""
cmd = greenmsg("IOS Export: ")
if hasattr(self.assy.part.topnode, 'members'):
numberOfMembers = len(self.assy.part.topnode.members)
else:
#Its a clipboard part, probably a chunk or a jig not contained in
#a group.
print "No support for clipboard at this time"
return
if numberOfMembers == 0:
print "Nothing to export"
return
currentFilename = self.getCurrentFilename()
sfilter = QString("Extensive Markup Language (*.xml)")
formats = \
"Extensive Markup Language (*.xml);;"
export_filename = \
QFileDialog.getSaveFileName(self,
"IOS Export",
currentFilename,
formats,
sfilter
)
if not export_filename:
env.history.message(cmd + "Cancelled")
return
dir, fil, ext = _fileparse(str(export_filename))
if ext == "":
export_filename = str(export_filename) + ".xml"
exportToIOSFormat(self.assy, export_filename)
env.history.message(cmd + "Successfully exported structure info to " + export_filename)
return
def fileOpenBabelExport(self): # Fixed up by Mark. 2007-06-05
    """
    Slot method for 'File > Export'.

    MMP is saved natively, the QuteMolX PDB variant is written directly,
    and every other format is produced by saving a temporary MMP copy and
    translating it with NE1's bundled Open Babel (launch_ne1_openbabel).

    Exported files contain all atoms, including invisible and hidden atoms.
    This is considered a bug.
    """
    # To Do: Mark 2007-06-05
    #
    # - Export only visible atoms, etc.
    if debug_flags.atom_debug:
        linenum()
        print "start fileOpenBabelExport()"
    cmd = greenmsg("Export File: ")
    # This format list generated from the Open Babel wiki page:
    # http://openbabel.sourceforge.net/wiki/Babel#File_Formats
    #
    # -- * * * NOTE * * * --
    # The "MDL" file format used for Animation Master is not the
    # MDL format that Open Babel knows about. It is an animation
    # format, not a chemistry format.
    # Chemistry: http://openbabel.sourceforge.net/wiki/MDL_Molfile
    # Animation: http://www.hash.com/products/am.asp
    # For file export, we will use Open Babel's chemistry MDL format.
    currentFilename = self.getCurrentFilename()
    sfilter = _convertFiletypesForMacFileDialog(
        QString("Protein Data Bank format (*.pdb)"))
    formats = _convertFiletypesForMacFileDialog(\
        "Alchemy format (*.alc);;"\
        "MSI BGF format (*.bgf);;"\
        "Dock 3.5 Box format (*.box);;"\
        "Ball and Stick format (*.bs);;"\
        "Chem3D Cartesian 1 format (*.c3d1);;"\
        "Chem3D Cartesian 2 format (*.c3d2);;"\
        "Cacao Cartesian format (*.caccrt);;"\
        "CAChe MolStruct format (*.cache);;"\
        "Cacao Internal format (*.cacint);;"\
        "Chemtool format (*.cht);;"\
        "Chemical Markup Language (*.cml);;"\
        "CML Reaction format (*.cmlr);;"\
        "Gaussian 98/03 Cartesian Input (*.com);;"\
        "Copies raw text (*.copy);;"\
        "Chemical Resource Kit 2D diagram format (*.crk2d);;"\
        "Chemical Resource Kit 3D format (*.crk3d);;"\
        "Accelrys/MSI Quanta CSR format (*.csr);;"\
        "CSD CSSR format (*.cssr);;"\
        "ChemDraw Connection Table format (*.ct);;"\
        "DMol3 coordinates format (*.dmol);;"\
        "Protein Data Bank format (*.ent);;"\
        "Feature format (*.feat);;"\
        "Fenske-Hall Z-Matrix format (*.fh);;"\
        "SMILES FIX format (*.fix);;"\
        "Fingerprint format (*.fpt);;"\
        "Free Form Fractional format (*.fract);;"\
        "FastSearching (*.fs);;"\
        "GAMESS Input (*.gamin);;"\
        "Gaussian 98/03 Cartesian Input (*.gau);;"\
        "Ghemical format (*.gpr);;"\
        "GROMOS96 format (*.gr96);;"\
        "HyperChem HIN format (*.hin);;"\
        "InChI format (*.inchi);;"\
        "GAMESS Input (*.inp);;"\
        "Jaguar input format (*.jin);;"\
        "Compares first molecule to others using InChI (*.k);;"\
        "MacroModel format (*.mmd);;"\
        "MacroModel format (*.mmod);;"\
        "Molecular Machine Part format (*.mmp);;"\
        "MDL MOL format (*.mol);;"\
        "Sybyl Mol2 format (*.mol2);;"\
        "MOPAC Cartesian format (*.mopcrt);;"\
        "Sybyl descriptor format (*.mpd);;"\
        "MPQC simplified input format (*.mpqcin);;"\
        "NWChem input format (*.nw);;"\
        "PCModel Format (*.pcm);;"\
        "Protein Data Bank format (*.pdb);;"\
        "Protein Data Bank for QuteMolX (*.qdb);;"\
        "POV-Ray input format (*.pov);;"\
        "Parallel Quantum Solutions format (*.pqs);;"\
        "Q-Chem input format (*.qcin);;"\
        "Open Babel report format (*.report);;"\
        "MDL MOL format (*.mdl);;"\
        "MDL RXN format (*.rxn);;"\
        "MDL MOL format (*.sd);;"\
        "MDL MOL format (*.sdf);;"\
        "SMILES format (*.smi);;"\
        "Test format (*.test);;"\
        "TurboMole Coordinate format (*.tmol);;"\
        "Tinker MM2 format (*.txyz);;"\
        "UniChem XYZ format (*.unixyz);;"\
        "ViewMol format (*.vmol);;"\
        "XED format (*.xed);;"\
        "XYZ cartesian coordinates format (*.xyz);;"\
        "YASARA YOB format (*.yob);;"\
        "ZINDO input format (*.zin);;")
    export_filename = \
        QFileDialog.getSaveFileName(self,
                                    "Open Babel Export",
                                    currentFilename,
                                    formats,
                                    sfilter
                                    )
    if not export_filename:
        env.history.message(cmd + "Cancelled")
        if debug_flags.atom_debug:
            linenum()
            print "fileOpenBabelExport cancelled because user cancelled"
        return
    export_filename = str(export_filename)
    # Parse the selected filter into the format name and its extension,
    # e.g. "Protein Data Bank format " and ".pdb".
    sext = re.compile('(.*)\(\*(.+)\)').search(str(sfilter))
    assert sext is not None
    formatName = sext.group(1)
    sext = sext.group(2)
    if not export_filename.endswith(sext):
        # Append the extension implied by the selected filter.
        export_filename += sext
    if debug_flags.atom_debug:
        linenum()
        print "export_filename", repr(export_filename)
    dir, fil, ext = _fileparse(export_filename)
    if ext == ".mmp":
        # Native format; save directly, no translation needed.
        self.save_mmp_file(export_filename, brag = True)
    elif formatName.startswith("Protein Data Bank for QuteMolX"):
        write_qutemol_pdb_file(self.assy.part, export_filename,
                               EXCLUDE_BONDPOINTS | EXCLUDE_HIDDEN_ATOMS)
    else:
        # Anything that isn't an MMP file we will export with Open Babel.
        # Its coverage of MMP files is imperfect so it makes mistakes, but
        # it would be good to use it enough to find those mistakes.
        dir, fil, ext = _fileparse(export_filename)
        if debug_flags.atom_debug:
            linenum()
            print "dir, fil, ext :", repr(dir), repr(fil), repr(ext)
        tmpdir = find_or_make_Nanorex_subdir('temp')
        tmp_mmp_filename = os.path.join(tmpdir, fil + ".mmp")
        if debug_flags.atom_debug:
            linenum()
            print "tmp_mmp_filename :", repr(tmp_mmp_filename)
        # We simply want to save a copy of the MMP file, not its Part Files, too.
        # savePartFiles = False does this. Mark 2007-06-05
        self.saveFile(tmp_mmp_filename, brag = False, savePartFiles = False)
        result = self.launch_ne1_openbabel(in_format = "mmp", infile = tmp_mmp_filename,
                                           out_format = sext[1:], outfile = export_filename)
        if result and os.path.exists(export_filename):
            if debug_flags.atom_debug:
                linenum()
                print "file translation OK"
            env.history.message( cmd + "File exported: [ " + export_filename + " ]" )
        else:
            if debug_flags.atom_debug:
                linenum()
                print "file translation failed"
            print "Problem translating ", tmp_mmp_filename, '->', export_filename
            env.history.message(cmd + redmsg("File translation failed."))
    if debug_flags.atom_debug:
        linenum()
        print "finish fileOpenBabelExport()"
def launch_ne1_openbabel(self, in_format, infile, out_format, outfile):
"""
Runs NE1's own version of Open Babel for translating to/from MMP and
many chemistry file formats. It will not work with other versions of
Open Babel since they do not support MMP file format (yet).
<in_format> - the chemistry format of the input file, specified by the
file format extension.
<infile> is the input file.
<out_format> - the chemistry format of the output file, specified by the
file format extension.
<outfile> is the converted file.
@return: boolean success code (*not* error code)
Example: babel -immp methane.mmp -oxyz methane.xyz
"""
# filePath = the current directory NE-1 is running from.
filePath = os.path.dirname(os.path.abspath(sys.argv[0]))
# "program" is the full path to *NE1's own* Open Babel executable.
if sys.platform == 'win32':
program = os.path.normpath(filePath + '/../bin/babel.exe')
else:
program = os.path.normpath('/usr/bin/babel')
if not os.path.exists(program):
print "Babel program not found here: ", program
return False # failure
# Will (Ware) had this debug arg for our version of Open Babel, but
# I've no idea if it works now or what it does. Mark 2007-06-05.
if debug_flags.atom_debug:
debugvar = "WWARE_DEBUG=1"
print "debugvar =", debugvar
else:
debugvar = None
if debug_flags.atom_debug:
print "program =", program
infile = os.path.normpath(infile)
outfile = os.path.normpath(outfile)
in_format = "-i"+in_format
out_format = "-o"+out_format
arguments = QStringList()
i = 0
for arg in [in_format, infile, out_format, outfile, debugvar]:
if not arg:
continue # For debugvar.
if debug_flags.atom_debug:
print "argument", i, " :", repr(arg)
i += 1
arguments.append(arg)
# Looks like Will's special debugging code. Mark 2007-06-05
if debug_babel:
# wware 060906 Create a shell script to re-run Open Babel
outf = open("rerunbabel.sh", "w")
# On the Mac, "-f" prevents running .bashrc
# On Linux it disables filename wildcards (harmless)
outf.write("#!/bin/sh -f\n")
for a in arguments:
outf.write(str(a) + " \\\n")
outf.write("\n")
outf.close()
# Need to set these environment variables on MacOSX so that babel can
# find its libraries. Brian Helfrich 2007/06/05
if sys.platform == 'darwin':
babelLibPath = os.path.normpath(filePath + '/../Frameworks')
os.environ['DYLD_LIBRARY_PATH'] = babelLibPath
babelLibPath = babelLibPath + '/openbabel'
os.environ['BABEL_LIBDIR'] = babelLibPath
print "launching openbabel:", program, [str_or_unicode(arg) for arg in arguments]
proc = QProcess()
proc.start(program, arguments) # Mark 2007-06-05
if not proc.waitForFinished (100000):
# Wait for 100000 milliseconds (100 seconds)
# If not done by then, return an error.
print "openbabel timeout (100 sec)"
return False # failure
exitStatus = proc.exitStatus()
stderr = str(proc.readAllStandardError())[:-1]
stderr2 = str(stderr.split(os.linesep)[-1])
stderr2 = stderr2.strip()
success = (exitStatus == 0 and stderr2 == "1 molecule converted")
if not success or debug_flags.atom_debug:
print "exit status:", exitStatus
print "stderr says:", stderr
print "stderr2 says:"%stderr2
print "'success' is:", success
print "stderr2 == str(1 molecule converted)?" , (stderr2 == "1 molecule converted")
print "finish launch_ne1_openbabel(%s, %s)" % (repr(infile), repr(outfile))
return success
def fileInsertMmp(self):
    """
    Slot method for 'Insert > Molecular Machine Part file...'.
    """
    # Delegate to the generic insert slot with an MMP-specific filter.
    self.fileInsert("Molecular Machine Part (*.mmp);;"
                    "All Files (*.*)")
#UM 20080702: methods for fetching pdb files from the internet
def fileFetchPdb(self):
"""
Slot method for 'File > Fetch > Fetch PDB...'.
"""
form = FetchPDBDialog(self)
self.connect(form, SIGNAL('editingFinished()'), self.getPDBFileFromInternet)
return
def checkIfCodeIsValid(self, code):
    """
    Check if the PDB ID I{code} is valid.

    A valid code is four characters (a digit followed by letters/digits),
    optionally followed by a fifth chain-ID character. If a five letter
    code is entered and the last character is '_', it is altered to ' '.

    @return: (ok, code), where ok is True if I{code} is valid and code is
             the (possibly altered) PDB ID.
    """
    # First check if the length is correct.
    if not (len(code) == 4 or len(code) == 5):
        return False, code
    if len(code) == 4:
        end = len(code)
    else:
        end = len(code) - 1
    # PDB IDs always start with a digit.
    if not code[0].isdigit():
        return False, code
    for i in range(1, end):
        if not (code[i].isdigit() or code[i].isalpha()):
            return False, code
    # Special treatment for the fifth character (the chain ID).
    if len(code) == 5:
        if not (code[4].isdigit() or code[4].isalpha() or code[4] == '_'):
            return False, code
        if code[4] == '_':
            # Bug fix: keep all four ID characters and replace only the
            # trailing '_' with ' '. The original used code[0:3], which
            # silently dropped the 4th character of the PDB ID (e.g.
            # "1abc_" became "1ab "), breaking the chain-ID handling in
            # getPDBFileFromInternet().
            code = code[0:4] + ' '
    return True, code
def getAndWritePDBFile(self, code):
    """
    Fetch a PDB file from the internet (RCSB databank) and write it to a
    temporary location that is later removed.

    @param code: the four character PDB ID to fetch.
    @return: The full path to the PDB temporary file fetched from RCSB,
             or '' on failure.
    @rtype: string
    """
    try:
        # Display the "wait cursor" since this might take some time.
        from ne1_ui.cursors import showWaitCursor
        showWaitCursor(True)
        # Request the uncompressed .pdb entry for this ID from RCSB.
        urlString = "http://www.rcsb.org/pdb/download/downloadFile.do?fileFormat=pdb&compression=NO&structureId=" + code
        doc = urlopen(urlString).read()
        if doc.find("No File Found") != -1:
            # RCSB returned an error page rather than a PDB file.
            msg = "No protein exists in the PDB with this code."
            QMessageBox.warning(self, "Attention!", msg)
            showWaitCursor(False)
            return ''
    except:
        # NOTE(review): if the import or the urlString assignment above
        # failed, showWaitCursor / urlString are unbound here and this
        # handler would raise a secondary NameError -- confirm before
        # relying on this path.
        msg = "Error connecting to RCSB using URL [%s]" % urlString
        print_compact_traceback( msg )
        env.history.message( redmsg( msg ) )
        showWaitCursor(False)
        return ''
    # Create full path to Nanorex temp directory for pdb file.
    tmpdir = find_or_make_Nanorex_subdir('temp')
    pdbTmpFile = os.path.join(tmpdir, code + ".pdb")
    f = open(pdbTmpFile, 'w')
    f.write(doc)
    f.close()
    showWaitCursor(False) # Revert to the previous cursor.
    return pdbTmpFile
def insertPDBFromURL(self, filePath, chainID):
    """
    Insert the PDB file at I{filePath} into the assembly, then refresh
    the 3D view and the model tree.

    @param filePath: full path of the PDB file to read.
    @param chainID: chain ID to insert, or None for the whole file.
    """
    try:
        if chainID is None:
            insertpdb(self.assy, filePath)
        else:
            insertpdb(self.assy, filePath, chainID)
    except:
        print_compact_traceback( "MWsemantics.py: fileInsert(): error inserting PDB file [%s]: " % filePath )
        env.history.message( redmsg( "Internal error while inserting PDB file: [ " + filePath + " ]") )
    else:
        self.assy.changed() # The file and the part are not the same.
        env.history.message( "PDB file inserted: [ " + os.path.normpath(filePath) + " ]" )
    # Rescale and refresh the view regardless of insert success.
    self.glpane.scale = self.assy.bbox.scale()
    self.glpane.gl_update()
    self.mt.mt_update()
    return
def savePDBFileIfDesired(self, code, filePath):
    """
    Offer to save a permanent copy of a downloaded PDB file.

    Since the downloaded pdb file is stored in a temporary location (and
    removed by the caller afterwards), this asks the user whether to save
    a copy to disk first, and does so if requested.

    @param code: the PDB ID; used as the suggested file name.
    @param filePath: full path of the temporary PDB file.
    """
    # Ask the user if he wants to save the file, otherwise it is deleted.
    msg = "Do you want to save a copy of this PDB file in its original "\
        "format to your system disk before continuing?"
    ret = QMessageBox.warning( self, "Attention!",
                               msg,
                               "&Yes", "&No", "",
                               0,    # Enter = button 0 (yes)
                               1)    # Escape = button 1 (no)
    if ret:
        return # User selected 'No'.
    # Save this file. (Typo fix: filter said "Data BanK".)
    formats = \
            "Protein Data Bank (*.pdb);;"
    if (self.currentPDBSaveDirectory == None):
        self.currentPDBSaveDirectory = self.currentWorkingDirectory
    directory = self.currentPDBSaveDirectory
    fileName = code + ".pdb"
    currentFilename = directory + '/' + fileName
    sfilter = QString("Protein Data Bank (*.pdb)")
    fn = QFileDialog.getSaveFileName(self,
                                     "Save PDB File",
                                     currentFilename,
                                     formats,
                                     sfilter)
    if not fn:
        # User cancelled the save dialog.
        return
    #@ Review: the destination file will be overwritten if it exists.
    #  You should get confirmation from user first!
    #  mark 2008-07-03
    # Bug fix: only open the source file after the user has committed to a
    # destination; the original opened it before the cancel check above and
    # leaked the file handle on cancel. Both files are now closed reliably.
    fileObject1 = open(filePath, 'r')
    try:
        doc = fileObject1.readlines()
    finally:
        fileObject1.close()
    # We write everything to this pdb file, irrespective of
    # what the chain id is. It is too complicated to parse the info
    # related to this particular chain id.
    fileObject2 = open(fn, 'w+')
    try:
        fileObject2.writelines(doc)
    finally:
        fileObject2.close()
    dir, fil = os.path.split(str(fn))
    self.currentPDBSaveDirectory = dir
    self.setCurrentWorkingDirectory(dir)
    env.history.message( "PDB file saved: [ " + os.path.normpath(str(fn)) + " ]")
    return
def getPDBFileFromInternet(self):
    """
    Slot method for PDBFileDialog: download the PDB file named by the
    code previously stored via setPDBCode(), insert it into the model,
    offer to save a permanent copy, then delete the temporary file.
    """
    isValid, code = self.checkIfCodeIsValid(self._pdbCode)
    if not isValid:
        msg = "'%s' is an invalid PDB ID. Download aborted." % code
        env.history.message(redmsg(msg))
        QMessageBox.warning(self, "Attention!", msg)
        return
    filePath = self.getAndWritePDBFile(code[0:4])
    if not filePath:
        return
    # A 5-character code carries a chain ID as its final character.
    if len(code) == 5:
        chainID = code[4]
    else:
        chainID = None
    self.insertPDBFromURL(filePath, chainID)
    self.savePDBFileIfDesired(code, filePath)
    # delete the temp PDB file
    os.remove(filePath)
    return
def setPDBCode(self, code):
    """
    Remember the PDB code to be downloaded later by
    getPDBFileFromInternet().

    @param code: the PDB identifier string.
    """
    self._pdbCode = code
    return
def fileInsertPdb(self):
    """
    Slot method for 'Insert > Protein Data Bank file...'.
    Opens the generic insert chooser with PDB filters.
    """
    self.fileInsert("Protein Data Bank (*.pdb);;"
                    "All Files (*.*)")
def fileInsertIn(self):
    """
    Slot method for 'Insert > AMBER .in file fragment...'.
    Opens the generic insert chooser with .in_frag filters.
    """
    self.fileInsert("AMBER internal coordinates file fragment (*.in_frag);;"
                    "All Files (*.*)")
def fileInsert(self, formats):
    """
    Insert a file into the current part.

    @param formats: File format options in chooser filter.
    @type formats: string (";;"-separated filter clauses)
    """
    env.history.message(greenmsg("Insert File:"))
    if (self.currentFileInsertDirectory == None):
        self.currentFileInsertDirectory = self.currentWorkingDirectory
    fn = QFileDialog.getOpenFileName(self,
                                     "Insert File",
                                     self.currentFileInsertDirectory,
                                     formats)
    if not fn:
        env.history.message("Cancelled")
        return
    # (cleanup: removed a redundant 'if fn:' that re-tested the condition
    # already guaranteed by the early return above)
    fn = str(fn)
    if not os.path.exists(fn):
        #bruce 050415: I think this should never happen;
        # in case it does, I added a history message (to existing if/return code).
        env.history.message( redmsg( "File not found: [ " + fn+ " ]") )
        return
    if fn[-3:] == "mmp":
        try:
            success_code = insertmmp(self.assy, fn)
        except:
            print_compact_traceback( "MWsemantics.py: fileInsert(): error inserting MMP file [%s]: " % fn )
            env.history.message( redmsg( "Internal error while inserting MMP file: [ " + fn+" ]") )
        else:
            ###TODO: needs history message to depend on success_code
            # (since Insert can be cancelled or see a syntax error or
            # read error). [bruce 080606 comment]
            self.assy.changed() # The file and the part are not the same.
            env.history.message( "MMP file inserted: [ " + os.path.normpath(fn) + " ]" )# fix bug 453 item. ninad060721
    if fn[-3:] in ["pdb","PDB"]:
        try:
            insertpdb(self.assy, fn)
        except:
            print_compact_traceback( "MWsemantics.py: fileInsert(): error inserting PDB file [%s]: " % fn )
            env.history.message( redmsg( "Internal error while inserting PDB file: [ " + fn + " ]") )
        else:
            self.assy.changed() # The file and the part are not the same.
            env.history.message( "PDB file inserted: [ " + os.path.normpath(fn) + " ]" )
    if fn[-7:] == "in_frag":
        try:
            success_code = insertin(self.assy, fn)
        except:
            print_compact_traceback( "MWsemantics.py: fileInsert(): error inserting IN_FRAG file [%s]: " % fn )
            env.history.message( redmsg( "Internal error while inserting IN_FRAG file: [ " + fn+" ]") )
        else:
            ###TODO: needs history message to depend on success_code
            # (since Insert can be cancelled or see a syntax error or
            # read error). [bruce 080606 comment]
            self.assy.changed() # The file and the part are not the same.
            env.history.message( "IN file inserted: [ " + os.path.normpath(fn) + " ]" )# fix bug 453 item. ninad060721
    # Refresh the 3D view and model tree to show the inserted model.
    self.glpane.scale = self.assy.bbox.scale()
    self.glpane.gl_update()
    self.mt.mt_update()
    # Update the current working directory (CWD). Mark 060729.
    dir, fil = os.path.split(fn)
    self.currentFileInsertDirectory = dir
    self.setCurrentWorkingDirectory(dir)
def fileOpen(self, recentFile = None):
    """
    Slot method for 'File > Open'.

    By default, we assume user wants to specify file to open
    through 'Open File' dialog.

    @param recentFile: if provided, specifies file to open,
                       assumed to come from the 'Recent Files' menu list;
                       no Open File dialog will be used.
                       The file may or may not exist.
    @type recentFile: string
    """
    env.history.message(greenmsg("Open File:"))
    warn_about_abandoned_changes = True
    # note: this is turned off later if the user explicitly agrees
    # to discard unsaved changes [bruce 080909]
    if self.assy.has_changed():
        # Current part has unsaved changes: Save / Discard / Cancel.
        ret = QMessageBox.warning( self, "Warning!",
            "The part contains unsaved changes.\n"
            "Do you want to save the changes before opening a new part?",
            "&Save", "&Discard", "Cancel",
            0, # Enter == button 0
            2 ) # Escape == button 2
        if ret == 0:
            # Save clicked or Alt+S pressed or Enter pressed.
            #Huaicai 1/6/05: If user now cancels save operation, return
            # without letting user open another file
            if not self.fileSave():
                return
        elif ret == 1:
            # Discard
            warn_about_abandoned_changes = False
            # note: this is about *subsequent* discards on same old
            # model, if any (related to exit_is_forced)
            #Huaicai 12/06/04: don't clear assy, since user may cancel the file open action below
            pass ## self._make_and_init_assy()
        elif ret == 2:
            # Cancel clicked or Alt+C pressed or Escape pressed
            env.history.message("Cancelled.")
            return
        else:
            assert 0 #bruce 080909
    if recentFile:
        # Open the file named in the 'Recent Files' menu, if it still exists.
        if not os.path.exists(recentFile):
            if hasattr(self, "name"):
                name = self.name()
            else:
                name = "???"
            QMessageBox.warning( self, name,
                "The file [ " + recentFile + " ] doesn't exist any more.",
                QMessageBox.Ok, QMessageBox.NoButton)
            return
        fn = recentFile
    else:
        # Ask the user which file to open.
        formats = \
            "Molecular Machine Part (*.mmp);;"\
            "GROMACS Coordinates (*.gro);;"\
            "All Files (*.*)"
        if (self.currentFileOpenDirectory == None):
            self.currentFileOpenDirectory = self.currentWorkingDirectory
        fn = QFileDialog.getOpenFileName(self,
                                         "Open File",
                                         self.currentFileOpenDirectory,
                                         formats)
        if not fn:
            env.history.message("Cancelled.")
            return
    if fn:
        start = begin_timing("File..Open")
        self.updateRecentFileList(fn)
        self._make_and_init_assy('$DEFAULT_MODE',
                                 warn_about_abandoned_changes = warn_about_abandoned_changes )
        # resets self.assy to a new, empty Assembly object
        self.assy.clear_undo_stack()
        # important optimization -- the first call of clear_undo_stack
        # (for a given value of self.assy) does two initial checkpoints,
        # whereas all later calls do only one. Initial checkpoints
        # (which scan all the objects that hold undoable state which are
        # accessible from assy) are fast if done now (since assy is
        # empty), but might be quite slow later (after readmmp adds lots
        # of data to assy). So calling it now should speed up the later
        # call (near the end of this method) by making it scan all data
        # once rather than twice. The speedup from this has been
        # verified. [bruce & ericm 080225/082229]
        fn = str_or_unicode(fn)
        if not os.path.exists(fn):
            return
            #k Can that return ever happen? Does it need an error message?
            # Should preceding clear and modechange be moved down here??
            # (Moving modechange even farther below would be needed,
            # if we ever let the default mode be one that cares about the
            # model or viewpoint when it's entered.)
            # [bruce 050911 questions]
        _openmsg = "" # Precaution.
        ### REVIEW: it looks like this is sometimes used, and it probably
        # ought to be more informative, or be tested as a flag if
        # no message is needed in those cases. If it's never used,
        # it's not obvious why so that needs to be explained.
        # [bruce 080606 comment]
        env.history.message("Opening file...")
        isMMPFile = False
        gromacsCoordinateFile = None
        if fn[-4:] == ".gro":
            # A GROMACS coordinate file was chosen; locate the matching
            # .mmp file by trying several related basenames in turn,
            # and remap fn to that .mmp file.
            gromacsCoordinateFile = fn
            failedToFindMMP = True
            fn = gromacsCoordinateFile[:-3] + "mmp"
            if (os.path.exists(fn)):
                failedToFindMMP = False
            elif gromacsCoordinateFile[-8:] == ".xyz.gro":
                fn = gromacsCoordinateFile[:-7] + "mmp"
                if (os.path.exists(fn)):
                    failedToFindMMP = False
            elif gromacsCoordinateFile[-12:] == ".xyz-out.gro":
                fn = gromacsCoordinateFile[:-11] + "mmp"
                if (os.path.exists(fn)):
                    failedToFindMMP = False
            if (failedToFindMMP):
                env.history.message(redmsg("Could not find .mmp file associated with %s" % gromacsCoordinateFile))
                return
        # This puts up the hourglass cursor while opening a file.
        QApplication.setOverrideCursor( QCursor(Qt.WaitCursor) )
        ok = SUCCESS
        if fn[-3:] == "mmp":
            ok, listOfAtoms = readmmp(self.assy,
                                      fn,
                                      showProgressDialog = True,
                                      returnListOfAtoms = True)
            #bruce 050418 comment: we need to check for an error return
            # and in that case don't clear or have other side effects on assy;
            # this is not yet perfectly possible in readmmmp.
            #mark 2008-06-05 comment: I included an error return value
            # for readmmp (ok) checked below. The code below needs to
            # be cleaned up, but I need Bruce's help to do that.
            if ok == SUCCESS:
                _openmsg = "MMP file opened: [ " + os.path.normpath(fn) + " ]"
            elif ok == ABORTED:
                _openmsg = orangemsg("Open cancelled: [ " + os.path.normpath(fn) + " ]")
            elif ok == READ_ERROR:
                _openmsg = redmsg("Error reading: [ " + os.path.normpath(fn) + " ]")
            else:
                msg = "Unrecognized readmmp return value %r" % (ok,)
                print_compact_traceback(msg + ": ")
                _openmsg = redmsg("Bug: " + msg) #bruce 080606 bugfix
            isMMPFile = True
            if ok == SUCCESS and (gromacsCoordinateFile):
                #bruce 080606 added condition ok == SUCCESS (likely bugfix)
                newPositions = readGromacsCoordinates(gromacsCoordinateFile, listOfAtoms)
                if (type(newPositions) == type([])):
                    move_atoms_and_normalize_bondpoints(listOfAtoms, newPositions)
                else:
                    # readGromacsCoordinates returned an error string.
                    env.history.message(redmsg(newPositions))
        if ok == SUCCESS:
            dir, fil, ext = _fileparse(fn)
            # maybe: could replace some of following code with new method just now split out of saved_main_file [bruce 050907 comment]
            self.assy.name = fil
            self.assy.filename = fn
            self.assy.reset_changed() # The file and the part are now the same
            self.update_mainwindow_caption()
            if isMMPFile:
                #bruce 050418 moved this code into a new function in files_mmp.py
                # (but my guess is it should mostly be done by readmmp itself)
                fix_assy_and_glpane_views_after_readmmp( self.assy, self.glpane)
            else: ###PDB or other file format
                self.setViewFitToWindow()
        self.assy.clear_undo_stack() #bruce 060126, fix bug 1398
        # note: this is not redundant with the earlier call in this
        # method -- both are needed. See comment there for details.
        # [bruce comment 080229]
        ## russ 080603: Replaced by a call on gl_update_duration in
        ## GLPane.AnimateToView(), necessary for newly-created models.
        ##self.glpane.gl_update_duration(new_part = True) #mark 060116.
        self.mt.mt_update()
        # All set. Restore the normal cursor and print a history msg.
        env.history.message(_openmsg)
        QApplication.restoreOverrideCursor() # Restore the cursor
        end_timing(start, "File..Open")
        # Remember the directory for the next Open, and update the CWD.
        dir, fil = os.path.split(fn)
        self.currentFileOpenDirectory = dir
        self.setCurrentWorkingDirectory()
    return
def fileSave(self):
    """
    Slot method for 'File > Save'.

    @return: True if the part was saved (directly or via 'Save As'),
             False otherwise -- e.g. there is no assembly, or the user
             cancelled the 'Save As' dialog.
    """
    env.history.message(greenmsg("Save File:"))
    #Huaicai 1/6/05: by returning a boolean value to say if it is really
    # saved or not, user may choose "Cancel" in the "File Save" dialog
    if not self.assy:
        return False #bruce 050927 (equivalent to prior implicit return None)
    if not self.assy.filename:
        # Never saved before: delegate to 'Save As'.
        return self.fileSaveAs()
    self.saveFile(self.assy.filename)
    return True
def fileSaveAs(self): #bruce 050927 revised this
    """
    Slot method for 'File > Save As'.

    @return: True if the part was saved under the chosen name, False if
             the user cancelled or an error occurred (a history message
             was already emitted in that case).
    """
    chosen = self.fileSaveAs_filename()
    # chosen is None or a Python string
    if not chosen:
        # user cancelled, or some other error; message already emitted.
        return False
    self.saveFile(chosen)
    return True
def fileExportPdb(self):
    """
    Slot method for 'File > Export > Protein Data Bank...'

    Delegates to fileExport() with the PDB filter string.

    @return: The name of the file saved, or None if the user cancelled.
    @rtype: string
    """
    return self.fileExport("Protein Data Bank (*.pdb)")
def fileExportQuteMolXPdb(self):
    """
    Slot method for 'File > Export > Protein Data Bank for QuteMolX...'

    Delegates to fileExport() with the QuteMolX .qdb filter string.

    @return: The name of the file saved, or None if the user cancelled.
    @rtype: string
    """
    return self.fileExport("Protein Data Bank for QuteMolX (*.qdb)")
def fileExportJpg(self):
    """
    Slot method for 'File > Export > JPEG Image...'

    Delegates to fileExport() with the JPEG filter string.

    @return: The name of the file saved, or None if the user cancelled.
    @rtype: string
    """
    return self.fileExport("JPEG (*.jpg)")
def fileExportPng(self):
    """
    Slot method for 'File > Export > PNG Image...'

    Delegates to fileExport() with the PNG filter string.

    @return: The name of the file saved, or None if the user cancelled.
    @rtype: string
    """
    return self.fileExport("Portable Network Graphics (*.png)")
def fileExportPov(self):
    """
    Slot method for 'File > Export > POV-Ray...'

    Delegates to fileExport() with the POV-Ray filter string.

    @return: The name of the file saved, or None if the user cancelled.
    @rtype: string
    """
    return self.fileExport("POV-Ray (*.pov)")
def fileExportAmdl(self):
    """
    Slot method for 'File > Export > Animation Master Model...'

    Delegates to fileExport() with the .mdl filter string.

    @return: The name of the file saved, or None if the user cancelled.
    @rtype: string
    @note: There are more popular .mdl file formats that we may need
           to support in the future. This option was needed by John
           Burch to create the nanofactory animation.
    """
    return self.fileExport("Animation Master Model (*.mdl)")
def fileExport(self, format):
    """
    Exports the current part into a different file format.

    @param format: File format filter string to appear in the "Export As..."
                   file chooser dialog.
    @type format: string
    @return: The name of the file saved, or None if the user cancelled.
    @rtype: string
    """
    cmd = greenmsg("Export:")
    currentFilename = self.getCurrentFilename()
    sfilter = QString(format)
    options = QFileDialog.DontConfirmOverwrite # this fixes bug 2380 [bruce 070619]
    # Note: we can't fix that bug by removing our own confirmation
    # (later in this function) instead, since the Qt confirmation
    # doesn't happen if the file extension is implicit, as it is by
    # default due to the workaround for bug 225 (above) in which
    # currentFilename doesn't contain ext.
    # debug_prefs for experimentation with dialog style [bruce 070619]
    if (sys.platform == 'darwin' and
        debug_pref("File Save As: DontUseSheet",
                   Choice_boolean_False,
                   prefs_key = True)):
        options |= QFileDialog.DontUseSheet # probably faster
    if debug_pref("File Save As: DontUseNativeDialog",
                  Choice_boolean_False,
                  prefs_key = True):
        options |= QFileDialog.DontUseNativeDialog
    fn = QFileDialog.getSaveFileName(
        self,            # parent
        "Export As",     # caption
        currentFilename, # dialog's cwd and basename
        format,          # file format options
        sfilter,         # selectedFilter
        options          # bugfix: pass the computed options -- previously the
                         # bare DontConfirmOverwrite constant was passed here,
                         # silently ignoring the debug_pref flags set above
        )
    if not fn:
        return None
    # [bruce question: when and why can this differ from fn?]
    # IIRC, fileparse() doesn't (or didn't) handle QString types.
    # mark 2008-01-23
    fn = str(fn)
    dir, fil, ext2 = _fileparse(fn)
    del fn #bruce 050927
    ext = str(sfilter[-5:-1])
    # Get "ext" from the sfilter.
    # It *can* be different from "ext2"!!! - Mark
    safile = dir + fil + ext # full path of "Save As" filename
    # ask user before overwriting an existing file
    # (other than this part's main file)
    if os.path.exists(safile):
        # Confirm overwrite of the existing file.
        ret = QMessageBox.warning( self, "Warning!",
            "The file \"" + fil + ext + "\" already exists.\n"\
            "Do you want to overwrite the existing file?",
            "&Overwrite", "&Cancel", "",
            0, # Enter == button 0
            1 ) # Escape == button 1
        if ret == 1: # The user cancelled
            env.history.message( cmd + "Cancelled. Part not exported." )
            return None # Cancel/Escape pressed, user cancelled.
    ###e bruce comment 050927: this might be a good place to test whether we can write to that filename,
    # so if we can't, we can let the user try choosing one again, within
    # this method. But we shouldn't do this if it's the main filename,
    # to avoid erasing that file now. (If we only do this test with each
    # function that writes into the file, then if that fails it'll more
    # likely require user to redo the entire op.)
    self.saveFile(safile)
    return safile
def fileSaveAs_filename(self, images_ok = True):
    #bruce 050927 split this out of fileSaveAs, added some comments,
    # added images_ok option
    """
    Prompt user with a "Save As..." file chooser dialog to specify a
    new MMP filename. If file exists, ask them to confirm overwrite of
    that file.

    @param images_ok: accepted for compatibility with callers such as
                      fileSaveSelection (which passes images_ok = False);
                      this dialog currently only offers the MMP format, so
                      the flag has no effect yet. (Bugfix: the parameter
                      was referenced in comments and passed by a caller
                      but missing from the signature, causing a TypeError.)
    @return: the filename. If user cancels, or if some error occurs,
             emit a history message and return None.
    @rtype: string
    """
    currentFilename = self.getCurrentFilename()
    format = "Molecular Machine Part (*.mmp)"
    sfilter = QString(format)
    options = QFileDialog.DontConfirmOverwrite
    # this fixes bug 2380 [bruce 070619]
    # Note: we can't fix that bug by removing our own confirmation
    # (later in this function) instead, since the Qt confirmation
    # doesn't happen if the file extension is implicit, as it is by
    # default due to the workaround for bug 225 (above) in which
    # currentFilename doesn't contain ext.
    # debug_prefs for experimentation with dialog style [bruce 070619]
    if (sys.platform == 'darwin'
        and debug_pref("File Save As: DontUseSheet",
                       Choice_boolean_False,
                       prefs_key = True)):
        options |= QFileDialog.DontUseSheet # probably faster -- try it and see
    if debug_pref("File Save As: DontUseNativeDialog",
                  Choice_boolean_False,
                  prefs_key = True):
        options |= QFileDialog.DontUseNativeDialog
    fn = QFileDialog.getSaveFileName(
        self,            # parent
        "Save As",       # caption
        currentFilename, # dialog's cwd and basename
        format,          # filter
        sfilter,         # selectedFilter
        options          # bugfix: pass the computed options -- previously the
                         # bare DontConfirmOverwrite constant was passed here,
                         # silently ignoring the debug_pref flags set above
        )
    if not fn:
        return None # User cancelled.
    # [bruce question: when and why can this differ from fn?]
    # IIRC, fileparse() doesn't (or didn't) handle QString types.
    # mark 2008-01-23
    fn = str_or_unicode(fn)
    dir, fil, ext2 = _fileparse(fn)
    del fn #bruce 050927
    ext = str(sfilter[-5:-1])
    # Get "ext" from the sfilter. It *can* be different from "ext2"!!!
    # Note: As of 2008-01-23, only the MMP extension is supported.
    # This may change in the future. Mark 2008-01-23.
    safile = dir + fil + ext # full path of "Save As" filename
    # Ask user before overwriting an existing file
    # (except this part's main file)
    if self.assy.filename != safile:
        # If the current part and "Save As" filename are not the same...
        if os.path.exists(safile):
            # ...and if the "Save As" file exists.
            # confirm overwrite of the existing file.
            ret = QMessageBox.warning( self, "Warning!",
                "The file \"" + fil + ext + "\" already exists.\n"\
                "Do you want to overwrite the existing file?",
                "&Overwrite", "&Cancel", "",
                0, # Enter == button 0
                1 ) # Escape == button 1
            if ret == 1: # The user cancelled
                env.history.message( "Cancelled. Part not saved." )
                return None # User cancelled
    ###e bruce comment 050927: this might be a good place to test whether we can write to that filename,
    # so if we can't, we can let the user try choosing one again, within this method.
    # But we shouldn't do this if it's the main filename, to avoid erasing that file now.
    # (If we only do this test with each function
    # that writes into the file, then if that fails it'll more likely require user to redo the entire op.)
    return safile
def fileSaveSelection(self): #bruce 050927
    """
    Slot method for 'File > Save Selection'.
    """
    env.history.message(greenmsg("Save Selection:"))
    # print this before counting up what selection contains, in case that's slow or has bugs
    (part, killfunc, desc) = self.assy.part_for_save_selection()
    # part is existing part (if entire current part was selected)
    # or new homeless part with a copy of the selection (if selection is not entire part)
    # or None (if the current selection can't be saved [e.g. if nothing selected ##k]).
    # If part is not None, its contents are described in desc;
    # otherwise desc is an error message intended for the history.
    if part is None:
        env.history.message(redmsg(desc))
        return
    # now we're sure the current selection is savable
    safile = self.fileSaveAs_filename( images_ok = False)
    ##e if entire part is selected, could pass images_ok = True,
    # if we also told part_for_save_selection above never to copy it,
    # which is probably appropriate for all image-like file formats
    if not safile:
        # Bugfix: user cancelled the Save As dialog (message already
        # emitted); don't pass None on to savePartInSeparateFile.
        # Still release the possibly-copied part via killfunc.
        killfunc()
        return
    saved = self.savePartInSeparateFile(part, safile)
    if saved:
        desc = desc or "selection"
        env.history.message( "Saved %s in %s" % (desc, safile) )
        #e in all histmsgs like this, we should encode html chars in safile and desc!
    else:
        pass # assume savePartInSeparateFile emitted error message
    killfunc()
    return
def saveFile(self, safile, brag = True, savePartFiles = True):
    """
    Save the current part as I{safile}.

    @param safile: the part filename; its extension selects the writer.
    @type safile: string
    @param savePartFiles: True (default) means save any part files if this
                          MMP file has a Part Files directory.
                          False means just save the MMP file and don't
                          worry about saving the Part Files directory, too.
    """
    junk_dir, junk_fil, extension = _fileparse(safile)
    #e only ext needed in most cases here, could replace with os.path.split [bruce 050907 comment]
    if extension != ".mmp":
        # Any non-MMP extension goes through the generic per-format saver.
        self.savePartInSeparateFile( self.assy.part, safile)
        return
    # Native MMP save, plus current-working-directory bookkeeping.
    self.save_mmp_file(safile, brag = brag, savePartFiles = savePartFiles)
    self.setCurrentWorkingDirectory() # Update the CWD.
    return
def savePartInSeparateFile( self, part, safile): #bruce 050927 added part arg, renamed method
    """
    Save some aspect of part (which might or might not be self.assy.part)
    in a separate file, named safile, without resetting self.assy's
    changed flag or filename. For some filetypes, use display attributes
    from self.glpane.

    For JPG and PNG, assert part is the glpane's current part, since
    current implem only works then.

    @param part: the Part to save (or, for image formats, the part whose
                 rendering is grabbed from the glpane frame buffer).
    @param safile: full pathname to save to; its extension selects the format.
    """
    #e someday this might become a method of a "saveable object" (open file) or a "saveable subobject" (part, selection).
    linenum()
    dir, fil, ext = _fileparse(safile)
    #e only ext needed in most cases here, could replace with os.path.split [bruce 050908 comment]
    type = "this" # never used (even if caller passes in unsupported filetype) unless there are bugs in this routine
    saved = True # be optimistic (not bugsafe; fix later by having save methods which return a success code)
    glpane = self.glpane
    try:
        # all these cases modify type variable, for use only in subsequent messages.
        # kluges: glpane is used for various display options;
        # and for grabbing frame buffer for JPG and PNG formats
        # (only correct when the part being saved is the one it shows, which we try to check here).
        linenum()
        if ext == ".mmp": #bruce 050927; for now, only used by Save Selection
            type = "MMP"
            part.writemmpfile( safile) ###@@@ WRONG, stub... this writes a smaller file, unreadable before A5, with no saved view.
            #e also, that func needs to report errors; it probably doesn't now.
            ###e we need variant of writemmpfile_assy, but the viewdata will differ...
            # pass it a map from partindex to part?
            # or, another way, better if it's practical: ###@@@ DOIT
            # make a new assy (no shelf, same pov, etc) and save that. kill it at end.
            # might need some code cleanups. what's done to it? worry about saver code reset_changed on it...
            msg = "Save Selection: not yet fully implemented; saved MMP file lacks viewpoint and gives warnings when read."
            # in fact, it lacks chunk/group structure and display modes too, and gets hydrogenated as if for sim!
            print msg
            env.history.message( orangemsg(msg) )
        elif ext == ".pdb": #bruce 050927; for now, only used by Save Selection
            type = "PDB"
            writepdb(part, safile)
        elif ext == ".qdb": #mark 2008-03-21
            type = "QDB"
            # NOTE(review): this branch uses self.assy.part rather than the
            # 'part' argument, unlike the other branches -- verify intended.
            write_qutemol_pdb_file(self.assy.part, safile,
                                   EXCLUDE_BONDPOINTS | EXCLUDE_HIDDEN_ATOMS)
        elif ext == ".pov":
            type = "POV-Ray"
            writepovfile(part, glpane, safile) #bruce 050927 revised arglist
        elif ext == ".mdl":
            linenum()
            type = "MDL"
            writemdlfile(part, glpane, safile) #bruce 050927 revised arglist
        elif ext == ".jpg":
            type = "JPEG"
            image = glpane.grabFrameBuffer()
            image.save(safile, "JPEG", 85)
            assert part is self.assy.part, "wrong image was saved" #bruce 050927
            assert self.assy.part is glpane.part, "wrong image saved since glpane not up to date" #bruce 050927
        elif ext == ".png":
            type = "PNG"
            image = glpane.grabFrameBuffer()
            image.save(safile, "PNG")
            assert part is self.assy.part, "wrong image was saved" #bruce 050927
            assert self.assy.part is glpane.part, "wrong image saved since glpane not up to date" #bruce 050927
        else: # caller passed in unsupported filetype (should never happen)
            saved = False
            env.history.message(redmsg( "File Not Saved. Unknown extension: " + ext))
    except:
        linenum()
        print_compact_traceback( "error writing file %r: " % safile )
        env.history.message(redmsg( "Problem saving %s file: " % type + safile ))
    else:
        linenum()
        if saved:
            linenum()
            env.history.message( "%s file saved: " % type + safile )
    return
def save_mmp_file(self, safile, brag = True, savePartFiles = True):
    # bruce 050907 split this out of saveFile; maybe some of it should be moved back into caller ###@@@untested
    """
    Save the current part as a MMP file under the name <safile>.

    If we are saving a part (assy) that already exists and it has an (old)
    Part Files directory, copy those files to the new Part Files directory
    (i.e. '<safile> Files').

    @param safile: full pathname to write the MMP file to.
    @param brag: when True, emit a "file saved" history message on success.
    @param savePartFiles: when False, write only the MMP file and skip all
                          Part Files directory handling.
    """
    dir, fil, extjunk = _fileparse(safile)
    from dna.updater.dna_updater_prefs import pref_mmp_save_convert_to_PAM5
    from utilities.constants import MODEL_PAM5
    # temporary, so ok to leave local for now:
    from utilities.GlobalPreferences import debug_pref_write_bonds_compactly
    from utilities.GlobalPreferences import debug_pref_read_bonds_compactly
    # determine options for writemmpfile
    options = dict()
    if pref_mmp_save_convert_to_PAM5(): # maybe WRONG, see whether calls differ in this! ##### @@@@@@ [bruce 080326]
        options.update(dict(convert_to_pam = MODEL_PAM5,
                            honor_save_as_pam = True))
        pass
    if debug_pref_write_bonds_compactly(): # bruce 080328
        # temporary warning
        env.history.message( orangemsg( "Warning: writing mmp file with experimental bond_chain records"))
        if not debug_pref_read_bonds_compactly():
            env.history.message( orangemsg( "Warning: your bond_chain reading code is presently turned off"))
        options.update(dict(write_bonds_compactly = True))
        pass
    # Write to a temp file first, then move it into place, so a failed
    # save cannot clobber an existing good file.
    tmpname = "" # in case of exceptions
    try:
        tmpname = os.path.join(dir, '~' + fil + '.m~')
        self.assy.writemmpfile(tmpname, **options)
    except:
        #bruce 050419 revised printed error message
        print_compact_traceback( "Problem writing file [%s]: " % safile )
        env.history.message(redmsg( "Problem saving file: " + safile ))
        # If you want to see what was wrong with the MMP file, you can comment this out so
        # you can see what's in the temp MMP file. Mark 050128.
        if os.path.exists(tmpname):
            os.remove (tmpname) # Delete tmp MMP file
    else:
        if os.path.exists(safile):
            os.remove (safile) # Delete original MMP file
            #bruce 050907 suspects this is never necessary, but not sure;
            # if true, it should be removed, so there is never a time with no file at that filename.
            # (#e In principle we could try just moving it first, and only if that fails, try removing and then moving.)
        os.rename( tmpname, safile) # Move tmp file to saved filename.
        if not savePartFiles:
            # Sometimes, we just want to save the MMP file and not worry about
            # any of the part's Part Files. For example, Open Babel just needs to
            # save a copy of the current MMP file in a temp directory for
            # translation purposes (see fileExport() and fileOpenBabelImport()).
            # Mark 2007-06-05
            return
        errorcode, oldPartFilesDir = self.assy.find_or_make_part_files_directory(make = False) # Mark 060703.
        # If errorcode, print a history warning about it and then proceed as if the old Part Files directory is not there.
        if errorcode:
            env.history.message( orangemsg(oldPartFilesDir))
            oldPartFilesDir = None # So we don't copy it below.
        self.saved_main_file(safile, fil)
        if brag:
            env.history.message( "MMP file saved: [ " + os.path.normpath(self.assy.filename) + " ]" )
            # bruce 060704 moved this before copying part files,
            # which will now ask for permission before removing files,
            # and will start and end with a history message if it does anything.
            # wware 060802 - if successful, we may choose not to brag, e.g. during a
            # step of exporting a non-native file format
        # If it exists, copy the Part Files directory of the original part
        # (oldPartFilesDir) to the new name (i.e. "<safile> Files")
        if oldPartFilesDir: #bruce 060704 revised this code
            errorcode, errortext = self.copy_part_files_dir(oldPartFilesDir)
            # Mark 060703. [only copies them if they exist]
            #bruce 060704 will modify that function, e.g. to make it print
            # a history message when it starts copying.
            if errorcode:
                env.history.message( orangemsg("Problem copying part files: " + errortext ))
            else:
                if debug_part_files:
                    env.history.message( _graymsg("debug: Success copying part files: " + errortext ))
        else:
            if debug_part_files:
                env.history.message( _graymsg("debug: No part files to copy." ))
    return
def copy_part_files_dir(self, oldPartFilesDir): # Mark 060703. NFR bug 2042. Revised by bruce 060704 for user safety, history.
"""
Recursively copy the entire directory tree rooted at oldPartFilesDir to the assy's (new) Part Files directory.
Return errorcode, message (message might be for error or for success, but is not needed for success except for debugging).
Might also print history messages (and in future, maintain progress indicators) about progress.
"""
set_waitcursor(True)
if not oldPartFilesDir:
set_waitcursor(False)
return 0, "No part files directory to copy."
errorcode, newPartFilesDir = self.assy.get_part_files_directory() # misnamed -- actually just gets its name
if errorcode:
set_waitcursor(False)
return 1, "Problem getting part files directory name: " + newPartFilesDir
if oldPartFilesDir == newPartFilesDir:
set_waitcursor(False)
return 0, "Nothing copied since the part files directory is the same."
if os.path.exists(newPartFilesDir):
# Destination directory must not exist. copytree() will create it.
# Assume the user was prompted and confirmed overwriting the MMP file,
# and thus its part files directory, so remove newPartFilesDir.
#bruce 060704 revision -- it's far too dangerous to do this without explicit permission.
# Best fix would be to integrate this permission with the one for overwriting the main mmp file
# (which may or may not have been given at this point, in the current code --
# it might be that the newPartFilesDir exists even if the new mmp file doesn't).
# For now, if no time for better code for A8, just get permission here. ###@@@
if os.path.isdir(newPartFilesDir):
if "need permission":
# ... confirm overwrite of the existing file. [code copied from another method above]
ret = QMessageBox.warning( self, "Warning!", ###k what is self.name()?
"The Part Files directory for the copied mmp file,\n[" + newPartFilesDir + "], already exists.\n"\
"Do you want to overwrite this directory, or skip copying the Part Files from the old mmp file?\n"\
"(If you skip copying them now, you can rename this directory and copy them using your OS;\n"\
"if you don't rename it, the copied mmp file will use it as its own Part Files directory.)",
"&Overwrite", "&Skip", "",
0, # Enter == button 0
1 ) # Escape == button 1
if ret == 1: # The user wants to skip copying the part files
msg = "Not copying Part Files; preexisting Part Files directory at new name [%s] will be used unless renamed." % newPartFilesDir
env.history.message( orangemsg( msg ) )
return 0, "Nothing copied since user skipped overwriting existing part files directory"
else:
# even this could take a long time; and the user needs to have a record that we're doing it
# (in case they later realize it was a mistake).
msg = "Removing existing part files directory [%s]" % newPartFilesDir
env.history.message( orangemsg( msg ) )
env.history.h_update() # needed, since following op doesn't processEvents and might take a long time
try:
shutil.rmtree(newPartFilesDir)
except Exception, e:
set_waitcursor(False)
return 1, ("Problem removing an existing part files directory [%s]" % newPartFilesDir
+ " - ".join(map(str, e.args)))
# time to start copying; tell the user what's happening
# [in future, ASAP, this needs to be abortable, and maybe have a progress indicator]
###e this ought to have a wait cursor; should grab code from e.g. SurfaceChunks
msg = "Copying part files from [%s] to [%s]" % ( oldPartFilesDir, newPartFilesDir )
env.history.message( msg )
env.history.h_update() # needed
try:
shutil.copytree(oldPartFilesDir, newPartFilesDir)
except Exception, e:
eic.handle_exception() # BUG: Undefined variable eic (fyi, no handle_exception method is defined in NE1)
set_waitcursor(False)
return 1, ("Problem copying files to the new parts file directory " + newPartFilesDir
+ " - ".join(map(str, e.args)))
set_waitcursor(False)
env.history.message( "Done.")
return 0, 'Part files copied from "' + oldPartFilesDir + '" to "' + newPartFilesDir + '"'
def saved_main_file(self, safile, fil): #bruce 050907 split this out of mmp and pdb saving code
    """
    Record the fact that self.assy itself is now saved into (the same or a new) main file
    (and will continue to be saved into that file, until further notice)
    (as opposed to part or all of it being saved into some separate file, with no change in status of main file).
    Do necessary changes (filename, window caption, assy.changed status) and updates, but emit no history message.

    @param safile: full pathname of the file the assembly was just saved into;
                   becomes self.assy.filename and is added to the recent-files list
    @param fil: new name for the assembly (stored as self.assy.name)
    """
    # (It's probably bad design of pdb save semantics for it to rename the assy filename -- it's more like saving pov, etc.
    # This relates to some bug reports. [bruce 050907 comment])
    # [btw some of this should be split out into an assy method, or more precisely a savable-object method #e]
    self.assy.filename = safile
    self.assy.name = fil
    self.assy.reset_changed() # The file and the part are now the same.
    self.updateRecentFileList(safile)
        #bruce 050927 code cleanup: moved updateRecentFileList here (before, it preceded each call of this method)
    self.update_mainwindow_caption()
    self.mt.mt_update() # since it displays self.assy.name [bruce comment 050907; a guess]
        # [note, before this routine was split out, the mt_update happened after the history message printed by our callers]
    return
def prepareToCloseAndExit(self): #bruce 070618 revised/renamed #e SHOULD RENAME to not imply side effects other than file save
"""
The user has asked NE1 to close the main window and exit; if any files are modified,
ask the user whether to save them, discard them, or cancel the exit.
If the user wants any files saved, save them. (In the future there might be more than one
open file, and this would take care of them all, even though some but not all might get saved.)
If the user still wants NE1 to exit, return True; otherwise (if user cancels exit at any
time during this, using some dialog's Cancel button), return False.
Perform no exit-related side effects other than possibly saving modified files.
If such are needed, the caller should do them afterwards (see cleanUpBeforeExiting in current code)
or before (not implemented as of 070618 in current code).
"""
if not self.assy.has_changed():
return True
rc = QMessageBox.warning( self, "Warning!",
"The part contains unsaved changes.\n"
"Do you want to save the changes before exiting?",
"&Save", "&Discard", "Cancel",
0, # Enter == button 0
2 ) # Escape == button 2
print "fyi: dialog choice =", ["Save", "Discard", "Cancel"][rc] # leave this in until changes fully tested [bruce 070618]
if rc == 0: # Save (save file and exit)
isFileSaved = self.fileSave()
if isFileSaved:
return True
else:
##Huaicai 1/6/05: While in the "Save File" dialog, if user chooses
## "Cancel", the "Exit" action should be ignored. bug 300
return False
elif rc == 1: # Discard (discard file and exit)
return True
else: # Cancel (cancel exit, and don't save file)
return False
pass
# finish-time of the most recent closeEvent handling that ended in a
# cancel; closeEvent compares against it to detect Qt's duplicate
# closeEvents (see the long comment in closeEvent)
__last_closeEvent_cancel_done_time = 0.0 #bruce 070618 for bug 2444
# set True once a closeEvent decides we are really exiting, so a
# duplicate event arriving afterwards is accepted without re-asking
__exiting = False #bruce 070618 for bug 2444
def closeEvent(self, ce):
    """
    Slot method for closing the main window (and exiting NE1), called via
    "File > Exit" or clicking the "Close" button on the window title.

    @param ce: The close event.
    @type ce: U{B{QCloseEvent}<http://doc.trolltech.com/4/qcloseevent.html>}
    """
    # Note about bug 2444 and its fix here:
    #
    # For unknown reasons, Qt can send us two successive closeEvents.
    # This is part of the cause of bug 2444 (two successive dialogs asking
    # user whether to save changes).
    # The two events are not distinguishable in any way we [Bruce & Ninad]
    # know of (stacktrace, value of ce.spontaneous()).
    # But there is no documented way to be sure they are the same
    # (their id is the same, but that doesn't mean much, since it's often
    # true even for different events of the same type; QCloseEvent has
    # no documented serial number or time; they are evidently different
    # PyQt objects, since a Python attribute saved in the first one (by
    # debug code tried here) is no longer present in the second one).
    #
    # But, there is no correct bugfix except to detect whether they're
    # the same, because:
    # - if the user cancels an exit, then exits again (without doing
    # anything in between), they *should* get another save-changes dialog;
    # - the cause of getting two events per close is not known, so it
    # might go away, so (in trying to handle that case) we can't just
    # assume the next close event should be discarded.
    #
    # So all that's left is guessing whether they're the same, based on
    # intervening time. (This means comparing end time of handling one
    # event with start time of handling the next one, since getting the
    # cancel from the user can take an arbitrarily long time.)
    # (Of course if the user doesn't cancel, so we're really exiting,
    # then we know they have to be the same.)
    #
    # But even once we detect the duplicate, we have to handle it
    # differently depending on whether we're exiting.
    # (Note: during development, a bug caused us to call neither
    # ce.accept() nor ce.ignore() on the 2nd event, which in some cases
    # aborted the app with "Modules/gcmodule.c:231: failed assertion
    # `gc->gc.gc_refs != 0'".)
    now = time.time()
    ## print "self.__exiting =", self.__exiting, ", now =", now, ", last done time =", self.__last_closeEvent_cancel_done_time
    if self.__exiting or (now - self.__last_closeEvent_cancel_done_time <= 0.5):
        # (I set the threshhold at 0.5 since the measured time difference was up to 0.12 during tests.)
        # Assume this is a second copy of the same event (see long comment above).
        # To fix bug 2444, don't do the same side effects for this event,
        # but accept or ignore it the same as for the first one (based on self.__exiting).
        duplicate = True
        shouldExit = self.__exiting # from prior event
        print "fyi: ignoring duplicate closeEvent (exiting = %r)" % shouldExit
            # leave this print statement in place until changes fully tested [bruce 070618]
    else:
        # normal case: ask the user (via prepareToCloseAndExit) if needed
        duplicate = False
        shouldExit = self.prepareToCloseAndExit() # puts up dialog if file might need saving
    if shouldExit:
        self.__exiting = True
        if not duplicate:
            print "exiting" # leave this in until changes fully tested [bruce 070618]
            self.cleanUpBeforeExiting()
            #Not doing the following in 'cleanupBeforeExiting?
            #as it is not a 'clean up'. Adding it below for now --ninad 20070702
            #Note: saveState() is QMainWindow.saveState(). It saves the
            #current state of this mainwindow's toolbars and dockwidgets
            #The 'objectName' property is used to identify each QToolBar
            #and QDockWidget.
            #QByteArray QMainWindow::saveState ( int version = 0 ) const
            toolbarState_QByteArray = self.saveState()
            env.prefs[toolbar_state_prefs_key] = str(toolbarState_QByteArray)
        ce.accept() # tell Qt to proceed with closing the window
    else:
        ce.ignore() # tell Qt to leave the window open
        if not duplicate:
            env.history.message("Cancelled exit.")
        # record when this cancel finished, for duplicate detection above
        self.__last_closeEvent_cancel_done_time = time.time() # note: not the same value as the time.time() call above
        ## print "done time =",self.__last_closeEvent_cancel_done_time
    return
def fileClose(self):
    """
    Slot method for 'File > Close'.

    If the model has unsaved changes, ask the user whether to save,
    discard, or cancel; unless cancelled (or the save failed), replace
    the current model with a fresh empty one.
    """
    env.history.message(greenmsg("Close File:"))
    saved_ok = True
    warn_abandoned = True # see similar code in fileOpen
    if self.assy.has_changed():
        answer = QMessageBox.warning( self, "Warning!" ,
            "The model contains unsaved changes.\n"
            "Do you want to save the changes before closing\n"\
            "this model and beginning a new (empty) model?",
            "&Save", "&Discard", "Cancel",
            0, # Enter == button 0
            2 ) # Escape == button 2
        if answer == 2:
            # Cancel clicked or Alt+C pressed or Escape pressed
            env.history.message("Cancelled.")
            return
        if answer == 0:
            # Save clicked or Alt+S pressed or Enter pressed
            saved_ok = self.fileSave()
        elif answer == 1:
            # Discard
            env.history.message("Changes discarded.")
            warn_abandoned = False
        else:
            assert 0 #bruce 080909
    if saved_ok:
        self._make_and_init_assy('$STARTUP_MODE',
                                 warn_about_abandoned_changes = warn_abandoned )
        self.assy.reset_changed() #bruce 050429, part of fixing bug 413
        self.assy.clear_undo_stack() #bruce 060126, maybe not needed, or might fix an unreported bug related to 1398
        self.win_update()
    return
def fileSetWorkingDirectory(self):
    """
    Slot for 'File > Set Working Directory': prompt the user for a new
    NE1 working directory via a directory-chooser dialog, then record it.

    @deprecated: The 'Set Working Directory' menu item that calls this
                 slot was removed from the File menu as of Alpha 9.
                 Mark 2007-06-18.
    """
    env.history.message(greenmsg("Set Working Directory:"))
    current = env.prefs[workingDirectory_prefs_key]
    caption = "Current Working Directory - [" + current + "]"
    chosen = QFileDialog.getExistingDirectory( self, caption, current )
    if not chosen:
        # dialog dismissed without choosing a directory
        env.history.message("Cancelled.")
        return
    self.setCurrentWorkingDirectory(chosen)
def setCurrentWorkingDirectory(self, dir = None): # Mark 060729.
    """
    Set the current working directory (CWD).

    @param dir: the directory to use. When None (the default), fall back
                to the directory of the current assy filename (i.e. the
                directory of the current part); when that is not a real
                directory either, use the default working directory.
    @type dir: string

    @see: L{getDefaultWorkingDirectory()}
    """
    chosen = dir
    if not chosen:
        # derive a candidate from the current part's filename
        chosen, _basename = os.path.split(self.assy.filename)
    if not os.path.isdir(chosen):
        # nothing usable; fall back to the default (prefs db untouched)
        self.currentWorkingDirectory = getDefaultWorkingDirectory()
    else:
        self.currentWorkingDirectory = chosen
        self._setWorkingDirectoryInPrefsDB(chosen)
    #print "setCurrentWorkingDirectory(): dir=",dir
def _setWorkingDirectoryInPrefsDB(self, workdir = None):
    """
    Record the working directory in the user preferences database.
    [private method]

    @param workdir: full path of the directory to record. When None
                    (the default), nothing is changed; when not an
                    existing directory, a redmsg is emitted instead.
    @type workdir: string
    """
    if not workdir:
        # no directory supplied -- leave the prefs db alone
        return
    workdir = str(workdir)
    if not os.path.isdir(workdir):
        # refuse to record a non-directory; tell the user why
        msg = "[" + workdir + "] is not a directory. Working directory was not changed."
        env.history.message( redmsg(msg))
        return
    env.prefs[workingDirectory_prefs_key] = os.path.normpath(workdir) # Change pref in prefs db.
    return
def _make_and_init_assy(self,
                        initial_mode_symbol = None,
                        warn_about_abandoned_changes = True ):
    """
    [private; as of 080812, called only from fileOpen and fileClose]

    Close current self.assy, make a new assy and reinit commandsequencer
    for it (in glpane.setAssy), tell new assy about our model tree and
    glpane (and vice versa), update mainwindow caption.

    @param initial_mode_symbol: if provided, initialize the command
                                sequencer to that mode; otherwise,
                                to nullMode. All current calls provide
                                this as a "symbolic mode name".

    @param warn_about_abandoned_changes: passed to exit_all_commands method in
                                         self.assy.commandSequencer; see that
                                         method in class CommandSequencer for
                                         documentation
    @type warn_about_abandoned_changes: boolean

    @note: MWsemantics.__init__ doesn't call this, but contains similar
           code, not all in one place. It's not clear whether it could
           be made to call this.
    @note: certain things are done shortly after this call by all callers,
           and by the similar MWsemantics.__init__ code, but since various
           things intervene it's not clear whether they could be pulled
           into a single method. These include assy.clear_undo_stack.
    """
    #bruce 080812 renamed this from __clear (which is very old).
    # REVIEW: should all or part of this method be moved back into
    # class MWsemantics (which mixes it in)?
    if self.assy:
        # tear down the old assembly before replacing it
        cseq = self.assy.commandSequencer
        cseq.exit_all_commands( warn_about_abandoned_changes = \
                                warn_about_abandoned_changes )
            #bruce 080909 new features:
            # 1. exit all commands here, rather than (or in addition to)
            # when initing new assy.
            # 2. but tell that not to warn about abandoning changes
            # stored in commands, if user already said to discard changes
            # stored in assy (according to caller passing False for warn_about_abandoned_changes).
            # This should fix an old bug in which redundant warnings
            # would be given if both kinds of changes existed.
        self.assy.close_assy() # bruce 080314
    self.assy = self._make_a_main_assy()
    self.update_mainwindow_caption()
    self.glpane.setAssy(self.assy)
        # notes: this calls assy.set_glpane, and _reinit_modes
        # (which leaves currentCommand as nullmode)
        # (even after USE_COMMAND_STACK).
        ### TODO: move _reinit_modes out of that, do it somewhere else.
    self.assy.set_modelTree(self.mt)
    self.mt.mt_update() # not sure if needed
    if initial_mode_symbol:
        #bruce 080812 pulled this code in from just after both calls
        self.commandSequencer.start_using_initial_mode( initial_mode_symbol)
    return
def openRecentFile(self, idx):
    """
    Slot method for the "Open Recent File" menu,
    a submenu of the "File" menu.
    """
    text = str_or_unicode(idx.text())
    # Menu entries look like an accelerator/number prefix, a space, and
    # one more character before the filename; skip past both.
    # Warning: Potential bug if number of recent files >= 10
    # (i.e. LIST_CAPACITY >= 10)
    space_at = text.index(" ")
    selectedFile = text[space_at + 2:]
    self.fileOpen(selectedFile)
    return
pass # end of class fileSlotsMixin
# ==
## Test code -- By cleaning the recent files list of QSettings
if __name__ == '__main__':
    prefs = QSettings()
    from utilities.constants import RECENTFILES_QSETTINGS_KEY
    emptyList = QStringList()
    # overwrite the recent-files entry with an empty list, erasing it
    # NOTE(review): writeEntry/QStringList look like Qt3-era API --
    # confirm they still exist in the Qt bindings this file imports
    prefs.writeEntry(RECENTFILES_QSETTINGS_KEY, emptyList)
    # todo: make a user-accessible way to erase the recent files list.
    # [bruce 080727 suggestion]
    del prefs
# end
| NanoCAD-master | cad/src/operations/ops_files.py |
# Copyright 2004-2008 Nanorex, Inc. See LICENSE file for details.
"""
update_select_mode.py - change currentCommand or assy.selwhat or selection
to make them consistent
@author: Bruce
@version: $Id$
@copyright: 2004-2008 Nanorex, Inc. See LICENSE file for details.
Note: this can still have an effect, but ought to be generalized
and refactored. It is accessed as a method on the main window,
but is in its own file since it probably doesn't belong there
after refactoring and since that module is too long.
History:
bruce 050124 wrote this [as a method of the model tree];
should generalize and refile;
should be used for more or for all events
bruce 060403 revised this but didn't update docstring [of method in modeltree];
now it can change from *Chunk modes to Build, only, I think
bruce 081216 moved this out of class ModelTree (where it made no sense)
into its own file, to be called via a same-named method on MWsemantics
"""
from utilities.GlobalPreferences import permit_atom_chunk_coselection
from utilities.constants import SELWHAT_ATOMS
from utilities.constants import SELWHAT_CHUNKS
from utilities.constants import SELWHAT_NAMES
from utilities import debug_flags
def update_select_mode(win):
    """
    Warning: this docstring is partly obsolete.

    This should be called at the end of event handlers which might have
    changed the current internal selection mode (atoms vs chunks),
    to resolve disagreements between that and the visible selection mode
    iff it's one of the Select modes [or more generally, i assume as of 060403,
    if the current mode wants to be ditched if selwhat has to have certain values it dislikes].

    If the current mode is not one of Select Atoms or Select Chunks (or a subclass),
    this routine has no effect.
    (In particular, if selwhat changed but could be changed back to what it was,
    it does nothing to correct that [obs? see end of docstring], and indeed it doesn't know the old value of
    selwhat unless the current mode (being a selectMode) implies that.)

    [We should generalize this so that other modes could constrain the selection
    mode to just one of atoms vs chunks if they wanted to. However, the details of this
    need design, since for those modes we'd change the selection whereas for the
    select modes we change which mode we're in and don't change the selection. ###@@@]

    If possible, we leave the visible mode the same (even changing assy.selwhat
    to fit, if nothing is actually selected [that part was NIM until 050519]).
    But if forced to, by what is currently selected, then we change the visible
    selection mode to fit what is actually selected. (We always assert that selwhat
    permitted whatever was selected to be selected.)

    @param win: the main window (provides assy and commandSequencer)
    """
    if permit_atom_chunk_coselection(): #bruce 060721, experimental
        return
    from commands.SelectChunks.SelectChunks_Command import SelectChunks_Command # todo: move to toplevel
    #bruce 050519 revised docstring and totally rewrote code.
    assy = win.assy
    commandSequencer = win.commandSequencer #bruce 071008
    mode = commandSequencer.currentCommand
        #bruce 071008; note, I'm not sure it's right to ask the currentCommand
        # for selwhat_from_mode, as opposed to the current graphicsMode!
        # This whole thing needs total revamping (along with everything
        # related to what can be selected at a given time), so I'm not going
        # to worry about it for now.
    part = assy.part
    # 0. Appraise the situation.
    # 0a: assy.selwhat is what internal code thinks selection restriction is, currently.
    selwhat = assy.selwhat
    assert selwhat in (SELWHAT_CHUNKS, SELWHAT_ATOMS) # any more choices, or change in rules, requires rewriting this method
    # 0b. What does current mode think it needs to be?
    # (Someday we might distinguish modes that constrain this,
    # vs modes that change to fit it or to fit the actual selection.
    # For now we only handle modes that change to fit the actual selection.)
    selwhat_from_mode = None # most modes don't care
    if isinstance( mode, SelectChunks_Command):
        # TODO: replace this by a method call or getattr on mode
        selwhat_from_mode = SELWHAT_CHUNKS
    #bruce 060403 commenting out the following, in advance of proposed removal of Select Atoms mode entirely:
    ## elif isinstance( mode, SelectAtoms_Command) and mode.commandName == SelectAtoms_Command.commandName:
    ## #bruce 060210 added commandName condition to fix bug when current mode is Build (now a subclass of Select Atoms)
    ## selwhat_from_mode = SELWHAT_ATOMS
    change_mode_to_fit = (selwhat_from_mode is not None) # used later; someday some modes won't follow this
    # 0c. What does current selection itself think it needs to be?
    # (If its desires are inconsistent, complain and fix them.)
    if assy.selatoms and assy.selmols:
        if debug_flags.atom_debug:
            #bruce 060210 made this debug-only, since what it reports is not too bad, and it happens routinely now in Build mode
            # if atoms are selected and you then select a chunk in MT
            print "atom_debug: bug, fyi: there are both atoms and chunks selected. Deselecting some of them to fit current mode or internal code."
        new_selwhat_influences = ( selwhat_from_mode, selwhat) # old mode has first say in this case, if it wants it
        #e (We could rewrite this (equivalently) to just use the other case with selwhat_from_sel = None.)
    else:
        # figure out what to do, in this priority order: actual selection, old mode, internal code.
        if assy.selatoms:
            selwhat_from_sel = SELWHAT_ATOMS
        elif assy.selmols:
            selwhat_from_sel = SELWHAT_CHUNKS
        else:
            selwhat_from_sel = None
        new_selwhat_influences = ( selwhat_from_sel, selwhat_from_mode, selwhat)
        if selwhat_from_sel is not None and selwhat_from_sel != selwhat:
            # following code will fix this with no harm, so let's not consider it a big deal,
            # but it does indicate a bug -- so just print a debug-only message.
            # (As of 050519 740pm, we get this from the jig cmenu command "select this jig's atoms"
            # when the current mode is more compatible with selecting chunks. But I think this causes
            # no harm, so I might as well wait until we further revise selection code to fix it.)
            if debug_flags.atom_debug:
                print "atom_debug: bug, fyi: actual selection (%s) inconsistent " \
                      "with internal variable for that (%s); will fix internal variable" % \
                      (SELWHAT_NAMES[selwhat_from_sel], SELWHAT_NAMES[selwhat])
    # Let the strongest (first listed) influence, of those with an opinion,
    # decide what selmode we'll be in now, and make everything consistent with that.
    for opinion in new_selwhat_influences:
        if opinion is not None:
            # We have our decision. Carry it out (on mode, selection, and assy.selwhat) and return.
            selwhat = opinion
            if change_mode_to_fit and selwhat_from_mode != selwhat:
                #bruce 050520 fix bug 644 by only doing this if needed (i.e. if selwhat_from_mode != selwhat).
                # Without this fix, redundantly changing the mode using these tool buttons
                # immediately cancels (or completes?) any node-renaming-by-dblclick
                # right after it gets initiated (almost too fast to see).
                if selwhat == SELWHAT_CHUNKS:
                    win.toolsSelectMolecules()
                    print "fyi: forced mode to Select Chunks" # should no longer ever happen as of 060403
                elif selwhat == SELWHAT_ATOMS:
                    win.toolsBuildAtoms() #bruce 060403 change: toolsSelectAtoms -> toolsBuildAtoms
                    ## win.toolsSelectAtoms() #bruce 050504 making use of this case for the first time; seems to work
            # that might have fixed the following too, but never mind, we'll just always do it -- sometimes it's needed.
            if selwhat == SELWHAT_CHUNKS:
                part.unpickatoms()
                assy.set_selwhat(SELWHAT_CHUNKS)
            elif selwhat == SELWHAT_ATOMS:
                if assy.selmols: # only if needed (due to a bug), since this also desels Groups and Jigs
                    # (never happens if no bug, since then the actual selection has the strongest say -- as of 050519 anyway)
                    part.unpickparts()
                assy.set_selwhat(SELWHAT_ATOMS) # (this by itself does not deselect anything, as of 050519)
            return
    # unreachable in practice: the last influence is always selwhat itself,
    # which was asserted above to be a non-None SELWHAT_* value
    assert 0, "new_selwhat_influences should not have ended in None: %r" % (new_selwhat_influences,)
    # scratch comments:
    # if we had been fixing selwhat in the past, it would have fixed bug 500 in spite of permit_pick_parts in cm_hide/cm_unhide.
    # So why aren't we? let's find out with some debug code... (now part of the above, in theory)
    return
# end
| NanoCAD-master | cad/src/operations/update_select_mode.py |
# Copyright 2006-2008 Nanorex, Inc. See LICENSE file for details.
"""
reposition_baggage.py -- help reposition baggage atoms after real neighbor
atoms have moved
@author: Bruce
@version: $Id$
@copyright: 2006-2008 Nanorex, Inc. See LICENSE file for details.
"""
import math
from utilities import debug_flags
from geometry.VQT import V, norm, cross, vlen
from model.bond_constants import find_bond
from model.bond_constants import V_SINGLE
from geometry.geometryUtilities import arbitrary_perpendicular
debugprints = False # set True for verbose debugging output from this module

# geometry of a tetrahedron (for finding sp3 bonding positions):
# unit-vector components used below as coef1 * back +- coef2 * plane
coef1, coef2 = norm( V( 1, math.sqrt(2) ) )
coef1 = - coef1 # negated so results point away from the existing bonds
def reposition_baggage_0(self, baggage = None, planned_atom_nupos = None):
    """
    Your baggage atoms (or the given subset of them) might no longer
    be sensibly located, since you and/or some neighbor atoms have moved
    (or are about to move, re planned_atom_nupos as explained below),
    so fix up their positions based on your other neighbors' positions,
    using old baggage positions only as hints.

    BUT one of your other neighbors (but not self) might be about to move
    (rather than already having moved) -- if so,
    planned_atom_nupos = (that neighbor, its planned posn),
    and use that posn instead of its actual posn to decide what to do.

    @param baggage: list of baggage atoms to reposition, or None to use
                    all of self's baggage neighbors
    @param planned_atom_nupos: None, or (neighbor_atom, planned_position)

    @warning: we assume baggage is a subset of self.baggageNeighbors(),
              but we don't check this except when ATOM_DEBUG is set.
    """
    #bruce 060629 for bondpoint problem -- second guess what caller did so far
    if baggage is None:
        baggage, other = self.baggage_and_other_neighbors()
    else:
        other = -1 # set later if needed
    if debug_flags.atom_debug:
        # assert baggage is a subset of self.baggageNeighbors()
        _bn = map(id, self.baggageNeighbors())
        for atom in baggage:
            assert id(atom) in _bn
        atom = 0
        del _bn, atom
    # do the actual geometric repositioning
    _reposition_baggage_1(self, baggage, other, planned_atom_nupos)
    if self.element.pam and self.element.role in ('axis', 'strand'):
        # Let the dna updater 3rd-guess this when it has a DnaLadder and can
        # do a better job. REVIEW: are the things already done in _reposition_baggage_1
        # good or bad in this case? At least in most cases, they seem good (by testing).
        # [bruce 080404/080405 new feature / bugfix]
        self._f_dna_updater_should_reposition_baggage = True
        # do it immediately if possible
        ladder = getattr(self.molecule, 'ladder', None) ## self.molecule.ladder
        if ladder and ladder.valid and not ladder.error:
            # usual case, at least when dragging a real neighbor of self;
            # in these cases the dna updater is never running;
            # by test it's clear this is doing more good than harm, usually,
            # as of the late 080405 improvements to this method
            self.reposition_baggage_using_DnaLadder()
        else:
            # don't know if this ever happens
            # (it does happen for ghost bases created within ordinary chunks)
            self._changed_structure() # make sure dna updater runs on self
                # (probably NOT redundant with other changes by caller)
            pass
        pass
    return
def _reposition_baggage_1(self, baggage, other, planned_atom_nupos):
    """
    Do the real work of reposition_baggage_0: choose new unit directions
    for each baggage atom based on self's atomtype (spX) and the number
    and directions of its non-baggage neighbors, then move each baggage
    atom along its chosen direction, preserving its current distance
    from self. Old baggage positions are used only as hints (to match
    computed directions to specific baggage atoms).

    @param baggage: list of baggage atoms to move (assumed a subset of
                    self's neighbors)
    @param other: list of self's other (non-baggage) neighbor atoms, or
                  -1 meaning "compute it here from self.bonds"
    @param planned_atom_nupos: None/falsy, or (neighbor, planned_posn)
                  for a neighbor about to move; that planned position is
                  used in place of its actual one
    """
    # trivial cases
    len_baggage = len(baggage)
    if not len_baggage:
        return
    # cases handled well enough by calling code (as of 060629),
    # needing no correction here
    len_other = len(self.bonds) - len_baggage
    if not len_other:
        # should never happen, as we are called as of 060629, i think,
        # though if it did, there would be things we could do in theory,
        # like rotate atomtype.bondvecs to best match baggage...
        print "bug?: %r.reposition_baggage(%r) finds no other atoms -- " \
              "nim, not thought to ever happen" % (self, baggage)
        return
    if len_other == 1:
        # the Q(old, new) code in the callers ought to handle it properly --
        # UNLESS other is a pi_bond, and its alignment ought to affect a pair
        # of baggage atoms.
        if self.atomtype.spX == 2: # note: true for sp2 and sp2(graphitic)
            pass # let the main code run, including some other
                # "if len_other == 1" scattered around
            ##e someday: don't we want to also notice sp, and propogate
            # twisting of a pi_bond through an sp_chain?
            # I recall some code in depositMode for that...
            # I can't remember its scope, thus whether it covers this already...
            # I think not. ###@@@
        else:
            return
    # at least 2 other (except sp2 pi_bond other), and at least one baggage...
    # might as well get other_posns we'll want to use
    # (handling planned_atom_nupos once and for all).
    if other == -1:
        # caller didn't supply the other-neighbors list; compute it now
        other = []
        baggage_keys = [atom.key for atom in baggage]
        for b in self.bonds:
            atom = b.other(self)
            if atom.key not in baggage_keys:
                other.append(atom)
        if len(other) != len_other:
            # must mean baggage is not a subset of neighbors
            args = (self, baggage, planned_atom_nupos)
            print "bug in reposition_baggage%r: len(other == %r) != len_other %r" % \
                  (args, other, len_other)
            return
    if len_other == 1:
        other0_bond = find_bond(other[0], self)
        if other0_bond.v6 == V_SINGLE:
            # calling code handled this case well enough
            return
    planned_atom, nupos = None, None
    if planned_atom_nupos:
        planned_atom, nupos = planned_atom_nupos
        if planned_atom not in other:
            print "likely bug in reposition_baggage: " \
                  "planned_atom not in other", planned_atom, other
    # substitute the planned position for the atom that's about to move
    other_posns = [(atom.posn(), nupos)[atom is planned_atom] for atom in other]
        #e we might later wish we'd kept a list of the bonds to baggage and
        # other, to grab the v6's -- make a dict from atom.key above?
    selfposn = self.posn()
    othervecs = [norm(pos - selfposn) for pos in other_posns]
    bag_posns = [atom.posn() for atom in baggage]
    bagvecs = [norm(pos - selfposn) for pos in bag_posns]
    # The alg is specific to atomtype, and number and sometimes *type* of all
    # bonds involved. We'll code the most important and/or easiest cases first.
    # Someday we might move them into methods of the atomtypes themselves.
    algchoice = (self.atomtype.spX, len_baggage, len_other)
        # len_baggage >= 1, len_other >= 2 (except pi_bond case)
    extra = 0 # might be altered below
        # extra == number of candidate directions in res beyond len_baggage
    if algchoice == (3, 2, 2) or algchoice == (3, 1, 2):
        # (3, 2, 2) -- e.g. C(sp3) with 2 bp's and 2 real bonds
        # This is not the easiest case, but it's arguably the most important.
        # For alignment purposes we can assume bonds are single.
        # (Due to monovalent atoms being baggage, that doesn't mean the baggage
        # atoms are equivalent to each other.)
        #
        # (3, 1, 2) -- e.g. N(sp3) with 1 bondpoint and 2 real bonds;
        # use same code and coefs, but pretend a phantom baggage atom is present
        if len_baggage == 1: # (3,1,2)
            extra = 1
            if debugprints:
                print "debug repos baggage: sp3,1,2"
        plane = cross( othervecs[0], othervecs[1] )
        if vlen(plane) < 0.001:
            # othervecs are nearly parallel (same or opposite);
            # could force existing bonds perp to them, at correct angle,
            # as close to existing posns as you can, but this case can be left
            # nim for now since it's rare and probably transient.
            if debugprints:
                print "debug repos baggage: othervecs are nearly parallel; " \
                      "this case is nim", self, other ###@@@
            return
        plane = norm(plane)
        back = norm(othervecs[0] + othervecs[1])
        # two tetrahedral directions opposite the real bonds
        res = [coef1 * back + coef2 * plane, coef1 * back - coef2 * plane]
        pass # fall thru to assigning res vecs to specific baggage
    elif algchoice == (3, 1, 3):
        back = norm(othervecs[0] + othervecs[1] + othervecs[2])
        if back:
            res = [-back]
            ##e might not be as good as averaging the three crossproducts,
            # after choosing their sign close to -back; or something better,
            # since real goal is just "be repelled from them all";
            # consider case where two othervecs are similar ###@@@
        else:
            # the three real bonds sum to zero, i.e. lie in a plane
            plane0 = norm( cross( othervecs[0], othervecs[1] ))
            if plane0:
                if debugprints:
                    print "debug repos baggage: sp3 with 3 real bonds in a plane"
                # pick closest of plane0, -plane0 to existing posn
                ## # one way:
                ## if dot(plane0, bagvecs[0]) < 0:
                ## res = [-plane0]
                ## else:
                ## res = [plane0]
                # another way:
                res = [-plane0, plane0]
                extra = 1
            else:
                # not possible -- if othervecs[0], othervecs[1] are antiparallel,
                # overall sum (in back) can't be zero; if parallel, ditto.
                print "can't happen: back and plane0 vanish", othervecs
                return
            pass
        pass
    elif algchoice == (2, 1, 2): # e.g. C(sp2) with 1 bondpoint and 2 real bonds
        back = norm(othervecs[0] + othervecs[1])
        if back:
            res = [-back] # tested
        else:
            # real bonds are antiparallel; find closest point on equator to
            # existing posn, or arb point on equator
            p0 = cross( bagvecs[0], othervecs[0] )
            if debugprints:
                print "debug repos baggage: antiparallel sp2 1 2 case, " \
                      "not p0 == %r" % (not p0) # untested so far
            if not p0:
                # bagvec is parallel too
                res = [arbitrary_perpendicular(othervecs[0])]
            else:
                # choose closest perpendicular to existing direction
                res0 = - norm( cross(p0, othervecs[0]) )
                    #k this ought to be positive of, but might be (buggily)
                    # negative of, desired value -- need to test this ###@@@
                    # but being too lazy to test it, just make it work either way:
                res = [res0, -res0]
                extra = 1
            pass
        pass
    elif algchoice == (2, 2, 1):
        # This only matters for twisting a pi_bond, and we verified above that
        # we have >single bond. A difficulty: how can we update the geometry,
        # not knowing whether the caller moved all the source atoms yet,
        # and with the bond code not knowing which direction along the bond
        # effects are propogating?
        # BTW, I guess that when you drag singlets, depositMode implems this
        # (along sp_chains too), but when you move chain atoms (let alone
        # their neighbors), I just don't recall.
        if debugprints:
            print "debug repos baggage: sp2 with twisting pi_bond is nim", self ###@@@
        return
    else:
        #bruce 080515 bugfix: fallback case
        # (otherwise following code has UnboundLocalError for 'res')
        print "bug?: reposition_baggage (for %r) can't yet handle this algchoice:" % self, algchoice
        return
    # now work out the best assignment of posns in res to baggage; reorder res
    # to fit bags_ordered
    assert len(res) == len_baggage + extra
    bags_ordered = baggage # in case len(res) == 1
    if len(res) > 1:
        # greedy matching: pair each baggage atom with the closest unused
        # candidate direction, nearest pairs first
        dists = []
        for atom_junk, vec, i in zip(baggage, bagvecs, range(len_baggage)):
            for pos in res:
                dists.append(( vlen(pos - vec), i, pos ))
        dists.sort()
        res0 = res
        res = []
        bags_ordered = []
        bagind_matched = [0 for bag in baggage]
        for dist, bagind, pos in dists:
            # assume not yet done matching, and don't yet know if bagind or pos
            # are still in the running;
            # when a bag matches, set bagind_matched[bagind];
            # when a pos matches, remove it from res0.
            if bagind_matched[bagind] or pos not in res0:
                continue
            # found a match
            res0.remove(pos)
            bagind_matched[bagind] = 1
            res.append(pos)
            bags_ordered.append(baggage[bagind])
            if len(bags_ordered) >= len_baggage:
                break
    assert len(bags_ordered) == len_baggage, \
           "somehow failed to match up some baggage at all, should never happen"
    assert len_baggage == len(res) # whether or not extra > 0
    # now move the atoms, preserving distance from self
    # (assume calling code got that part right)
    for atom, vec in zip(bags_ordered, res):
        dist = vlen( selfposn - atom.posn() )
        if abs(1.0 - vlen(vec)) > 0.00001:
            print "bug in reposition_baggage: vec not len 1:", vec
        atom.setposn( selfposn + norm(vec) * dist )
            # norm(vec) is hoped to slightly reduce accumulated
            # numerical errors...
    ###e ideally we'd correct the bond lengths too, but as of 060630,
    # even Build doesn't get them right (and it can't, unless bond tools
    # would also change them when at most one real atom would need
    # moving, which I suppose they ought to...)
    if debugprints and 0:
        print "done"
    return # from _reposition_baggage_1

# end... tested: sp3 2 2, sp2 1 2
# not tested: the others, the extreme cases
# (need try/except in practice since it's hard to test them all;
# put it in calling code?)
| NanoCAD-master | cad/src/operations/reposition_baggage.py |
# Copyright 2004-2009 Nanorex, Inc. See LICENSE file for details.
"""
ops_atoms.py -- operations on the atoms and/or bonds inside a Part.
These operations generally create or destroy atoms, bondpoints, or real bonds.
Operations specific to single modes (Build, Crystal, Extrude) are not included here.
@version: $Id$
@copyright: 2004-2009 Nanorex, Inc. See LICENSE file for details.
History:
bruce 050507 made this by collecting appropriate methods (by various authors)
from existing modules, from class Part and class basicMode.
"""
from utilities.Log import greenmsg, redmsg
from utilities.constants import SELWHAT_CHUNKS, SELWHAT_ATOMS
from utilities.constants import gensym
from platform_dependent.PlatformDependent import fix_plurals
from model.elements import Singlet
from model.chem import Atom
import foundation.env as env
class ops_atoms_Mixin:
    """
    Mixin class for providing these methods to class Part.

    The methods operate on the atoms and/or bonds inside the Part
    (self), generally creating or destroying atoms, bondpoints, or
    real bonds; they rely on Part attributes such as self.assy,
    self.selatoms, self.selmols, self.o (glpane) and self.w (main window).
    """
    def make_Atom_and_bondpoints(self,
                                 elem,
                                 pos,
                                 atomtype = None,
                                 Chunk_class = None ):
        """
        Create one unbonded atom, of element elem
        and (if supplied) the given atomtype
        (otherwise the default atomtype for elem),
        at position pos, in its own new chunk,
        with enough bondpoints to have no valence error.

        @param elem: the element for the new atom (provides .symbol)
        @param pos: 3d model-space position for the new atom
        @param atomtype: optional atomtype for the new atom; may be None,
                         a type name, or a type object; must agree with elem
        @param Chunk_class: constructor for the returned atom's new chunk
                            (self.assy.Chunk by default)
        @return: one newly created Atom object, already placed into a new
                 chunk which has been added to the model using addnode
        """
        #bruce 041215 moved this from chunk.py to chem.py, and split part of it
        # into the new atom method make_bondpoints_when_no_bonds, to help fix bug 131.
        #bruce 050510 added atomtype option
        #bruce 080520 added Chunk_class option
        #bruce 090112 renamed oneUnbonded function and turned it into this method
        assy = self.assy
        if Chunk_class is None:
            Chunk_class = assy.Chunk
        chunk = Chunk_class(assy, 'bug') # name is reset below!
        atom = Atom(elem.symbol, pos, chunk)
        # bruce 041124 revised name of new chunk, was gensym('Chunk.');
        # no need for gensym since atom key makes the name unique, e.g. C1.
        atom.set_atomtype_but_dont_revise_singlets(atomtype)
            # ok to pass None, type name, or type object; this verifies no change in elem
            # note, atomtype might well already be the value we're setting;
            # if it is, this should do nothing
        ## chunk.name = "Chunk-%s" % str(atom)
        chunk.name = gensym("Chunk", assy) #bruce 080407 per Mark NFR desire
        atom.make_bondpoints_when_no_bonds() # notices atomtype
        assy.addnode(chunk) # REVIEW: same as self.addnode?
        return atom
def modifyTransmute(self, elem, force = False, atomType = None):
"""
This method was originally a method of class mode and selectMode.
Transmute selected atoms into <elem> and with an optional <atomType>.
<elem> is an element number that selected atoms will be transmuted to.
<force>: boolean variable meaning keeping existing bond or not.
<atomType>: the optional hybrid bond type if the element support hybrid. --Huaicai[9/1/05]
"""
# now change selected atoms to the specified element
# [bruce 041215: this should probably be made available for any modes
# in which "selected atoms" are permitted, not just Select modes. #e]
from model.elements import PeriodicTable
if self.selatoms:
dstElem = PeriodicTable.getElement(elem)
for atm in self.selatoms.values():
atm.Transmute(dstElem, force = force, atomtype=atomType)
# bruce 041215 fix bug 131 by replacing low-level mvElement call
# with new higher-level method Transmute. Note that singlets
# can't be selected, so the fact that Transmute does nothing to
# them is not (presently) relevant.
#e status message?
# (Presently a.Transmute makes one per "error or refusal".)
self.o.gl_update()
if self.selmols: #bruce 060720 elif -> if, in case both atoms and chunks can be selected someday
dstElem = PeriodicTable.getElement(elem) #bruce 060720 fix typo dstElm -> dstElem to fix bug 2149
# but we have to decide if we want the behavior this now gives us, of transmuting inside selected chunks.
for mol in self.selmols[:]:
for atm in mol.atoms.values():
atm.Transmute(dstElem, force = force, atomtype=atomType)
# this might run on some killed singlets; should be ok
self.o.gl_update()
return
    def modifyDeleteBonds(self):
        """
        Delete all bonds between selected and unselected atoms or chunks.

        Emits a history message with the count of bonds deleted; if atoms
        were selected and any bonds were cut, the selected atoms are then
        separated into their own chunk via modifySeparate().
        """
        cmd = greenmsg("Delete Bonds: ")
        if not self.selatoms and not self.selmols: # optimization, and different status msg
            msg = redmsg("Nothing selected")
            env.history.message(cmd + msg)
            return
        cutbonds = 0 # number of bonds busted, for the status message
        # Delete bonds between selected atoms and their neighboring atoms that are not selected.
        for a in self.selatoms.values():
            for b in a.bonds[:]: # copy the list, since bust() modifies a.bonds
                neighbor = b.other(a)
                if neighbor.element != Singlet: # skip bondpoints (open bonds)
                    if not neighbor.picked:
                        b.bust()
                        a.pick() # Probably not needed, but just in case...
                        cutbonds += 1
        # Delete bonds between selected chunks and chunks that are not selected.
        for mol in self.selmols[:]:
            # "externs" contains a list of bonds between this chunk and a different chunk
            for b in mol.externs[:]: # copy, since bust() modifies externs
                # atom1 and atom2 are the connect atoms in the bond
                # (exactly one endpoint chunk picked ==> bond crosses the selection boundary)
                if int(b.atom1.molecule.picked) + int(b.atom2.molecule.picked) == 1:
                    b.bust()
                    cutbonds += 1
        msg = fix_plurals("%d bond(s) deleted" % cutbonds)
        env.history.message(cmd + msg)
        if self.selatoms and cutbonds:
            self.modifySeparate() # Separate the selected atoms into a new chunk
        else:
            self.w.win_update() #e do this in callers instead?
        return
    # change surface atom types to eliminate dangling bonds
    # a kludgey hack
    # bruce 041215 added some comments.
    def modifyPassivate(self):
        """
        Passivate the selection: change surface atom types so as to
        eliminate dangling bonds. Operates on whole selected chunks when
        selwhat is SELWHAT_CHUNKS, otherwise only on selected atoms.
        """
        cmd = greenmsg("Passivate: ")
        if not self.selatoms and not self.selmols: # optimization, and different status msg
            msg = redmsg("Nothing selected")
            env.history.message(cmd + msg)
            return
        if self.selwhat == SELWHAT_CHUNKS:
            for m in self.selmols:
                m.Passivate(True) # arg True makes it work on all atoms in m
        else:
            assert self.selwhat == SELWHAT_ATOMS
            for m in self.molecules:
                m.Passivate() # lack of arg makes it work on only selected atoms
                # (maybe it could just iterate over selatoms... #e)
        # bruce 050511: remove self.changed (since done as needed in atom.Passivate) to fix bug 376
        ## self.changed() # could be much smarter
        self.o.gl_update()
    # add hydrogen atoms to each dangling bond
    # Changed this method to mirror what modifyDehydrogenate does.
    # It is more informative about the number of chunks modified, etc.
    # Mark 050124
    def modifyHydrogenate(self):
        """
        Add hydrogen atoms to bondpoints on selected chunks/atoms.

        Works on selected chunks if any; otherwise on neighbors
        (bondpoints) of selected atoms. Emits a history message
        summarizing how many atoms were added to how many chunks.
        """
        cmd = greenmsg("Hydrogenate: ")
        fixmols = {} # helps count modified mols for statusbar (maps id(mol) -> mol)
        if self.selmols:
            counta = countm = 0 # atoms added, chunks modified
            for m in self.selmols:
                changed = m.Hydrogenate() # returns number of atoms added
                if changed:
                    counta += changed
                    countm += 1
                    fixmols[id(m)] = m
            if counta:
                didwhat = "Added %d atom(s) to %d chunk(s)" \
                          % (counta, countm)
                if len(self.selmols) > countm:
                    didwhat += \
                        " (%d selected chunk(s) had no bondpoints)" \
                        % (len(self.selmols) - countm)
                didwhat = fix_plurals(didwhat)
            else:
                didwhat = "Selected chunks contain no bondpoints"
        elif self.selatoms:
            count = 0
            for a in self.selatoms.values():
                ma = a.molecule
                for atm in a.neighbors():
                    # hydrogenate each neighbor (bondpoints become H atoms)
                    matm = atm.molecule
                    changed = atm.Hydrogenate()
                    if changed:
                        count += 1
                        fixmols[id(ma)] = ma
                        fixmols[id(matm)] = matm
            if fixmols:
                didwhat = \
                    "Added %d atom(s) to %d chunk(s)" \
                    % (count, len(fixmols))
                didwhat = fix_plurals(didwhat)
                # Technically, we *should* say ", affected" instead of "from"
                # since the count includes mols of neighbors of
                # atoms we removed, not always only mols of atoms we removed.
                # Since that's rare, we word this assuming it didn't happen.
                # [#e needs low-pri fix to be accurate in that rare case;
                #  might as well deliver that as a warning, since that case is
                #  also "dangerous" in some sense.]
            else:
                didwhat = "No bondpoints on selected atoms"
        else:
            didwhat = redmsg("Nothing selected")
        if fixmols:
            self.changed()
            self.w.win_update()
        env.history.message(cmd + didwhat)
        return
    # Remove hydrogen atoms from each selected atom/chunk
    # (coded by Mark ~10/18/04; bugfixed/optimized/msgd by Bruce same day,
    #  and cleaned up (and perhaps further bugfixed) after shakedown changes
    #  on 041118.)
    def modifyDehydrogenate(self):
        """
        Remove hydrogen atoms from selected chunks/atoms.

        Works on selected chunks if any; otherwise on selected atoms and
        their neighbors. Emits a history message summarizing how many
        atoms were removed from how many chunks.
        """
        cmd = greenmsg("Dehydrogenate: ")
        fixmols = {} # helps count modified mols for statusbar (maps id(mol) -> mol)
        if self.selmols:
            counta = countm = 0 # atoms removed, chunks modified
            for m in self.selmols:
                changed = m.Dehydrogenate() # returns number of atoms removed
                if changed:
                    counta += changed
                    countm += 1
                    fixmols[id(m)] = m
            if counta:
                didwhat = "Removed %d atom(s) from %d chunk(s)" \
                          % (counta, countm)
                if len(self.selmols) > countm:
                    didwhat += \
                        " (%d selected chunk(s) had no hydrogens)" \
                        % (len(self.selmols) - countm)
                didwhat = fix_plurals(didwhat)
            else:
                didwhat = "Selected chunks contain no hydrogens"
        elif self.selatoms:
            count = 0
            for a in self.selatoms.values():
                ma = a.molecule
                for atm in list(a.neighbors()) + [a]:
                    #bruce 041018 semantic change: added [a] as well
                    matm = atm.molecule
                    changed = atm.Dehydrogenate()
                    if changed:
                        count += 1
                        fixmols[id(ma)] = ma
                        fixmols[id(matm)] = matm
            if fixmols:
                didwhat = \
                    "Removed %d atom(s) from %d chunk(s)" \
                    % (count, len(fixmols))
                didwhat = fix_plurals(didwhat)
                # Technically, we *should* say ", affected" instead of "from"
                # since the count includes mols of neighbors of
                # atoms we removed, not always only mols of atoms we removed.
                # Since that's rare, we word this assuming it didn't happen.
                # [#e needs low-pri fix to be accurate in that rare case;
                #  might as well deliver that as a warning, since that case is
                #  also "dangerous" in some sense.]
            else:
                didwhat = "No hydrogens bonded to selected atoms"
        else:
            didwhat = redmsg("Nothing selected")
        if fixmols:
            self.changed() #e shouldn't we do this in lower-level methods?
            self.w.win_update()
        env.history.message(cmd + didwhat)
        return
pass # end of class ops_atoms_Mixin
# end
| NanoCAD-master | cad/src/operations/ops_atoms.py |
# Copyright 2004-2008 Nanorex, Inc. See LICENSE file for details.
"""
ops_copy.py -- general cut/copy/delete operations on selections
containing all kinds of model tree nodes.
@version: $Id$
@copyright: 2004-2008 Nanorex, Inc. See LICENSE file for details.
History:
bruce 050507 made this by collecting appropriate methods from class Part.
bruce extended it at various later times.
"""
from utilities import debug_flags
import foundation.env as env
from utilities.Comparison import same_vals
from utilities.debug import print_compact_stack
from utilities.Log import greenmsg, redmsg, orangemsg
from platform_dependent.PlatformDependent import fix_plurals
from foundation.Group import Group
from model.chunk import Chunk
from model.chunk import mol_copy_name
from model.chem import Atom_prekill_prep
from operations.ops_select import Selection
from model.bonds import bond_copied_atoms
from utilities.constants import gensym
from operations.ops_select import selection_from_part
from utilities.constants import noop
from geometry.VQT import V
from geometry.BoundingBox import BBox
from model.jigs import Jig
#General page prefs - paste offset scale for chunk and dna pasting prefs key
from utilities.prefs_constants import pasteOffsetScaleFactorForChunks_prefs_key
from utilities.prefs_constants import pasteOffsetScaleFactorForDnaObjects_prefs_key
DEBUG_COPY = False # do not leave this as True in the release [bruce 080414]
class ops_copy_Mixin:
    """
    Mixin class for providing these methods to class Part.

    Provides general cut/copy/delete/paste operations on selections
    containing all kinds of model tree nodes.
    """
    # == ###@@@ cut/copy/paste/kill will all be revised to handle bonds better (copy or break them as appropriate)
    # incl jig-atom connections too [bruce, ca. time of assy/part split]

    # [renamings, bruce 050419: kill -> delete_sel, cut -> cut_sel, copy -> copy_sel; does paste also need renaming? to what?]

    # bruce 050131/050201 revised these Cut and Copy methods to fix some Alpha bugs;
    # they need further review after Alpha, and probably could use some merging. ###@@@
    # See also assy.delete_sel (Delete operation).

    #@see: self._getInitialPasteOffsetForPastableNodes() to see how these
    # attrs are used
    # (accumulated offsets used when repeatedly pasting the same nodes,
    #  so successive pastes don't land on top of each other)
    _initial_paste_offset_for_chunks = V(0, 0, 0)
    _initial_paste_offset_for_other_pastables = V(0, 0, 0)
    _previously_pasted_node_list = None
def cut(self): # we should remove this obsolete alias shortly after the release. [bruce 080414 comment]
print "bug (worked around): assy.cut called, should use its new name cut_sel" #bruce 050927
if debug_flags.atom_debug:
print_compact_stack( "atom_debug: assy.cut called, should use its new name cut_sel: ")
return self.cut_sel()
    def cut_sel(self, use_selatoms = True):
        """
        Cut the selection: move the selected nodes from this Part to the
        clipboard (shelf), wrapped in an Undo op named "Cut".

        @param use_selatoms: if True, also consider selected atoms
                             (currently unsupported for cutting; a history
                             message is emitted instead). MT ops pass False.
        @type use_selatoms: boolean
        """
        #bruce 050505 added use_selatoms = True option, so MT ops can pass False (bugfix)
        #bruce 050419 renamed this from cut to avoid confusion with Node method
        # and follow new _sel convention
        #
        ###BUG: this does not yet work properly for DNA. No time to fix for .rc1.
        # [bruce 080414 late]
        #
        # Note [bruce 080415]:
        # one correct implem for DNA would just be "copy, then delete".
        # (Each of those two ops operates on a differently extended set of chunks
        # based on the selected chunks.) This would also make it work for
        # selected atoms, and make it "autogroup multiple items for clipboard".
        #
        # The issues of that implementation would be:
        # - delete doesn't yet work for DNA either (needs to extend the selection
        #   as specified elsewhere).
        # - need to make sure copy doesn't change selection, or if it does,
        #   record it first and restore it before delete, or pass the set
        #   of objects to use as the selection to delete_sel.
        # - copied jigs referring to noncopied atoms lose those references,
        #   whereas current code (which moves the jigs) preserves them
        #   (the jigs become disabled, but keep all their atoms).
        # - the history messages would say Copy rather than Cut.
        # - there may be other benefits of moving nodes rather than copying
        #   them, which I am not thinking of now.
        # Some of those could be addressed by adding a flag to Copier to
        # tell it it was "copying as part of Cut". Maybe we could even get
        # it to move rather than copy for nodes in a specified set.
        mc = env.begin_op("Cut") #bruce 050908 for Undo
        try:
            cmd = greenmsg("Cut: ")
            if use_selatoms and self.selatoms:
                # condition should not use selwhat, since jigs can be selected even in Select Atoms mode
                msg = redmsg("Cutting selected atoms is not yet supported.")
                    # REVIEW: could we fix that by calling Separate here,
                    # selecting the chunks it made from selected atoms,
                    # then continuing with Cut on them? [bruce 080415 Q]
                    # WARNING [bruce 060307, clarified 080415]: when this is
                    # implemented, the code below needs to check self.topnode
                    # for becoming None as a side effect of removing all atoms
                    # from a clipboard item whose topnode is a single chunk.
                    # See similar code in delete_sel, added by Mark to fix
                    # bug 1466, and the 'mark 060307' comment there.
                env.history.message(cmd + msg)
                # don't return yet, in case some jigs were selected too.
                # note: we will check selatoms again, below, to know whether we emitted this message
            new = Group(gensym("Copy", self.assy), self.assy, None) # (in cut_sel)
                # bruce 050201 comment: this group is usually, but not always, used only for its members list
            if self.immortal() and self.topnode.picked:
                ###@@@ design note: this is an issue for the partgroup but not for clips... what's the story?
                ### Answer: some parts can be deleted by being entirely cut (top node too) or killed, others can't.
                ### This is not a property of the node, so much as of the Part, I think.... not clear since 1-1 corr.
                ### but i'll go with that guess. immortal parts are the ones that can't be killed in the UI.
                #bruce 050201 to fix catchall bug 360's "Additional Comments From [email protected] 2005-02-02 00:36":
                # don't let assy.tree itself be cut; if that's requested, just cut all its members instead.
                # (No such restriction will be required for assy.copy_sel, even when it copies entire groups.)
                self.topnode.unpick_top()
                ## env.history.message(redmsg("Can't cut the entire Part -- cutting its members instead.")) #bruce 050201
                ###@@@ following should use description_for_history, but so far there's only one such Part so it doesn't matter yet
                msg = "Can't cut the entire Part; copying its toplevel Group, cutting its members."
                env.history.message(cmd + msg)
                self.topnode.apply2picked(lambda(x): x.moveto(new))
                use = new
                use.name = self.topnode.name # not copying any other properties of the Group (if it has any)
                new = Group(gensym("Copy", self.assy), self.assy, None) # (in cut_sel)
                new.addchild(use)
            else:
                self.topnode.apply2picked(lambda(x): x.moveto(new))
                # bruce 050131 inference from recalled bug report:
                # this must fail in some way that addchild handles, or tolerate jigs/groups but shouldn't;
                # one difference is that for chunks it would leave them in assy.molecules whereas copy_sel would not;
                # guess: that last effect (and the .pick we used to do) might be the most likely cause of some bugs --
                # like bug 278! Because findpick (etc) uses assy.molecules. So I fixed this with sanitize_for_clipboard, below.
                # [later, 050307: replaced that with update_parts.]
            # Now we know what nodes to cut (i.e. move to the clipboard) -- the members of new.
            # And they are no longer in their original location,
            # but neither they nor the group "new" is in its final location.
            # (But they still belong to their original Part, until this is changed later.)
            #e some of the following might someday be done automatically by something like end_event_handler (obs)
            # and/or by methods in a Cut command object [bruce 050908 revised comment]
            if new.members:
                # move them to the clipboard (individually for now, though this
                # is wrong if they are bonded; also, this should be made common code
                # with DND move to clipboard, though that's more complex since
                # it might move nodes inside an existing item. [bruce 050307 comment])
                self.changed() # bruce 050201 doing this earlier; 050223 made it conditional on new.members
                nshelf_before = len(self.shelf.members) #bruce 050201
                for ob in new.members[:]:
                    # [bruce 050302 copying that members list, to fix bug 360 item 8, like I fixed
                    #  bug 360 item 5 in "copy_sel" 2 weeks ago. It's silly that I didn't look for the same
                    #  bug in this method too, when I fixed it in copy_sel.]
                    # bruce 050131 try fixing bug 278 in a limited, conservative way
                    # (which won't help the underlying problem in other cases like drag & drop, sorry),
                    # based on the theory that chunks remaining in assy.molecules is the problem:
                    ## self.sanitize_for_clipboard(ob) ## zapped 050307 since obs
                    self.shelf.addchild(ob) # add new member(s) to the clipboard [incl. Groups, jigs -- won't be pastable]
                        #bruce 080415 comment: it seems wrong that this doesn't
                        # put them all into a single new Group on the clipboard,
                        # when there is more than one item. That would fix the
                        # bond-breaking issue mentioned above.
                nshelf_after = len(self.shelf.members) #bruce 050201
                msg = fix_plurals("Cut %d item(s)." % (nshelf_after - nshelf_before))
                env.history.message(cmd + msg) #bruce 050201
            else:
                if not (use_selatoms and self.selatoms):
                    #bruce 050201-bug370: we don't need this if the message for selatoms already went out
                    env.history.message(cmd + redmsg("Nothing to cut.")) #bruce 050201
        finally:
            self.assy.update_parts()
            self.w.win_update()
            env.end_op(mc)
        return
## def copy(self): # we should remove this obsolete alias shortly after the release. [bruce 080414 comment]
## print "bug (worked around): assy.copy called, should use its new name copy_sel" #bruce 050927
## if debug_flags.atom_debug:
## print_compact_stack( "atom_debug: assy.copy called, should use its new name copy_sel: ")
## return self.copy_sel()
# copy any selected parts (molecules) [making a new clipboard item... #doc #k]
# Revised by Mark to fix bug 213; Mark's code added by bruce 041129.
# Bruce's comments (based on reading the code, not all verified by test): [###obs comments]
# 0. If groups are not allowed in the clipboard (bug 213 doesn't say,
# but why else would it have been a bug to have added a group there?),
# then this is only a partial fix, since if a group is one of the
# selected items, apply2picked will run its lambda on it directly.
# 1. The group 'new' is now seemingly used only to hold
# a list; it's never made a real group (I think). So I wonder if this
# is now deviating from Josh's intention, since he presumably had some
# reason to make a group (rather than just a list).
# 2. Is it intentional to select only the last item added to the
# clipboard? (This will be the topmost selected item, since (at least
# for now) the group members are in bottom-to-top order.)
#e bruce 050523: should revise this to use selection_from_MT object...
    def copy_sel(self, use_selatoms = True): #bruce 050505 added use_selatoms = True option, so MT ops can pass False (bugfix)
        """
        Copy the current selection to the clipboard (shelf) as a new
        clipboard item, extending the selected chunk set for DNA so that
        paired strand/axis chunks are copied together.

        @param use_selatoms: if True, include selected atoms in the
                             selection being copied.
        @type use_selatoms: boolean
        """
        #bruce 050419 renamed this from copy
        #bruce 050523 new code
        # 1. what objects is user asking to copy?
        cmd = greenmsg ("Copy: ")
        from dna.model.DnaLadderRailChunk import DnaAxisChunk, DnaStrandChunk
            # must be runtime import; after release, clean up by doing it in class Assembly
            # and referring to these as self.assy.DnaAxisChunk etc
        def chunks_to_copy_along_with(chunk):
            """
            Return a list (or other sequence) of chunks that we should copy along with chunk.
            """
            # after release, refactor by adding methods to these chunk classes.
            # (DNA pairing: copying an axis chunk drags in its strand chunks
            #  and vice versa, so the copied ladder stays intact.)
            if isinstance(chunk, DnaAxisChunk):
                ladder = chunk.ladder
                if ladder and ladder.valid: # don't worry about ladder.error
                    return ladder.strand_chunks() #k
            elif isinstance(chunk, DnaStrandChunk):
                ladder = chunk.ladder
                if ladder and ladder.valid:
                    return ladder.axis_chunks() #k
            else:
                pass
            return ()
        part = self
        sel = selection_from_part(part,
                                  use_selatoms = use_selatoms,
                                  expand_chunkset_func = chunks_to_copy_along_with
                                 )
        # 2. prep this for copy by including other required objects, context, etc...
        # (eg a new group to include it all, new chunks for bare atoms)
        # and emit message about what we're about to do
        if debug_flags.atom_debug: #bruce 050811 fixed this for A6 (it was a non-debug reload)
            print "atom_debug: fyi: importing or reloading ops_copy from itself"
            import operations.ops_copy as hmm
            reload(hmm)
        from operations.ops_copy import Copier # use latest code for that class, even if not for this mixin method!
        copier = Copier(sel) #e sel.copier()?
        copier.prep_for_copy_to_shelf()
        if copier.objectsCopied == 0: # wware 20051128, bug 1118, no error msg if already given
            return
        if copier.ok():
            desc = copier.describe_objects_for_history() # e.g. "5 items" ### not sure this is worth it if we have a results msg
            if desc:
                text = "Copy %s" % desc
            else:
                text = "Copy"
            env.history.message(cmd + text)
        else:
            whynot = copier.whynot()
            env.history.message(cmd + redmsg(whynot))
            return
        # 3. do it
        new = copier.copy_as_node_for_shelf()
        self.shelf.addchild(new)
        # 4. clean up
        self.assy.update_parts()
            # overkill! should just apply to the new shelf items. [050308] ###@@@
            # (It might not be that simple -- at one point we needed to scan anything they were jig-connected to as well.
            #  Probably that's no longer true, but it needs to be checked before this is changed. [050526])
        self.w.win_update()
        return
    def copy_sel_in_same_part(self, use_selatoms = True):
        """
        Copies the selected object in the same part.

        @param use_selatoms: If true, it uses the selected atoms in the GLPane
                             for copying.
        @type use_selatoms: boolean
        @return copiedObject: Object copied and added to the same part
                              (e.g. Group, chunk, jig), or None if nothing
                              was copied.
        @note: Uses: Used in mirror operation.
        """
        #NOTE: This uses most of the code in copy_sel.
        # 1. what objects is user asking to copy?
        part = self
        sel = selection_from_part(part, use_selatoms = use_selatoms)
        # 2. prep this for copy by including other required objects, context, etc...
        # (eg a new group to include it all, new chunks for bare atoms)
        # and emit message about what we're about to do
        if debug_flags.atom_debug: #bruce 050811 fixed this for A6 (it was a non-debug reload)
            print "atom_debug: fyi: importing or reloading ops_copy from itself"
            import operations.ops_copy as hmm
            reload(hmm)
        from operations.ops_copy import Copier # use latest code for that class, even if not for this mixin method!
        copier = Copier(sel) #e sel.copier()?
        copier.prep_for_copy_to_shelf()
        if copier.objectsCopied == 0: # wware 20051128, bug 1118, no error msg if already given
            return
        if copier.ok():
            desc = copier.describe_objects_for_history() # e.g. "5 items" ### not sure this is worth it if we have a results msg
            if desc:
                text = "Mirror %s" % desc
            else:
                text = "Mirror"
            env.history.message(text)
        else:
            whynot = copier.whynot()
            cmd = 'Copy: ' # WRONG, but before this, it was undefined, according to pylint;
                # I guess it should be passed in from caller? needs REVIEW. [bruce 071107]
            env.history.message(cmd + redmsg(whynot))
            return
        # 3. do it
        copiedObject = copier.copy_as_node_for_shelf()
        self.assy.addnode(copiedObject)
        # 4. clean up
        self.assy.update_parts()
            # overkill! should just apply to the new shelf items. [050308] ###@@@
            # (It might not be that simple -- at one point we needed to scan anything they were jig-connected to as well.
            #  Probably that's no longer true, but it needs to be checked before this is changed. [050526])
        self.w.win_update()
        return copiedObject
    def part_for_save_selection(self):
        #bruce 050925; this helper method is defined here since it's very related to copy_sel ###k does it need self?
        """
        [private helper method for Save Selection]

        Return the tuple (part, killfunc, desc),
        where part is an existing or new Part which can be saved (in any format the caller supports)
        in order to save the current selection, or is None if it can't be saved (with desc being the reason why not),
        killfunc should be called when the caller is done using part,
        and desc is "" or a text string describing the selection contents
        (or an error message when part is None, as mentioned above), for use in history messages.
        """
        self.assy.o.saveLastView() # make sure glpane's cached info gets updated in its current Part, before we might use it
        entire_part = self
        sel = selection_from_part(entire_part, use_selatoms = True) #k use_selatoms is a guess
        if debug_flags.atom_debug:
            print "atom_debug: fyi: importing or reloading ops_copy from itself"
            import operations.ops_copy as hmm
            reload(hmm)
        from operations.ops_copy import Copier # use latest code for that class, even if not for this mixin method!
        copier = Copier(sel) #e sel.copier()?
        copier.prep_for_copy_to_shelf() ###k guess: same prep method should be ok
        if copier.ok():
            desc = copier.describe_objects_for_history() # e.g. "5 items"
            desc = desc or "" #k might be a noop
            # desc is returned below
        else:
            whynot = copier.whynot()
            desc = whynot
            if not sel.nonempty():
                # override message, which refers to "copy" [##e do in that subr??]
                desc = "Nothing to save" # I'm not sure this always means nothing is selected!
            return None, noop, desc
        # copy the selection (unless it's an entire part)
        ####@@@@ logic bug: if it's entire part, copy might still be needed if jigs ref atoms outside it! Hmm... ####@@@@
        copiedQ, node = copier.copy_as_node_for_saving()
        if node is None:
            desc = "Can't save this selection." #e can this happen? needs better explanation. Does it happen for no sel?
            return None, noop, desc
        # now we know we can save it; find or create part to save
        if not copiedQ:
            # node is top of an existing Part, which we should save in its entirety. Its existing pov is fine.
            savepart = node.part
            assert savepart is not None
            killfunc = noop
        else:
            # make a new part, copy pov from original one (##k I think that
            # pov copy happens automatically in Part.__init__)
            ## from part import Part as Part_class
            Part_class = self.__class__ #bruce 071103 to fix import cycle
                # (this code is untested since that Part_class change, since
                #  this feature is not accessible from the UI)
            assert Part_class.__name__ == 'Part' # remove when works
            savepart = Part_class(self.assy, node)
                # obs comment, if the above import cycle fix works:
                ### TODO: get the appropriate subclass of Part from self.assy
                # or node, and/or use a superclass with fewer methods,
                # to break an import cycle between part and ops_copy.
                # Note that this method is only needed for "save selection",
                # which is not in the UI and probably not fully implemented
                # (though I can't see in what way it's not done in the code,
                #  except the logic bug comment above; otoh I might be missing
                #  something), but which appears to be "almost fully implemented",
                # so this code should be preserved (and made accessible from a
                # debug menu command).
                # [bruce 071029 comment]
            killfunc = savepart.destroy_with_topnode
        self.w.win_update() # precaution in case of bugs (like side effects on selection) -- if no bugs, should not be needed
        return (savepart, killfunc, desc)
def paste(self, pastableNode, mousePosition = None):
"""
Paste the given item in the 3D workspace.
A. Implementation notes for the single shot paste operation:
- The object (chunk or group) is pasted with a slight offset.
Example:
Create a graphene sheet, select it , do Ctrl + C and then Ctrl + V.
The pasted object is offset to original one.
- It deselects others, selects the pasted item and then does a zoom
to selection so that the selected item is in the center of the
screen.
- Bugs/ Unsupported feature: If you paste multiple copies of an
object they are pasted at the same location.
(i.e. the offset is constant)
B. Implemetation notes for 'Paste from clipboard' operation:
- Enter L{PasteFromClipboard_Command}, select a pastable from the PM and then
double click inside the 3D workspace to paste that object.
This function uses the mouse coordinates during double click for
pasting.
@param pastableNode: The item to be pasted in the 3D workspace
@type pastableNode: L{Node}
@param mousePosition: These are the coordinates during mouse
double click while in Paste Mode.
If the node has a center it will be moved by the
moveOffset, which is L{[mousePosition} -
node.center. This parameter is not used if its a
single shot paste operation (Ctrl + V)
@type mousePosition: Array containing the x, y, z position on the
screen, or None
@see:L{self._pasteChunk}, L{self._pasteGroup}, L{self._pasteJig}
@see:L{MWsemantics.editPaste}, L{MWsemantics.editPasteFromClipboard}
@return: (itemPasted, errorMsg)
@rtype: tuple of (node or None, string)
"""
###REVIEW: this has not been reviewed for DNA data model. No time to fix for .rc1. [bruce 080414 late]
pastable = pastableNode
pos = mousePosition
moveOffset = V( 0, 0, 0)
itemPasted = None
# TODO: refactor this so that the type-specific paste methods
# can all be replaced by a single method that works for any kind
# of node, includind kinds other than Chunk, Group, or Jig.
# This would probably involve adding new methods to the Node API
# for things like bounding box for 3d objects.
# Also there is a design Q of what Paste should do for selections
# which include non-3d objects like comment nodes; I think it should
# "just work", copying them into a new location in the model tree.
# And it ought to work for selected non-nodes like atoms, too, IMHO.
# [bruce 071011 comment]
if isinstance(pastable, Chunk):
itemPasted, errorMsg = self._pasteChunk(pastable, pos)
elif isinstance(pastable, Group):
itemPasted, errorMsg = self._pasteGroup(pastable, pos)
elif isinstance(pastable, Jig):
#NOTE: it never gets in here because an independent jig on the
#clipboard is not considered 'pastable' . This needs to change
# so that Planes etc , which are internally 'jigs' can be pasted
# when they exist as a single node -- ninad 2007-08-31
itemPasted, errorMsg = self._pasteJig(pastable, pos)
else:
errorMsg = redmsg("Internal error pasting clipboard item [%s]") % \
pastable.name
if pos is None:
self.assy.unpickall_in_GLPane()
itemPasted.pick()
#Do not "zoom to selection" (based on a discussion with Russ) as
#its confusing -- ninad 2008-06-06 (just before v1.1.0 code freeze)
##self.assy.o.setViewZoomToSelection(fast = True)
self.assy.w.win_update()
if errorMsg:
msg = errorMsg
else:
msg = greenmsg("Pasted copy of clipboard item: [%s] ") % \
pastable.name
env.history.message(msg)
return itemPasted, "copy of %r" % pastable.name
    def _pasteChunk(self, chunkToPaste, mousePosition = None):
        """
        Paste the given chunk in the 3D workspace.

        @param chunkToPaste: The chunk to be pasted in the 3D workspace
        @type chunkToPaste: L{Chunk}
        @param mousePosition: These are the coordinates during mouse double
                              click.
        @type mousePosition: Array containing the x, y, z position on the
                             screen, or None
        @see: L{self.paste} for implementation notes.
        @return: (itemPasted, errorMsg)
        @rtype: tuple of (node or None, string)
        """
        assert isinstance(chunkToPaste, Chunk)
        pastable = chunkToPaste
        pos = mousePosition
        newChunk = None
        errorMsg = None
        moveOffset = V(0, 0, 0)
        newChunk = pastable.copy_single_chunk(None)
        chunkCenter = newChunk.center
        #@see: self._getInitialPasteOffsetForPastableNodes()
        original_copied_nodes = [chunkToPaste]
        if chunkToPaste:
            # NOTE(review): chunkToPaste is asserted to be a Chunk above, so
            # this condition looks always true and the else branch dead --
            # kept as-is pending confirmation of Chunk truthiness semantics.
            initial_offset_for_chunks, initial_offset_for_other_pastables = \
                self._getInitialPasteOffsetForPastableNodes(original_copied_nodes)
        else:
            initial_offset_for_chunks = V(0, 0, 0)
            initial_offset_for_other_pastables = V(0, 0, 0)
                # (note: initial_offset_for_other_pastables is unused in this
                #  method; only the chunk offset applies here)
        if pos:
            #Paste from clipboard (by Double clicking)
            moveOffset = pos - chunkCenter
        else:
            #Single Shot paste (Ctrl + V): offset proportional to the chunk's
            #bounding-box size, slightly right and down in screen space
            boundingBox = BBox()
            boundingBox.merge(newChunk.bbox)
            scale = float(boundingBox.scale() * 0.06)
            if scale < 0.001:
                scale = 0.1
            moveOffset = scale * self.assy.o.right
            moveOffset += scale * self.assy.o.down
            moveOffset += initial_offset_for_chunks
            #@see: self._getInitialPasteOffsetForPastableNodes()
            self._initial_paste_offset_for_chunks = moveOffset
        newChunk.move(moveOffset)
        self.assy.addmol(newChunk)
        return newChunk, errorMsg
def _pasteGroup(self, groupToPaste, mousePosition = None):
    """
    Paste the given group (and all its members) in the 3D workspace.

    @param groupToPaste: The group to be pasted in the 3D workspace
    @type groupToPaste: L{Group}
    @param mousePosition: These are the coordinates during mouse
                          double click.
    @type mousePosition: Array containing the x, y, z
                         position on the screen, or None
    @see: L{self.paste} for implementation notes.
    @see: self._getInitialPasteOffsetForPastableNodes()
    @return: (itemPasted, errorMsg)
    @rtype: tuple of (node or None, string)
    """
    #@TODO: REFACTOR and REVIEW this.
    #Many changes made just before v1.1.0 codefreeze for a new must have
    #bug fix -- Ninad 2008-06-06

    #Note about new implementation as of 2008-06-06:
    #When pasting a selection which may contain various groups as
    #well as independent chunks, this method does the following --
    #a) checks if the items to be pasted have at least one Dna object
    #   such as a DnaGroup or DnaStrandOrSegment or a DnaStrandOrAxisChunk
    #If it finds the above, the scale for computing the move offset
    #for pasting all the selection is the one for pasting dna objects
    #(see scale_when_dna_in_newNodeList).
    #- If there are no dna objects AND all pastable items are pure chunks
    #  then uses a scale computed using bounding box of the chunks.. if thats
    #too low, then uses 'scale_when_dna_in_newNodeList'
    #for all non 'pure chunk' pastable items, it always uses
    #'scale_when_dna_in_newNodeList'. soon, these scale values will become a
    #user preference. -- Ninad 2008-06-06
    assert isinstance(groupToPaste, Group)
    pastable = groupToPaste
    pos = mousePosition
    newGroup = None
    errorMsg = None
    moveOffset = V(0, 0, 0)
    assy = self.assy
    nodes = list(pastable.members) # used in several places below ### TODO: rename
    # Preferred path (fix for bug 2919): copy the whole pastable node at
    # once, autogrouped, and use that copy directly as the new group.
    newstuff = copied_nodes_for_DND( [pastable],
                                     autogroup_at_top = True, ###k
                                     assy = assy )
    if len(newstuff) == 1:
        # new code (to fix bug 2919) worked, keep using it
        use_new_code = True # to fix bug 2919, but fall back to old code on error [bruce 080718]
        newGroup = newstuff[0]
        newNodeList = list(newGroup.members)
            # copying this is a precaution, probably not needed
    else:
        # new code failed, fall back to old code
        print "bug in fix for bug 2919, falling back to older code " \
              "(len is %d, should be 1)" % len(newstuff)
        use_new_code = False
        newGroup = Group(pastable.name, assy, None)
            # Review: should this use Group or groupToPaste.__class__,
            # e.g. re a DnaGroup or DnaSegment? [bruce 080314 question]
            # (Yes, to fix bug 2919; or better, just copy the whole node
            #  using the copy function now used on its members
            #  [bruce 080717 reply]. This is now attempted above.)
        newNodeList = copied_nodes_for_DND( nodes,
                                            autogroup_at_top = False,
                                            assy = assy )
        if not newNodeList:
            errorMsg = orangemsg("Clipboard item is probably an empty group. "\
                                 "Paste cancelled")
                # review: is this claim about the cause always correct?
                # review: is there any good reason to cancel the paste then?
                # probably not; not only that, it appears that we *don't* cancel it,
                # but return something that means we'll go ahead with it,
                # i.e. the message is wrong. [bruce 080717 guess]
            return newGroup, errorMsg
        pass
    # note: at this point, if use_new_code is false,
    # newGroup is still empty (newNodeList not yet added to it);
    # in that case they are added just before returning.
    selection_has_dna_objects = self._pasteGroup_nodeList_contains_Dna_objects(newNodeList)
    scale_when_dna_in_newNodeList = env.prefs[pasteOffsetScaleFactorForDnaObjects_prefs_key]
    scale_when_no_dna_in_newNodeList = env.prefs[pasteOffsetScaleFactorForChunks_prefs_key]

    def filterChunks(node):
        """
        Returns True if the given node is a chunk AND its NOT a DnaStrand
        chunk or DnaAxis chunk. Otherwise returns False.
        See also sub-'def filterOtherPastables', which does exactly opposite
        It filters out pastables that are not 'pure chunks'
        """
        # NOTE(review): "not node.isAxisChunk() or node.isStrandChunk()"
        # parses as "(not isAxisChunk()) or isStrandChunk()", which returns
        # True for strand chunks -- contradicting this docstring. Presumably
        # "not (node.isAxisChunk() or node.isStrandChunk())" was intended.
        # Left unchanged since filterOtherPastables mirrors the same
        # expression (so the two filters stay complementary) and shipped
        # paste-offset behavior may depend on it -- TODO confirm and fix both.
        if isinstance(node, self.assy.Chunk):
            if not node.isAxisChunk() or node.isStrandChunk():
                return True
        return False

    def filterOtherPastables(node):
        """
        Returns FALSE if the given node is a chunk AND its NOT a DnaStrand
        chunk or DnaAxis chunk. Otherwise returns TRUE. (does exactly opposite
        of def filterChunks
        @see: sub method filterChunks.
        @see: self._getInitialPasteOffsetForPastableNodes()
        """
        # NOTE(review): same operator-precedence concern as in filterChunks
        # above; kept identical so the two filters partition newNodeList.
        if isinstance(node, self.assy.Chunk):
            if not node.isAxisChunk() or node.isStrandChunk():
                return False
        return True

    chunkList = []
    other_pastable_items = []
    chunkList = filter(lambda newNode: filterChunks(newNode), newNodeList)
    if len(chunkList) < len(newNodeList):
        other_pastable_items = filter(lambda newNode:
                                      filterOtherPastables(newNode),
                                      newNodeList)
    #@see: self._getInitialPasteOffsetForPastableNodes()
    original_copied_nodes = nodes
    if nodes:
        initial_offset_for_chunks, initial_offset_for_other_pastables = \
            self._getInitialPasteOffsetForPastableNodes(original_copied_nodes)
    else:
        initial_offset_for_chunks = V(0, 0, 0)
        initial_offset_for_other_pastables = V(0, 0, 0)
    if chunkList:
        # Place the 'pure chunks': use the bounding box of all of them
        # to choose the offset scale (unless dna objects force the
        # dna-specific scale).
        boundingBox = BBox()
        for m in chunkList:
            boundingBox.merge(m.bbox)
        approxCenter = boundingBox.center()
        if selection_has_dna_objects:
            scale = scale_when_dna_in_newNodeList
        else:
            #scale that determines moveOffset
            scale = float(boundingBox.scale() * 0.06)
            if scale < 0.001:
                scale = scale_when_no_dna_in_newNodeList
        if pos:
            moveOffset = pos - approxCenter
        else:
            moveOffset = scale * self.assy.o.right
            moveOffset += scale * self.assy.o.down
            moveOffset += initial_offset_for_chunks
            #@see: self._getInitialPasteOffsetForPastableNodes()
            self._initial_paste_offset_for_chunks = moveOffset
        #Move the chunks (these will be later added to the newGroup)
        for m in chunkList:
            m.move(moveOffset)
    if other_pastable_items:
        # Place everything else; always uses the dna-object scale.
        approxCenter = V(0.01, 0.01, 0.01)
        scale = scale_when_dna_in_newNodeList
        if pos:
            moveOffset = pos - approxCenter
        else:
            moveOffset = initial_offset_for_other_pastables
            moveOffset += scale * self.assy.o.right
            moveOffset += scale * self.assy.o.down
            #@see: self._getInitialPasteOffsetForPastableNodes()
            self._initial_paste_offset_for_other_pastables = moveOffset
        for m in other_pastable_items:
            m.move(moveOffset)
    pass
    #Now add all the nodes in the newNodeList to the Group, if needed
    if not use_new_code:
        for newNode in newNodeList:
            newGroup.addmember(newNode)
    assy.addnode(newGroup)
        # review: is this the best place to add it?
        # probably there is no other choice, since it comes from the clipboard
        # (unless we introduce a "model tree cursor" or "current group").
        # [bruce 080717 comment]
    return newGroup, errorMsg
# Determine whether the selection (node list) contains any DNA objects:
def _pasteGroup_nodeList_contains_Dna_objects(self, nodeList): # by Ninad
    """
    Private helper: return True if the given list contains at least one
    dna object -- a DnaGroup, a DnaStrandOrSegment, or a Chunk that is a
    strand or axis chunk -- and False otherwise.
    Used in self._pasteGroup as of 2008-06-06.
    @TODO: May even be moved to a general utility class
    in dna pkg. (but needs self.assy for isinstance checks)
    """
    # BUG: doesn't look inside Groups. Ignorable,
    # since this method will be removed when paste method is refactored.
    # [bruce 080717 comment]
    dna_node_classes = (self.assy.DnaGroup, self.assy.DnaStrandOrSegment)
    for node in nodeList:
        if isinstance(node, dna_node_classes):
            return True
        if isinstance(node, Chunk) and \
           (node.isStrandChunk() or node.isAxisChunk()):
            return True
    return False
def _getInitialPasteOffsetForPastableNodes(self, original_copied_nodes): # by Ninad
    """
    Return the initial (chunk, other-pastable) offsets to use when pasting
    the given nodes.

    @see: self._pasteGroup(), self._pasteChunk()

    What it supports:
    1. User selects some objects
    2. Hits Ctrl + C
    3. Hits Ctrl + V
       - first Ctrl + V pastes the objects at an offset from the originals
         (doesn't recenter the view)
       - each further paste offsets them further, and so on.
    This fixes bug 2890.
    """
    #@TODO: Review this method. It was added just before v1.1.0 to fix a
    #copy-paste-pasteagain-pasteagain bug -- Ninad 2008-06-06
    if same_vals(original_copied_nodes, self._previously_pasted_node_list):
        # Same clipboard content pasted again: continue from the offsets
        # recorded by the previous paste.
        # (NOTE(review): assumes self._initial_paste_offset_* were set by
        # an earlier paste -- presumably guaranteed by class defaults or
        # the first-paste path; confirm.)
        chunk_offset = self._initial_paste_offset_for_chunks
        other_offset = self._initial_paste_offset_for_other_pastables
    else:
        chunk_offset = V(0, 0, 0)
        other_offset = V(0, 0, 0)
    self._previously_pasted_node_list = original_copied_nodes
    return chunk_offset, other_offset
def _pasteJig(self, jigToPaste, mousePosition = None):
    """
    Paste the given Jig in the 3D workspace.

    @param jigToPaste: The jig to be pasted in the 3D workspace
    @type jigToPaste: L{Jig}
    @param mousePosition: These are the coordinates during mouse double
                          click.
    @type mousePosition: Array containing the x, y, z position on the
                         screen, or None
    @see: L{self.paste} for implementation notes.
    @return: (itemPasted, errorMsg)
    @rtype: tuple of (node or None, string)

    @note: pasting jigs is not yet implemented; this always returns
           (None, errorMsg). See comments below.
    """
    assert isinstance(jigToPaste, Jig)
    pastable = jigToPaste
    ## newJig = pastable.copy(None) # BUG: never works (see comment below);
    # inlining it so I can remove that method from Node: [bruce 090113]
    pastable.redmsg("This cannot yet be copied")
    # Note: there is no def copy on Jig or any subclass of Jig,
    # so this would run Node.copy, which prints a redmsg to history
    # and returns None. What we need is new paste code which uses
    # something like the existing code to "copy a list of nodes".
    # Or perhaps a new implem of Node.copy which uses the existing
    # general copy code properly (if pastables are always single nodes).
    # [bruce 080314 comment]
    #
    # BUGFIX: the original code set newJig = None here and then went on to
    # dereference newJig.center, guaranteeing an AttributeError (its own
    # comment said "will cause bugs below"). Instead, honor this method's
    # contract by returning (None, errorMsg) so the caller can report the
    # failure gracefully. The move/addnode code that used to follow was
    # unreachable in any working sense and has been removed.
    errorMsg = redmsg("Internal error pasting clipboard item [%s]" %
                      pastable.name)
    return None, errorMsg
def kill(self):
print "bug (worked around): assy.kill called, should use its new name delete_sel" #bruce 050927
if debug_flags.atom_debug:
print_compact_stack( "atom_debug: assy.kill called, should use its new name delete_sel: ")
self.delete_sel()
def delete_sel(self, use_selatoms = True): #bruce 050505 added use_selatoms = True option, so MT ops can pass False (bugfix)
    """
    delete all selected nodes or atoms in this Part
    [except the top node, if we're an immortal Part]

    @param use_selatoms: whether to also delete selected atoms
                         (MT ops pass False, since they operate on nodes only)
    """
    ###REVIEW: this may not yet work properly for DNA. No time to review or fix for .rc1. [bruce 080414 late]
    #bruce 050419 renamed this from kill, to distinguish it
    # from standard meaning of obj.kill() == kill that obj
    #bruce 050201 for Alpha: revised this to fix bug 370
    ## "delete whatever is selected from this assembly " #e use this in the assy version of this method, if we need one
    cmd = greenmsg("Delete: ")
    info = ""
    ###@@@ #e this also needs a results-message, below.
    if use_selatoms and self.selatoms:
        self.changed()
        nsa = len(self.selatoms) # Get the number of selected atoms before it changes
        if 1:
            #bruce 060328 optimization: avoid creating transient new bondpoints as you delete bonds between these atoms
            # WARNING: the rules for doing this properly are tricky and are not yet documented.
            # The basic rule is to do things in this order, for atoms only, for a lot of them at once:
            # prekill_prep, prekill all the atoms, kill the same atoms.
            val = Atom_prekill_prep()
            for a in self.selatoms.itervalues():
                a._f_will_kill = val # inlined a._f_prekill(val), for speed
        for a in self.selatoms.values(): # the above can be itervalues, but this can't be!
            # (killing an atom mutates self.selatoms, so iterate over a snapshot here)
            a.kill()
        self.selatoms = {} # should be redundant
        info = fix_plurals( "Deleted %d atom(s)" % nsa)
    ## bruce 050201 removed the condition "self.selwhat == 2 or self.selmols"
    # (previously used to decide whether to kill all picked nodes in self.topnode)
    # since selected jigs no longer force selwhat to be 2.
    # (Maybe they never did, but my guess is they did; anyway I think they shouldn't.)
    # self.changed() is not needed since removing Group members should do it (I think),
    # and would be wrong here if nothing was selected.
    if self.immortal():
        self.topnode.unpick_top() #bruce 050201: prevent deletion of entire part (no msg needed)
    if self.topnode:
        # This condition is needed because the code above that calls
        # a.kill() may have already deleted the Chunk/Node the atom(s)
        # belonged to. If the current node is a clipboard item part,
        # self no longer has a topnode. Fixes bug 1466. mark 060307.
        # [bruce 060307 adds: this only happens if all atoms in the Part
        #  were deleted, and it has no nodes except Chunks. By "the current
        #  node" (which is not a concept we have) I think Mark meant the
        #  former value of self.topnode, when that was a chunk which lost
        #  all its atoms.) See also my comment in cut_sel, which will
        #  someday need this fix [when it can cut atoms].]
        self.topnode.apply2picked(lambda o: o.kill())
    self.invalidate_attr('natoms') #####@@@@@ actually this is needed in the Atom and Chunk kill methods, and add/remove methods
    #bruce 050427 moved win_update into delete_sel as part of fixing bug 566
    env.history.message( cmd + info) # Mark 050715
    self.w.win_update()
    return
pass # end of class ops_copy_Mixin
# ==
### TODO: after the release, should split this into two files at this point. [bruce 080414 comment]
DEBUG_ORDER = False #bruce 070525, can remove soon
def copied_nodes_for_DND( nodes, autogroup_at_top = False, assy = None, _sort = False):
"""
Given a list of nodes (which must live in the same Part, though this may go unchecked),
copy them (into their existing assy, or into a new one if given), as if they were being DND-copied
from their existing Part, but don't place the copies under any Group (caller must do that).
Honor the autogroup_at_top option (see class Copier for details).
@warning: this ignores their order in the list of input nodes, using only their
MT order (native order within their Part's topnode) to determine the order
of the returned list of copied nodes. If the input order matters, use
copy_nodes_in_order instead.
@note: _sort is a private option for use by copy_nodes_in_order.
@note: this method is used for several kinds of copying, not only for DND.
"""
if not nodes:
return None
if DEBUG_ORDER:
print "copied_nodes_for_DND: got nodes",nodes
print "their ids are",map(id,nodes)
part = nodes[0].part # kluge
originals = nodes[:] #k not sure if this list-copy is needed
copier = Copier(Selection(part, nodes = nodes), assy = assy)
### WARNING: this loses all info about the order of nodes! At least, it does once copier copies them.
# That was indirectly the cause of bug 2403 (copied nodes reversed in DND) -- the caller reversed them
# to try to compensate, but that had no effect. It might risk bugs in our use for Extrude, as well [fixed now].
# But for most copy applications (including DND for a general selected set), it does make sense to use MT order
# rather than the order in which a list of nodes was provided (which in some cases might be selection order
# or an arbitrary dict-value order). So -- I fixed the DND order bug by reversing the copies
# (not the originals) in the caller; and I added copy_nodes_in_order for copying a list of nodes
# in the same order as in the list, and used it in Extrude as a precaution.
# [bruce 070525]
copier.prep_for_copy_to_shelf()
if not copier.ok():
#e histmsg?
return None
nodes = copier.copy_as_list_for_DND() # might be None (after histmsg) or a list
if _sort:
# sort the copies to correspond with the originals -- or, more precisely,
# only include in the output the copies of the originals
# (whether or not originals are duplicated, or new wrapping nodes were created when copying).
# If some original was not copied, print a warning (for now only -- later this will be legitimized)
# and use None in its place -- thus preserving orig-copy correspondence at same positions in
# input and output lists. [bruce 070525]
def lookup(orig):
"return the copy corresponding to orig"
res = copier.origid_to_copy.get(id(orig), None)
if res is None:
print "debug note: copy of %r is None" % (orig,) # remove this if it happens legitimately
return res
nodes = map(lookup, originals)
if nodes and autogroup_at_top:
if _sort:
nodes = filter( lambda node: node is not None , nodes)
nodes = copier.autogroup_if_several(nodes)
if DEBUG_ORDER:
print "copied_nodes_for_DND: return nodes",nodes
print "their ids are",map(id,nodes)
print "copier.origid_to_copy is",copier.origid_to_copy
print "... looking at that with id",[(k,id(v)) for (k,v) in copier.origid_to_copy.items()]
return nodes
def copy_nodes_in_order(nodes, assy = None): #bruce 070525
    """
    Given a list of nodes in the same Part, copy them
    (into their existing assy, or into a new one if given)
    and return the list of copied nodes, in the same order
    as their originals (whether or not this agrees with their
    MT order, i.e. their native order in their Part) -- in fact,
    with a precise 1-1 correspondence between originals and copies
    at the same list positions (i.e. no missing copies --
    use None in their place if necessary).

    See also copied_nodes_for_DND, which uses the nodes' native order instead.
    """
    # note: if we decide we need an autogroup_at_top option,
    # we'll have to modify this call
    copies = copied_nodes_for_DND(nodes, assy = assy, _sort = True) or []
    assert len(copies) == len(nodes) # should be true even if some nodes were not copyable
    return copies
# ==
class Copier: #bruce 050523-050526; might need revision for merging with DND copy
"""
Control one run of an operation which copies selected nodes and/or atoms.
@note: When this is passed to Node copy routines, it's referred to in their
argument names as a mapping.
"""
def __init__(self, sel, assy = None):
    """
    Create a new Copier for a new (upcoming) copy operation,
    where sel is a Selection object which represents the set of things we'll copy
    (or maybe a superset of that?? #k),
    and assy (optional) is the assembly object which should contain the new node copies
    (if not provided, they'll be in the same assembly as before; all nodes in a Selection
    object must be in a single assembly).
    """
    self.sel = sel
    # the assy into which we'll put copies
    # [new feature, bruce 070430: self.assy can differ from assy of originals
    #  -- ###UNTESTED; will use for partlib groups]
    if assy:
        self.assy = assy
    else:
        self.assy = sel.part.assy
    self.objectsCopied = 0 # wware 20051128, bug 1118, no error msg if already given
def prep_for_copy_to_shelf(self):
    """
    Figure out whether to make a new toplevel Group,
    whether to copy any nonselected Groups or Chunks with selected innards, etc.

    Fills self.fullcopy, self.atom_chunks, self.atom_chunk_atoms and
    self.atom_jigs (dicts keyed by id(obj)), sets self.verytopnode, and
    counts self.objectsCopied; the actual copying happens later in
    copy_as_list / recurse.

    @note: in spite of the name, this is also used by copied_nodes_for_DND
           (which is itself used for several kinds of copying, not only for DND).
    """
    # Rules: partly copy (just enough to provide a context or container for other copied things):
    # - any chunk with copied atoms (since atoms can't live outside of chunks),
    # - certain jigs with copied atoms (those which confer properties on the atoms),
    # - any Group with some but not all copied things (not counting partly copied jigs?) inside it
    #   (since it's a useful separator),
    # - (in future; maybe) any Group which confers properties (eg display modes) being used on copied
    #   things inside it (but probably just copy the properties actually being used).
    # Then at the end (these last things might not be done until a later method, not sure):
    # - if more than topnode is being copied, make a wrapping group around
    #   everything that gets copied (this is not really a copy of the PartGroup, e.g. its name is unrelated).
    # - perhaps modify the name of the top node copied (or of the wrapping group) to say it's a copy.
    # Algorithm:
    # we'll make dicts of leafnodes to partly copy, but save most of the work
    # (including all decisions about groups) for a scan during the actual copy.
    fullcopy = self.fullcopy = {}
    atom_chunks = self.atom_chunks = {} # id(chunk) -> chunk, for chunks containing selected atoms
    atom_chunk_atoms = self.atom_chunk_atoms = {} # id(chunk) -> list of its atoms to copy (if it's not fullcopied) (arb order)
    atom_jigs = self.atom_jigs = {} # id(jig) -> jig, for jigs conferring properties on selected atoms
    sel = self.sel
    if debug_flags.atom_debug and not sel.topnodes:
        print "debug warning: prep_for_copy_to_shelf sees no sel.topnodes"
        #bruce 060627; not always a bug (e.g. happens for copying atoms)
    # Pass 1: selected nodes which agree to be fully copied.
    for node in sel.topnodes: # no need to scan selmols too, it's redundant (and in general a subset)
        # chunks, jigs, Groups -- for efficiency and in case it's a feature,
        # don't scan jigs of a chunk's atoms like we do for individual atoms;
        # this decision might be revised, and if so, we'd scan that here when node is a chunk.
        if node.will_copy_if_selected(sel, True): #wware 060329 added realCopy arg, True to cause non-copy warning to be printed
            # Will this node agree to be copied, if it's selected, given what else is selected?
            # (Can be false for Jigs whose atoms won't be copied, if they can't exist with no atoms or too few atoms.)
            # For Groups, no need to recurse here and call this on members,
            # since we assume the groups themselves always say yes -- #e if that changes,
            # we might need to recurse on their members here if the groups say no,
            # unless that 'no' applies to copying the members too.
            fullcopy[id(node)] = node
    # Pass 2: individually selected atoms, their chunks, and their property-conferring jigs.
    for atom in sel.selatoms.itervalues(): # this use of selatoms.itervalues is only safe because .pick/.unpick is not called
        chunk = atom.molecule
        #e for now we assume that all these chunks will always be partly copied;
        # if that changes, we'd need to figure out which ones are not copied, but not right here
        # since this can run many times per chunk.
        idchunk = id(chunk)
        atom_chunks[idchunk] = chunk #k might not be needed since redundant with atom_chunk_atoms except for knowing the chunk
            # some of these might be redundant with fullcopied chunks (at toplevel or lower levels); that's ok
            # (note: I think none are, for now)
        atoms = atom_chunk_atoms.setdefault(idchunk, [])
        atoms.append(atom)
        for jig in atom.jigs:
            if jig.confers_properties_on(atom):
                # Note: it's intentional that we don't check this for all jigs on all atoms
                # copied inside of selected chunks. The real reason is efficiency; the excuse
                # is that when selecting chunks, user could do this in MT and choose which jigs
                # to select, whereas when selecting atoms, they can't, so we have to do it
                # for them (by "when in doubt, copy the jig" and letting them delete the ones
                # they didn't want copied).
                # It's also intentional that whether the jig is disabled makes no difference here.
                atom_jigs[id(jig)] = jig # ditto (about redundancy)
                #e save filtering of jigtypes for later, for efficiency?
                # I tried coding that and it seemed less efficient
                # (since I'm assuming it can depend on the atom, in general, tho for now, none do).
    # Now we need to know which atom_jigs will actually agree to be partly copied,
    # just due to the selatoms inside them. Assume most of them will agree to be copied
    # (since they said they confer properties that should be copied) (so just delete the other ones).
    for jig in atom_jigs.values():
        if not jig.will_partly_copy_due_to_selatoms(sel):
            del atom_jigs[id(jig)]
        # This might delete some which should be copied since selected -- that's ok,
        # they remain in fullcopy then. We're just deleting *this reason* to copy them.
    # (At this point we assume that all jigs we still know about will agree to be copied,
    # except perhaps the ones inside fullcopied groups, for which we don't need to know in advance.)
    self.verytopnode = sel.part.topnode
    for d in (fullcopy, atom_chunks, atom_chunk_atoms, atom_jigs): # wware 20051128, bug 1118
        self.objectsCopied += len(d)
        # [note: I am not sure there are not overlaps in these dicts, so this number might be wrong,
        #  but whether it's 0 is right, which is all that matters. But I have not reviewed
        #  whether the code related to how it's used is fully correct. bruce 060627 comment]
    return # from prep_for_copy_to_shelf
# the following methods should be called only after some operation has been prepped for
# (and usually before it's been done, but that's not required)
def ok(self):
    """
    Return True if there is anything to copy; otherwise record the
    reason in self._whynot (see self.whynot()) and return False.
    """
    if not self.sel.nonempty():
        self._whynot = "Nothing to copy"
        return False
    return True
def describe_objects_for_history(self):
    """
    Return a history-message description of the objects to be copied
    (delegates to our Selection).
    """
    sel = self.sel
    return sel.describe_objects_for_history()
# class default: the last reason self.ok() returned False, if any
_whynot = ""

def whynot(self):
    """
    Return a user-visible reason why the copy can't be done.
    """
    reason = self._whynot
    return reason or "can't copy those items"
# this makes the actual copy (into a known destination) using the info computed above; there are several variants.
def copy_as_list_for_DND(self): #bruce 050527 added this variant and split out the other one
    """
    Copy the prepped selection for DND, without wrapping the copies in
    partial groups.

    @return: a list of nodes, or None
    """
    return self.copy_as_list( make_partial_groups = False)
def copy_as_node_for_shelf(self):
    """
    Create and return a new single node (not yet placed in any Group)
    which is a copy of our selected objects meant for the Clipboard;
    or return None (after history message -- would it be better to let
    caller do that??) if all selected objects refuse to be copied.
    """
    copies = self.copy_as_list( make_partial_groups = True) # might be None
    if copies is None:
        return None
    return self.wrap_or_rename( copies)
def copy_as_node_for_saving(self): #bruce 050925 for "save selection"
    """
    Copy the selection into a single new node, suitable for saving into a new file;
    in some cases, return the original selection if a copy would not be needed;
    return value is a pair (copiedQ, node) where copiedQ says whether node is a copy or not.
    If nothing was selected or none of the selection could be copied, return value is
    (False, None).

    Specifically:
    If the selection consists of one entire Part, return it unmodified (with no wrapping
    group). (This is the only use of the optimization of not actually copying; other uses
    of that would require an ability to copy or create a Group but let its children be
    shared with an existing different Group, which the Node class doesn't yet support.)
    Otherwise, return a new Group (suitable for transformation into a PartGroup by the
    caller) containing copies of the top selected nodes (perhaps partially grouped if they
    were in the original), even if there is only one top selected node.
    """
    # review: is this used by anything accessible from the UI?
    if self.sel.topnodes == [self.sel.part.topnode]:
        # the selection is exactly one entire Part -- no copy needed
        return (False, self.sel.part.topnode)
    copies = self.copy_as_list( make_partial_groups = True) # might be None
    if copies is None:
        return (False, None)
    name = "Copied Selection" #k ?? add unique int?
    wrapper = Group(name, self.assy, None, copies)
        ### REVIEW: some other subclass of Group?
        # use copy_with_provided_copied_partial_contents?
    return (True, wrapper)
def copy_as_list(self, make_partial_groups = True):
    """
    [private helper method, used in the above copy_as_xxx methods]

    Create and return a list of one or more new nodes (not yet placed in any Group)
    which is a copy of our selected objects
    (with all ordering in the copy coming from the model tree order of the originals),
    or return None (after history message -- would it be better to let caller do that??)
    if all objects refuse to be copied.

    It's up to caller whether to group these nodes if there is more than one,
    whether to rename the top node, whether to recenter them,
    and whether to place them in the same or in a different Part as the one they started in.

    Assuming no bugs, the returned nodes might have internode bonds, but they have no
    bonds or jig-atom references (in either direction) between them as a set, and anything else.
    So, even if we copy a jig and caller intends to place it in the same Part,
    this method (unless extended! ###e) won't let that jig refer to anything except copied
    atoms (copied as part of the same set of nodes). [So to implement a "Duplicate" function
    for jigs, letting duplicates refer to the same atoms, this method would need to be extended.
    Or maybe copy for jigs with none of their atoms copied should have a different meaning?? #e]
    """
    self.make_partial_groups = make_partial_groups # used by self.recurse
    # Copy everything we need to (except for extern bonds, and finishing up of jig refs to atoms).
    self.origid_to_copy = {} # various obj copy methods use/extend this, to map id(orig-obj) to copy-obj for all levels of obj
    self.extern_atoms_bonds = []
        # this will get extended when chunks or isolated atoms are copied,
        # with (orig-atom, orig-bond) pairs for which bond copies are not made
        # (but atom copies will be made, and recorded in origid_to_copy)
    self.do_these_at_end = [] #e might change to a dict so it can handle half-copied bonds too, get popped when they pair up
    self.newstuff = []
    self.tentative_new_groups = {}
    self.recurse( self.verytopnode) #e this doesn't need the kluge of verytop always being a group, i think
    # Now handle the bonds that were not made when atoms were copied.
    # (Someday we might merge this into the "finishing up" (for jigs) that happens
    # later. The order of this vs. that vs. group cleanup should not matter.
    # [update 080414: a comment below, dated [bruce 050704], says it might matter now.])
    # An extern bond appears once in extern_atoms_bonds per copied endpoint atom:
    # the first sighting parks the atom in halfbonds; the second pairs it up and
    # makes the copied bond.
    halfbonds = {}
    actualbonds = {}
    origid_to_copy = self.origid_to_copy
    for atom2, bond in self.extern_atoms_bonds:
        atom1 = halfbonds.pop(id(bond), None)
        if atom1 is not None:
            na1 = origid_to_copy[id(atom1)]
            na2 = origid_to_copy[id(atom2)]
            bond_copied_atoms(na1, na2, bond, atom1)
        else:
            halfbonds[id(bond)] = atom2
            actualbonds[id(bond)] = bond
                #e would it be faster to just use bonds as keys? Probably not! (bond.__getattr__)
    # Now "break" (in the copied atoms) the still uncopied bonds (those for which only one atom was copied)
    # (This does not affect original atoms or break any existing bond object, but it acts like
    # we copied a bond and then broke that copied bond.)
    for bondid, atom in halfbonds.items():
        bond = actualbonds[bondid]
        nuat = origid_to_copy[id(atom)]
        nuat.break_unmade_bond(bond, atom)
            # i.e. add singlets (or do equivalent invals) as if bond was copied onto atom, then broken;
            # uses original atom so it can find other atom and know bond direction
            # (it assumes nuat might be translated but not rotated, for now)
    # Do other finishing-up steps as requested by copied items
    # (e.g. jigs change their atom refs from orig to copied atoms)
    # (warning: res is still not in any Group, and still has no Part,
    #  and toplevel group structure might still be revised)
    # In case this can ever delete nodes or add siblings (though it doesn't do that for now) [now it can, as of bruce 050704],
    # we should do it before cleaning up the Group structure.
    for func in self.do_these_at_end[:]:
        func() # these should not add further such funcs! #e could check for that, or even handle them if added.
        # [these funcs now sometimes delete just-copied nodes, as of bruce 050704 fixing bug 743.]
    del self.do_these_at_end
    # Now clean up the toplevel Group structure of the copy, and return it.
    newstuff = self.newstuff
    del self.newstuff
    ## assert (not self.make_partial_groups) or (not newstuff or len(newstuff) == 1)
    if not ((not self.make_partial_groups) or (not newstuff or len(newstuff) == 1)): # weakened to print, just in case, 080414
        print "fyi: old sanity check failed: assert (not self.make_partial_groups) or (not newstuff or len(newstuff) == 1)"
        # since either verytopnode is a leaf and refused or got copied,
        # or it's a group and copied as one (or all contents refused -- not sure if it copies then #k)
        # (this assert is not required by following code, it's just here as a sanity check)
    # strip off unneeded groups at the top, and return None if nothing got copied
    while len(newstuff) == 1 and \
          self._dissolveable_and_in_tentative_new_groups( newstuff[0]):
        newstuff = newstuff[0].steal_members()
    if not newstuff:
        # everything refused to be copied. Can happen (e.g. for a single selected jig at the top).
        env.history.message( redmsg( "That selection can't be copied by itself." )) ###e improve message
        return None
    # further processing depends on the caller (a public method of this class)
    return newstuff
def _dissolveable_and_in_tentative_new_groups(self, group): #bruce 080414
# note: this method name is intended to be findable when searching for tentative_new_groups;
# otherwise I'd call this _dissolveable_tentative_group.
res = id(group) in self.tentative_new_groups and \
not self._non_dissolveable_group(group)
if DEBUG_COPY and res:
print "debug copy: discarding the outer Group wrapper of: %r" % group
return res
def _non_dissolveable_group(self, group): #bruce 080414
"""
id(group) is in self.tentative_new_groups, and if it's an ordinary
Group we will dissolve it and use its members directly,
since we made it but it was not selected initially during this copy.
But, not all Groups want us to dissolve them then!
Return True if this is one of the special kinds that doesn't want that.
"""
DnaStrandOrSegment = self.assy.DnaStrandOrSegment
DnaGroup = self.assy.DnaGroup
return isinstance(group, DnaGroup) or isinstance(group, DnaStrandOrSegment)
def autogroup_if_several(self, newstuff): #bruce 050527
    """
    If newstuff holds more than one node, wrap the nodes in a single new
    Group (autonamed for the clipboard) and return a one-element list
    containing that Group; otherwise return newstuff unchanged.
    """
    #e should probably refile this into self.assy or so,
    # or even into Node or Group (for target node which asks for the
    # grouping to do), and merge with similar code
    if not newstuff or len(newstuff) <= 1:
        return newstuff
    # several nodes -- add a wrapping group
    group_name = self.assy.name_autogrouped_nodes_for_clipboard( newstuff) #k argument
    wrapper = Group(group_name, self.assy, None, newstuff)
    ###e we ought to also store this name as the name of the new part
    # (which does not yet exist), like is done in create_new_toplevel_group;
    # addendum 050527: new prior_part feature might be doing this now #k
    #update, bruce 080414: does this ever need to make a special class of
    # Group? Not due to its members (recurse handles that via
    # tentative_new_groups), but maybe based on where we plan to *put* the
    # result -- AFAIK that never matters yet except for the clipboard.
    return [wrapper]
def wrap_or_rename(self, newstuff):
    """
    Reduce the nonempty list newstuff to a single node and return it:
    autogroup the nodes into one Group when there are several, then
    rename the result the way the old chunk-copying code did -- except
    when the name already ends in '-frag'.
    """
    if len(newstuff) > 1: #revised 050527
        (res,) = self.autogroup_if_several(newstuff)
        # note [bruce 080414]: the tuple-unpack doubles as
        # "assert len(...) == 1"; should be spelled out that way someday.
    else:
        res = newstuff[0]
    # Rename it like the old code did (in mol.copy), though whether this
    # is a good idea seems very dubious [bruce 050524].
    if not res.name.endswith('-frag'):
        res.name = mol_copy_name(res.name, self.assy)
        # REVIEW: is self.assy correct here, even when we're copying
        # something from one assy to another? [bruce 080407 Q]
    else:
        # kluge - in the -frag case it's definitely bad to rename the copy
        # if this op added that suffix; we can't tell, but since it's
        # likely (and renaming is dubious anyway) don't do it in this case.
        pass
    #e in future we might also need to store a ref to the top original
    # node; problematic when it was a made-up wrapping group, and we didn't
    # keep track of the original when stripping useless outer groups.
    ##e this is where we'd like to recenter the view (rather than the
    # object, as the old code did for single chunks). ###@@@
    return res
##e ideally we'd implem atoms & bonds differently than now, and copy using Numeric, but not for now.
def recurse(self, orig): #e rename
    """
    copy whatever is needed from orig and below, but don't fix refs
    immediately; append new copies to self.newstuff.

    @note: this is initially called on self.verytopnode.
    """
    # review: should this method be renamed as private? [bruce 080414 comment]
    idorig = id(orig)
    res = None # default result, changed in many cases below
    if idorig in self.fullcopy:
        res = orig.copy_full_in_mapping(self)
            # recurses into groups, does atoms, bonds, jigs...
            # copies jigs leaving refs to orig things but with an at_end fixup method (??)
            # if refuses, puts None in the mapping as the copy
    elif idorig in self.atom_chunks:
        # orig contains some selected atoms (for now, that means it's a chunk)
        # but is not in fullcopy at any level. (Proof: if it's in fullcopy at
        # toplevel, we handled it in the 'if' case; if it's in fullcopy at a
        # lower level, this method never recurses into it at all, instead
        # letting copy_full_in_mapping on the top fullcopied group handle it.)
        # Ask it to make a partial copy with only the required atoms (which
        # it should also copy, handling bonds and adding them to the mapping
        # if necessary).
        atoms = self.atom_chunk_atoms.pop(idorig)
            # the pop is just a space-optim (imperfect since not done for
            # fullcopied chunks)
        res = orig.copy_in_mapping_with_specified_atoms(self, atoms)
    elif idorig in self.atom_jigs:
        # orig is something which confers properties on some selected atoms
        # (but does not contain them); copy it partially, arrange to fix
        # refs later (since not all those atoms might be copied yet).
        # Since this only happens for Anchor jigs, and the semantics are the
        # same as copying them fully, for now just use the same method.
        # [note, update 080414: 'copy_partial_in_mapping' is used in class
        # Node as an alias for (the Node implem of) copy_full_in_mapping,
        # even though it's never called.]
        res = orig.copy_full_in_mapping(self)
    elif orig.is_group():
        # what this section does depends on self.make_partial_groups
        # [bruce 080414 comments]:
        if self.make_partial_groups: #bruce 050527 made this optional so DND copy can not do it
            # copy whatever needs copying from orig into a fresh
            # self.newstuff (becoming local var newstuff below), with the
            # initial self.newstuff saved and restored afterwards
            save = self.newstuff
            self.newstuff = []
        map( self.recurse, orig.members)
            # note: runs whether or not make_partial_groups is set;
            # copies of members accumulate in self.newstuff
        if self.make_partial_groups: #050527
            newstuff = self.newstuff
            self.newstuff = save
        else:
            newstuff = None
            ## print "not self.make_partial_groups" # fyi: this does happen,
            ## for DND of copied nodes onto a node
            ###BUG: this does not yet work properly for DNA. No time to fix
            # for .rc1. [bruce 080414 late]
        if newstuff:
            # if self.make_partial_groups, this means: if anything in orig
            # was copied; otherwise it is always false (don't run this code).
            # [bruce 080414 comment]
            # We'll make some sort of Group from newstuff, as a partial copy
            # of orig (orig is a group which was not selected, so is only
            # needed to hold copies of selected things inside it, perhaps at
            # lower levels than its actual members).
            # First, if newstuff has one element which is a Group we made,
            # decide whether to merge it into the group we'll make as a
            # partial copy of orig. As part of fixing copy-of-Dna bugs
            # [bruce 080414], never dissolve a DnaStrandOrSegment or a
            # DnaGroup. (Note that debug prefs that open up groups in the MT
            # should *not* thereby affect copy behavior.)
            DnaStrandOrSegment = self.assy.DnaStrandOrSegment
            DnaGroup = self.assy.DnaGroup
                # NOTE(review): these two locals appear unused here now --
                # the class check moved into
                # _dissolveable_and_in_tentative_new_groups; original
                # comment said "make sure i got these attr names right
                # [remove when works]".
            if len(newstuff) == 1 and \
               self._dissolveable_and_in_tentative_new_groups( newstuff[0]):
                # merge names (and someday, pref settings) of orig and
                # newstuff[0].
                #update, bruce 080414: _non_dissolveable_group prevents the
                # merge for special classes (DnaStrandOrSegment, DnaGroup)
                # unless some special mergable case applies (none yet needed).
                innergroup = newstuff[0]
                name = orig.name + '/' + innergroup.name
                newstuff = innergroup.steal_members()
                    # no need to recurse, since innergroup
                    # would have merged with its own member if possible
            else:
                name = orig.name
            ## res = Group(name, self.assy, None, newstuff)
            res = orig.copy_with_provided_copied_partial_contents( name, self.assy, None, newstuff ) #bruce 080414
                # note: if newstuff elements are still members of innergroup
                # (probably not true after steal_members! should review),
                # this constructor call pulls them out of innergroup (slow?)
                #update, bruce 080414: this probably needs to make a special
                # class of Group (by asking orig what class to use) in some
                # cases... now it does!
            self.tentative_new_groups[id(res)] = res
                # mark it as tentative so enclosing-group copies are free to
                # discard it and more directly wrap its contents
            ## record_copy is probably not needed, but do it anyway just for
            ## its assertions, for now:
            self.record_copy(orig, res)
        #e else need to record None as result?
    # else ditto?
    # now res is the result of that (if anything)
    if res is not None:
        self.newstuff.append(res)
    return # from recurse
def mapper(self, orig): #k needed?
    """
    Return the copy recorded for orig in this mapping, or None.

    @note: a None result is ambiguous -- orig was either never copied,
           or it refused to be copied.
    """
    recorded_copies = self.origid_to_copy
    return recorded_copies.get(id(orig))
def record_copy(self, orig, copy): #k called by some but not all copiers; probably not needed except for certain atoms
    """
    Subclass-specific copy methods should call this to record the fact that orig
    (a node or a component of one, e.g. an atom or perhaps even a bond #k)
    is being copied as 'copy' in this mapping.
    (When this is called, copy must of course exist, but need not be "ready for use" --
    e.g. it's ok if orig's components are not yet copied into copy.)
    Also asserts orig was not already copied.
    """
    # was dict.has_key, which no longer exists in Python 3;
    # the 'in' operator is equivalent and works in both Python 2 and 3.
    assert id(orig) not in self.origid_to_copy
    self.origid_to_copy[id(orig)] = copy
def do_at_end(self, func): #e might change to dict
    """
    Node-specific copy methods can call this to request that func be run
    once when the entire copy operation is finished.

    Warning: it is run before [###doc -- before what?].
    """
    pending = self.do_these_at_end
    pending.append(func)
pass # end of class Copier
# end
| NanoCAD-master | cad/src/operations/ops_copy.py |
# Copyright 2004-2008 Nanorex, Inc. See LICENSE file for details.
"""
ops_rechunk.py -- operations for changing the way atoms are divided
into chunks, without altering the atoms or bonds themselves.
@version: $Id$
@copyright: 2004-2008 Nanorex, Inc. See LICENSE file for details.
History:
bruce 050507 made this by collecting appropriate methods from class Part.
"""
from utilities.Log import greenmsg, redmsg
from platform_dependent.PlatformDependent import fix_plurals
from model.chunk import Chunk
from utilities.constants import gensym
from utilities.prefs_constants import assignColorToBrokenDnaStrands_prefs_key
from dna.model.Dna_Constants import getNextStrandColor
import foundation.env as env
from utilities.debug_prefs import debug_pref, Choice_boolean_False
from utilities.debug import print_compact_stack
from simulation.sim_commandruns import adjustSinglet
class ops_rechunk_Mixin:
"""
Mixin class for providing "chunking" (i.e. atom grouping) methods to
class Part.
"""
#m modifySeparate needs to be changed to modifySplit. Need to coordinate
# this with Bruce since this is called directly from some mode modules.
# Mark 050209
#
# separate selected atoms into a new Chunk
# (one new mol for each existing one containing any selected atoms)
# do not break bonds
def modifySeparate(self, new_old_callback = None):
    """
    For each Chunk (named N) containing any selected atoms,
    move the selected atoms out of N (but without breaking any bonds)
    into a new Chunk which we name N-frag. If N is now empty, remove it.

    @param new_old_callback: If provided, then each time we create a new
           (and nonempty) fragment N-frag, call new_old_callback with the
           2 args N-frag and N (that is, with the new and old molecules).
    @type new_old_callback: function

    @warning: we pass the old mol N to that callback, even if it has no
              atoms and we deleted it from this assembly.
    """
    # bruce 040929 wrote or revised docstring, added new_old_callback feature
    # for use from Extrude.
    # Note that this is called both from a tool button and for internal uses.
    # bruce 041222 removed side effect on selection mode, after discussion
    # with Mark and Josh. Also added some status messages.
    # Questions: is it good to refrain from merging all moved atoms into one
    # new mol? If not, then if N becomes empty, should we rename N-frag to N?
    cmd = greenmsg("Separate: ")
    if not self.selatoms: # optimization, and different status msg
        msg = redmsg("No atoms selected")
        env.history.message(cmd + msg)
        return
    if 1:
        #bruce 060313 mitigate bug 1627, or "fix it by doing something we'd
        # rather not always have to do" -- create (if necessary) a new
        # toplevel group right now (before addmol does), avoiding a traceback
        # when all atoms in a clipboard item part consisting of a single
        # chunk are selected for this op, and the old part.topnode (that
        # chunk) disappears from loss of atoms before we add the newly made
        # chunk containing those same atoms.
        # The only things wrong with this fix are:
        # - It's inefficient (so is the main algorithm; easy to rewrite it
        #   to be faster, as explained below).
        # - The user ends up with a new Group even if one would theoretically
        #   not have been needed. But that's better than a traceback and
        #   disabled session, so for A7 this fix is fine.
        # - The same problem might arise in other situations (none known),
        #   so ideally we'd have a more general fix.
        # - It's nonmodular for this function to have to know anything about
        #   Parts.
        ##e btw, a simpler way to do part of the following is "part = self".
        ## should revise this when time to test it. [bruce 060329]
        someatom = self.selatoms.values()[0] # if atoms in multiple parts could be selected, we'd need this for all their mols
        part = someatom.molecule.part
        part.ensure_toplevel_group()
        # this is all a kluge; a better way would be to rewrite the main
        # algorithm to find the mols with selected atoms, only make numol
        # for those, and add it (addmol) before transferring all the atoms
        # to it.
        pass
    numolist=[]
    for mol in self.molecules[:]: # new mols are added during the loop!
        numol = Chunk(self.assy, gensym(mol.name + "-frag", self.assy)) # (in modifySeparate)
        for a in mol.atoms.values():
            if a.picked:
                # leave the moved atoms picked, so still visible
                a.hopmol(numol)
        if numol.atoms:
            numol.setDisplayStyle(mol.display) # Fixed bug 391. Mark 050710
            numol.setcolor(mol.color, repaint_in_MT = False)
                #bruce 070425, fix Extrude bug 2331 (also good for Separate
                # in general), "nice to have" for A9
            self.addmol(numol) ###e move it to just after the one it was made from? or, end of same group??
            numolist+=[numol]
            if new_old_callback:
                new_old_callback(numol, mol) # new feature 040929
    msg = fix_plurals("Created %d new chunk(s)" % len(numolist))
    env.history.message(cmd + msg)
    self.w.win_update() #e do this in callers instead?
#merge selected molecules together ###@@@ no update -- does caller do it?? [bruce 050223]
def merge(self):
    """
    Merges selected atoms into a single chunk, or merges the selected
    chunks into a single chunk.

    @note: If the selected atoms belong to the same chunk, nothing happens.
    """
    #mark 050411 changed name from weld to merge (Bug 515)
    #bruce 050131 comment: might now be safe for clipboard items
    # since all selection is now forced to be in the same one;
    # mostly academic since there's no pleasing way to use it on them.
    cmd = greenmsg("Combine Chunks: ")
    # atom selection takes precedence: delegate to chunk-from-atoms
    if self.selatoms:
        self.makeChunkFromSelectedAtoms()
        return
    selected_chunks = self.selmols
    if len(selected_chunks) < 2:
        env.history.message(cmd + redmsg("Need two or more selected chunks to merge"))
        return
    self.changed() #bruce 050131 bugfix or precaution
    # fold every other selected chunk into the first one
    target = selected_chunks[0]
    for other in selected_chunks[1:]:
        target.merge(other)
def makeChunkFromSelectedAtoms(self):
    """
    Create a new chunk from the selected atoms.
    """
    #ninad 070411 moved the original method out of 'merge' method to
    #facilitate implementation of 'Create New Chunk
    #from selected atoms' feature
    cmd = greenmsg("Create New Chunk: ")
        # NOTE(review): cmd is only used for the final success message;
        # the error branches below rebuild the same prefix as plain msg1
        # text instead -- looks like an inconsistency worth unifying.
    if not self.selatoms:
        msg1 = "Create New Chunk: "
        msg2 = redmsg('Select some atoms first to create a new chunk')
        env.history.message(msg1+msg2)
        return
    #ninad070411 : The following checks whether the selected atoms
    #belong to more than one chunk. If they don't (i.e. if they are part of
    #a single chunk), it returns from the method with a proper history msg.
    molList = []
    for atm in self.selatoms.values():
        if not len(molList) > 1:
            # stop collecting once we know there are at least two chunks
            mol = atm.molecule
            if mol not in molList:
                molList.append(mol)
    if len(molList) < 2:
        msg1 = "Create New Chunk: "
        msg2 = redmsg('Not created as the selected atoms are part of the \
same chunk.')
        env.history.message(msg1+msg2)
        return
    #bruce 060329 new feature: work on atoms too (put all selected atoms into a new chunk)
    self.ensure_toplevel_group() # avoid bug for part containing just one chunk, all atoms selected
    numol = Chunk(self.assy, gensym("Chunk", self.assy))
    natoms = len(self.selatoms)
    for a in self.selatoms.values():
        # leave the moved atoms picked, so still visible
        a.hopmol(numol)
    self.addmol(numol)
        #e should we add it in the same groups (and just after the chunks)
        # which these atoms used to belong to? could use similar scheme to
        # placing jigs...
    msg = fix_plurals("made chunk from %d atom(s)" % natoms) # len(numol.atoms) would count bondpoints, this doesn't
    msg = msg.replace('chunk', numol.name)
    env.history.message(cmd + msg)
    self.w.win_update()
def makeChunkFromAtomList(self,
                          atomList,
                          name = None,
                          group = None,
                          color = None):
    """
    Creates a new chunk from the given atom list.

    @param atomList: List of atoms from which to create the chunk.
    @type  atomList: list

    @param name: Name of new chunk. If None, we'll assign one.
    @type  name: str

    @param group: The group to add the new chunk to. If None, the new chunk
                  is added to the bottom of the model tree.
    @type  group: L{Group}

    @param color: Color of new chunk. If None, no chunk color is assigned
                  (chunk atoms will be drawn in their element colors).
    @type  color: tuple

    @return: The new chunk.
    @rtype:  L{Chunk}
    """
    assert atomList
    # pick the chunk name: caller's choice, or a generated one
    if not name:
        name = gensym("Chunk", self.assy)
    newChunk = Chunk(self.assy, name)
    # move every listed atom into the new chunk (bonds are preserved)
    for atom in atomList:
        atom.hopmol(newChunk)
    if group is None:
        self.addnode(newChunk)
    else:
        group.addchild(newChunk) #bruce 080318 addmember -> addchild
    newChunk.setcolor(color, repaint_in_MT = False)
    return newChunk
def makeStrandChunkFromBrokenStrand(self, x1, x2): # by Mark
    """
    Makes a new strand chunk using the two singlets just created by
    busting the original strand, which is now broken. If the original
    strand was a ring, no new chunk is created.

    The new strand chunk, which includes the atoms between the 3' end of
    the original strand and the new 5' end (i.e. the break point), is
    added to the same DNA group as the original strand and assigned a
    different color.

    @param x1: The first of two singlets created by busting a strand
               backbone bond. It is either the 3' or 5' open bond singlet,
               but we don't know yet.
    @type  x1: L{Atom}

    @param x2: The second of two singlets created by busting a backbone
               backbone bond. It is either the 3' or 5' open bond singlet,
               but we don't know yet.
    @type  x2: L{Atom}

    @return: The new strand chunk. Returns B{None} if no new strand chunk
             is created, as is the case of a ring.
    @rtype: L{Chunk}
    """
    minimize = debug_pref("Adjust broken strand bondpoints using minimizer?",
                          #bruce 080415 revised text (to not use the developer-
                          # jargon-only term "singlet"), changed prefs_key,
                          # and removed non_debug = True, for .rc2 release,
                          # since the repositioning bug this worked around
                          # is now fixed.
                          Choice_boolean_False,
                          prefs_key = True,
                          )
    _five_prime_atom = None
    _three_prime_atom = None
    # adjust each new bondpoint, then classify it by the direction of
    # the open bond it terminates
    for singlet in (x1, x2):
        adjustSinglet(singlet, minimize = minimize)
        open_bond = singlet.bonds[0]
        if open_bond.isFivePrimeOpenBond():
            _five_prime_atom = open_bond.other(singlet)
        else:
            _three_prime_atom = open_bond.other(singlet)
    # Make sure we have exactly one 3' and one 5' singlet.
    # If not, there is probably a direction error on the open bond(s)
    # that x1 and/or x2 are members of.
    if not _five_prime_atom:
        print_compact_stack("No 5' bondpoint.")
        return None
    if not _three_prime_atom:
        print_compact_stack("No 3' bondpoint.")
        return None
    atomList = self.o.assy.getConnectedAtoms([_five_prime_atom])
    if _three_prime_atom in atomList:
        # The strand was a closed loop strand, so we're done.
        return None # Since no new chunk was created.
    # See self.ensure_toplevel_group() docstring for explanation.
    self.ensure_toplevel_group()
    _group_five_prime_was_in = _five_prime_atom.molecule.dad
    # choose the new strand's color (optionally different from the old one,
    # controlled by a user preference)
    if env.prefs[assignColorToBrokenDnaStrands_prefs_key]:
        _new_strand_color = getNextStrandColor(_five_prime_atom.molecule.color)
    else:
        _new_strand_color = _five_prime_atom.molecule.color
    return self.makeChunkFromAtomList(atomList,
                                      group = _group_five_prime_was_in,
                                      name = gensym("Strand"),
                                          # doesn't need "DnaStrand" or self.assy,
                                          # since not normally seen by users
                                          # [bruce 080407 comment]
                                      color = _new_strand_color)
pass # end of class ops_rechunk_Mixin
# end
| NanoCAD-master | cad/src/operations/ops_rechunk.py |
# Copyright 2004-2007 Nanorex, Inc. See LICENSE file for details.
"""
pastables.py -- identifying and using nodes that can be pasted
@version: $Id$
@copyright: 2004-2007 Nanorex, Inc. See LICENSE file for details.
History:
Bruce circa 050121 split these out of existing code in depositMode,
probably by Josh, and generalized them (but left them in that file)
Ninad 2007-08-29 moved them into Utility.py and extended or
modified them
bruce 071026 moved them from Utility into a new file
TODO:
- All these functions probably need cleanup and generalization.
More importantly, they need to be methods in the Node API with
implementations on a few subclasses of Node.
- A longstanding NFR is to be able to define a hotspot in a Group
and paste it onto a bondpoint.
"""
from model.chunk import Chunk # only for isinstance
from foundation.Group import Group # only for isinstance
def is_pastable(obj):
    """
    whether to include a clipboard object on Build's pastable spinbox
    """
    #bruce 050127 made this more liberal: things which are not pastable
    # onto singlets may still be pastable into free space (as before my
    # changes of a few days ago).
    # Always evaluate is_pastable_onto_singlet first, in case it has a
    # klugy bugfixing side-effect.
    onto_singlet = is_pastable_onto_singlet(obj)
    return onto_singlet or is_pastable_into_free_space(obj)
# these separate is_pastable_xxx functions make a distinction which might not yet be used,
# but which should be used soon to display these kinds of pastables differently
# in the model tree and/or spinbox [bruce 050127]
# (they're only used in this file, but that comment suggests
# they ought to be public anyway)
def is_pastable_into_free_space(obj): #bruce 050127
    """
    Whether obj can be pasted into free space (i.e. not onto a bondpoint).
    """
    return isinstance(obj, (Chunk, Group))
def is_pastable_onto_singlet(obj): #bruce 050121 (renamed 050127)
    """
    Whether a copy of obj could be bonded to an existing singlet.
    """
    # calling find_hotspot_for_pasting might have a klugy bugfixing
    # side-effect -- not sure
    ok, unused_spot_or_reason = find_hotspot_for_pasting(obj)
    return ok
def find_hotspot_for_pasting(obj):
    """
    Return (True, hotspot) or (False, reason),
    depending on whether obj is pastable in Build mode
    (i.e. on whether a copy of it can be bonded to an existing singlet).
    In the two possible return values,
    hotspot will be one of obj's singlets, to use for pasting it
    (but the one to actually use is the one in the copy made by pasting),
    or reason is a string (for use in an error message) explaining why there isn't
    a findable hotspot. For now, the hotspot can only be found for certain
    chunks, but someday it might be defined for certain groups, as well,
    or anything else that can be bonded to an existing singlet.
    """
    #Note: method modified to support group pasting -- ninad 2007-08-29
    if not (isinstance(obj, Chunk) or isinstance(obj, Group)):
        return False, "only chunks or groups can be pasted" #e for now
    if isinstance(obj, Chunk):
        # simple case: delegate directly to the chunk-level helper
        ok, spot_or_whynot = _findHotspot(obj)
        return ok, spot_or_whynot
    elif isinstance(obj, Group):
        # collect every chunk anywhere inside the group, then use the
        # first one that has a findable hotspot
        groupChunks = []
        def func(node):
            if isinstance(node, Chunk):
                groupChunks.append(node)
        obj.apply2all(func)
        if len(groupChunks):
            for m in groupChunks:
                ok, spot_or_whynot = _findHotspot(m)
                if ok:
                    return ok, spot_or_whynot
        return False, "no hotspot in group's chunks"
    pass
def _findHotspot(obj):
    """
    Chunk-level hotspot lookup: return (True, singlet) when obj has a
    usable hotspot (an explicitly-set one, or its sole bondpoint),
    else (False, reason-string). Non-chunks fall through and yield None,
    as in the original code (callers only pass chunks).
    """
    if not isinstance(obj, Chunk):
        return None
    singlets = obj.singlets
    if len(singlets) == 0:
        return False, "no bondpoints in %r (only pastable in empty space)" % obj.name
    if len(singlets) > 1 and not obj.hotspot:
        return False, "%r has %d bondpoints, but none has been set as its hotspot" % (obj.name, len(singlets))
    return True, obj.hotspot or singlets[0]
# end
| NanoCAD-master | cad/src/operations/pastables.py |
# Copyright 2005-2007 Nanorex, Inc. See LICENSE file for details.
"""
undo_UI.py - Undo-related main menu commands other than Undo/Redo themselves
@author: bruce
@version: $Id$
@copyright: 2005-2007 Nanorex, Inc. See LICENSE file for details.
History:
bruce 071025 split this out of undo_manager.py
to rationalize import dependencies
TODO:
- general cleanup, especially of comments
WARNING: most comments have not been not reviewed since this code was moved
out of undo_manager.py [as of 071025], so they may be misleadingly out of
context.
"""
from utilities.debug import print_compact_traceback
import foundation.env as env
from utilities.Log import greenmsg, redmsg, _graymsg
from widgets.widget_helpers import PleaseConfirmMsgBox
# ==
def editMakeCheckpoint(win):
    """
    Make an undo checkpoint manually.

    Called from MWsemantics.editMakeCheckpoint, which is documented as:
    "Slot for making a checkpoint (only available when
    Automatic Checkpointing is disabled)."
    """
    env.history.message( greenmsg("Make Checkpoint"))
    try:
        #REVIEW: Should make sure this is correct with or without
        # auto-checkpointing enabled, and leaves that setting unchanged.
        # (Not urgent: the present UI only offers this when it's disabled.)
        undo_manager = win.assy.undo_manager
        if not undo_manager:
            # this should never happen
            env.history.message(redmsg("Make Checkpoint: error, no undo_manager"))
        else:
            undo_manager.make_manual_checkpoint()
            # no further msg needed; success message was emitted above
    except:
        print_compact_traceback("exception caught in editMakeCheckpoint: ")
        env.history.message(redmsg("Internal error in Make Checkpoint. "
                                   "Undo/Redo might be unsafe until a new file is opened."))
        #e that wording assumes we can't open more than one file at a time...
    return
def editClearUndoStack(win):
    """
    Clear the Undo/Redo stack, after asking the user to confirm.

    Called from MWsemantics.editClearUndoStack, which is documented as:
    "Slot for clearing the Undo Stack. Requires the user to confirm."
    """
    #bruce 060304, modified from Mark's prototype in MWsemantics
    #e the confirmation message (and the menu item text) should really
    # specify the amount of data to be lost...
    confirm_msg = "Please confirm that you want to clear the Undo/Redo Stack.<br>" + \
                  _graymsg("(This operation cannot be undone.)")
    if not PleaseConfirmMsgBox( confirm_msg):
        # TODO: probably needs a "don't show this again" checkbox,
        # with a prefs key...
        env.history.message("Clear Undo Stack: Cancelled.") #k needed??
        return
    # do it
    env.history.message(greenmsg("Clear Undo Stack"))
    # no further message needed if it works
    try:
        ##e Note: this might not actually free storage
        # [update 060309 -- i think as of a few days ago it does try to ##k];
        # eventually we may destroy and remake assy.undo_manager itself
        # before doing this (and make sure destroying it frees storage).
        ##e Should work with or without auto-checkpointing enabled, and
        # leave that setting unchanged. #####@@@@@
        win.assy.clear_undo_stack()
    except:
        print_compact_traceback("exception in clear_undo_stack: ")
        env.history.message(redmsg("Internal error in Clear Undo Stack. "
                                   "Undo/Redo might be unsafe until a new file is opened."))
        #e that wording assumes we can't open more than one file at a time...
    return
# bugs in editClearUndoStack [some fixed as of 060304 1132p PST, removed now]:
# cosmetic:
# + [worked around in this code, for now] '...' needed in menu text;
# - it ought to have ram estimate in menu text;
# - "don't show again" checkbox might be needed
# - does the dialog (or menu item if it doesn't have one) need a wiki help
# link?
# - dialog starts out too narrow
# - when Undo is disabled at the point where stack was cleared, maybe text
# should say it was cleared? "Undo stack cleared (%d.)" ???
# end
| NanoCAD-master | cad/src/operations/undo_UI.py |
# Copyright 2005-2007 Nanorex, Inc. See LICENSE file for details.
"""
bonds_from_atoms.py -- experimental code for inferring bonds from
atom positions and elements alone
@author: Dr. K. Eric Drexler
@version: $Id$
@copyright: 2005-2007 Nanorex, Inc. See LICENSE file for details.
History:
bruce 050906 translated into Python some Lisp code
contributed by Dr. K. Eric Drexler.
bruce 071030 moved inferBonds interface to that code (probably by Will) here,
from bonds.py.
TODO: make this directly accessible as one or more user operations.
(As of 071030 it's only used when importing some PDB files.)
"""
# Translation into Python of Lisp code contributed by Dr. K. Eric Drexler.
# Some comments are from contributed code, perhaps paraphrased.
#e Plans: for efficiency, we'll further translate this into Pyrex or C, and/or combine
# with atom-position hashtable rather than scanning all pairs of atoms.
# This implem is just for testing and experimentation.
# This code does not yet consider the possibility of non-sp3 atomtypes,
# and will need changes to properly handle those.
# For now it ignores existing atomtypes and creates new bonds appropriate for sp3
# perhaps plus some extra too-long bonds at the end, if permitted by valence.
import math
from geometry.VQT import vlen
from geometry.VQT import atom_angle_radians
import foundation.env as env
from model.bonds import bond_atoms_faster
from geometry.NeighborhoodGenerator import NeighborhoodGenerator
from model.bond_constants import atoms_are_bonded # was: from bonds import bonded
from model.bond_constants import V_SINGLE
from model.bond_constants import bond_params
# constants; angles are in radians
degrees = math.pi / 180
TARGET_ANGLE = 114 * degrees #e this will probably need to be generalized for non-sp3 atoms
MIN_BOND_ANGLE = 30 * degrees # accepts moderately distorted three-membered rings
ANGLE_ACCEPT_DIST = 0.9 # ignore angle cutoff below this distance (in Angstroms)
MAX_DIST_RATIO_HUNGRY = 2.0 # prohibit bonds way longer than their proper length
MAX_DIST_RATIO_NON_HUNGRY = 1.4 # prohibit bonds a little longer than their proper length
    # NOTE(review): "HUNGRY" presumably means an atom still short of its
    # permitted bond count gets the looser 2.0 cutoff -- confirm against the
    # code that consumes these constants (not visible in this section).
DIST_COST_FACTOR = 5.0 # multiplier on square of distance-ratio beyond optimal
# (utilities used by contributed code, defined differently for nE-1)

def atm_distance(atom1, atom2):
    """
    Return the distance between the two atoms' current positions
    (same length units as atom.posn(), i.e. Angstroms).
    """
    return vlen(atom1.posn() - atom2.posn())

atm_angle = atom_angle_radians # args are three atoms
# utility for creating mutable linked lists in Python
def linked_list( lis1, func = None ):
    """
    Given a list of 0 or more elements (e.g. [a,b,c,d]),
    return a "python mutable linked list" of the form [a, [b, [c, [d, None]]]].
    If func is supplied, apply it to each element of the original list
    (e.g. return [f(a),[f(b),[f(c),[f(d),None]]]] for f = func).
    Note that we terminate the resulting linked list with None, not [] or f(None).
    It might be easier for some applications (which want to append elements to the result,
    leaving it in linked list form) if we terminated with [], so this is a possible future change in API.

    @param lis1: an ordinary Python list; no longer modified by this function
    @param func: optional function applied to each element before linking
    @return: the head cell of the linked list, or None for empty input
    """
    assert isinstance(lis1, list)
    res = None # correct result for 0-length lis1
    # Build from the tail end by iterating a reversed *view*; the old code
    # called lis1.reverse(), which reversed the caller's list in place as an
    # unwanted side effect -- fixed here.
    for elt in reversed(lis1):
        if func is not None:
            elt = func(elt)
        res = [elt, res]
    return res
#e unlink_list?
def idealBondLength(atm1, atm2):
    """
    Return the ideal length of a single bond between atm1 and atm2,
    assuming they have their current elements but their default atomtypes
    (ignoring their current atomtypes).
    @see: similar function, ideal_bond_length in bond_constants.py
        (not directly useable by this function)
    """
    # don't use getEquilibriumDistanceForBond directly, in case pyrex sim (ND-1)
    # is not available [bruce 060620]
    atype1 = atm1.element.atomtypes[0]
    atype2 = atm2.element.atomtypes[0]
    r1, r2 = bond_params(atype1, atype2, V_SINGLE)
    return r1 + r2
def max_atom_bonds(atom, special_cases={'H': 1,
                                        'B': 4,
                                        'C': 4,
                                        'N': 4,
                                        'O': 2,
                                        'F': 1,
                                        'Si': 4,
                                        'P': 5,
                                        'S': 4,
                                        'Cl': 1}): # coded differently for nE-1
    """
    Return max number of bonds permitted on this atom, based only on its element
    (for any atomtype, ignoring current atomtype of atom). (Returns 0 for noble gases.)

    @param special_cases: maps element symbol to a hand-chosen bond limit which
        overrides the per-atomtype maximum (never mutated, so the shared
        default dict is safe here)
    """
    sym = atom.element.symbol
    if sym in special_cases: # was dict.has_key, which no longer exists in Python 3
        return special_cases[sym]
    # otherwise take the largest numbonds over all of this element's atomtypes
    maxbonds = 0
    for atype in atom.element.atomtypes:
        if atype.numbonds > maxbonds:
            maxbonds = atype.numbonds
    return maxbonds
def min_atom_bonds(atom): # coded differently for nE-1
    """
    Return min number of bonds permitted on this atom, based only on its element
    (for any atomtype, ignoring current atomtype of atom). (Returns 0 for noble gases.)
    That is, find the atomtype with the smallest number of bonds (e.g. sp for carbon,
    which can have just two double bonds) and return that number of bonds. This is the
    smallest number of bonds that could possibly make this element happy.
    """
    smallest = 10000 # sentinel, returned unchanged if the element has no atomtypes
    for atomtype in atom.element.atomtypes:
        count = atomtype.numbonds
        if count < smallest:
            smallest = count
    return smallest
"""
Eric D writes: If the bond-number limit for each atom is based on its
atom-type, rather than on its element-type, there should be no
problem. Or, if atom-types are unknown, then we can use the maximum
valence for that atom that occurs in non-exotic chemistry. Damian
should cross-check this, but I'd use:
H 1
B 4
C 4
N 4
O 2
F 1
Si 4
P 5
S 4
Cl 1
Many elements can have any one of several atomtypes based on
hybridization. Our concepts of these appear in the _mendeleev table in
elements.py. So for each element there is a minimum number of possible
bonds and a maximum number of possible bonds; for carbon these are
respectively 2 (two double bonds for sp hybridization) and 4 (sp3).
There are two things that could be the independent variable. Either
you derive the atomtype from the number of bonds, or you hold the
atomtype fixed and permit only the number of bonds it allows.
Currently max_atom_bonds() is looking at
atom.element.atomtypes[0].numbonds to determine how many bonds are OK
for this atom. That presumes the first atomtype permits the most bonds,
which is presently [and still, 071101] true, but it would be better
to take an explicit maximum.
So we DO want the maximum number of bonds for ANY atomtype for this
element, with the presumption that somebody else will later
rehybridize the atom to get the right atomtype. We don't need to do
that here.
The other messy thing is this: If we know we don't have enough bonds
for the element (i.e. fewer than the smallest number of bonds for any
of its atomtypes) then we should use MAX_DIST_RATIO = 2.0 because we
are hungry for more bonds. When we get enough for the minimum, we
reduce MAX_DIST_RATIO to 1.4 because we're not so hungry any more.
MAX_DIST_RATIO is used in two places. One is in list_potential_bonds,
where we clearly want this policy. (Complication: there are two
atoms involved - we will use the smaller value only when BOTH are
non-hungry.) The other place is atm_distance_cost, another case
where there are two atoms involved. I think it applies there too.
"""
def max_dist_ratio(atm1, atm2):
    """
    Return the maximum allowed ratio of actual to ideal bond length for a
    hypothetical atm1-atm2 bond: the looser limit applies while either atom
    is still "hungry" (has fewer real bonds than its least-demanding
    atomtype requires).
    """
    def is_hungry(atm):
        return len(atm.realNeighbors()) < min_atom_bonds(atm)
    if not (is_hungry(atm1) or is_hungry(atm2)):
        return MAX_DIST_RATIO_NON_HUNGRY
    return MAX_DIST_RATIO_HUNGRY
def bondable_atm(atom): # coded differently for nE-1 due to open bonds
    """
    Could this atom accept any more bonds
    (assuming it could have any of its atomtypes,
    and ignoring positions and elements of atoms it's already bonded to,
    and ignoring open bonds,
    and treating all existing bonds as single bonds)?
    """
    #e len(atom.bonds) would be faster but would not ignore open bonds;
    # entire alg could be recoded to avoid ever letting open bonds exist,
    # and then this could be speeded up.
    n_real = len(atom.realNeighbors())
    return n_real < max_atom_bonds(atom)
def bond_angle_cost(angle, accept, bond_length_ratio):
    """
    Return the cost of the given angle, or None if that cost is infinite.
    Note that the return value can be 0.0, so callers should only
    test it for "is None", not for its boolean value.
    If accept is true, don't use the minimum-angle cutoff (i.e. no angle
    is too small to be accepted).

    @param angle: angle (in radians) between an existing bond and the
        proposed new bond
    @param accept: if true, skip the minimum-angle rejection below
    @param bond_length_ratio: proposed bond length divided by its ideal
        length; stretched bonds raise the minimum acceptable angle
    """
    # if bond is too short, bond angle constraint changes
    # NOTE(review): precedence here is (MIN_BOND_ANGLE * 1.0) + penalty;
    # possibly MIN_BOND_ANGLE * (1.0 + penalty) was intended -- confirm.
    if not (accept or angle > MIN_BOND_ANGLE * 1.0 + (2.0 * max(0.0, bond_length_ratio - 1.0)**2)):
        return None
    diff = min(0.0, angle - TARGET_ANGLE) # for heuristic cost, treat all angles as approximately tetrahedral
    square = diff * diff
    # NOTE(review): diff = min(0.0, ...) above can never be positive, so the
    # "wide angle" branch below is unreachable and wide angles always cost 0.0
    # (square == 0.0 falls into the else branch). The comments suggest the
    # intent may have been diff = angle - TARGET_ANGLE. Left unchanged since
    # "fixing" it would alter which bonds get made -- confirm intent first.
    if 0.0 < diff:
        # wide angle
        return square
    else:
        # tight angle -- larger quadratic penalty
        return 2.0 * square
def atm_angle_cost(atm1, atm2, ratio):
    """
    Return total cost of all bond-angles which include the atm1-atm2 bond
    (where one bond angle is said to include the two bonds whose angle it describes);
    None means infinite cost.
    """
    accept = atm_distance(atm1, atm2) < ANGLE_ACCEPT_DIST
    total = 0.0
    # Pair the proposed bond with every existing bond at each endpoint;
    # note the swapped atom order for the second endpoint.
    for near, far in ((atm1, atm2), (atm2, atm1)):
        for neighbor in near.realNeighbors():
            cost = bond_angle_cost( atm_angle(neighbor, near, far), accept, ratio)
            if cost is None:
                # cost can be 0.0, so don't use a boolean test here [bruce 050906]
                return None
            total += cost
    return total
covrad_table = dict( [
    # covalent radii in Angstroms, from webelements.com (via contributed code)
    ('H', 0.37),
    ('C', 0.77), ('N', 0.75), ('O', 0.73), ('F', 0.72),
    ('Si', 1.11), ('P', 1.06), ('S', 1.02), ('Cl', 0.99),
])

def covalent_radius(atm):
    """
    Return atm's covalent radius (assuming default atomtype, not its current one), always as a float.
    """
    try:
        return float( covrad_table[atm.element.symbol] ) # use radius from contributed code, if defined
    except KeyError:
        # print-function form works in both Python 2 and 3 (was a py2 print statement)
        print("fyi: covalent radius not in table: %s" % atm.element.symbol) # make sure I didn't misspell any symbol names
        return float( atm.element.atomtypes[0].rcovalent ) # otherwise use nE-1 radius
def atm_distance_cost(atm1, atm2, ratio):
    """
    Return cost (due to length alone) of a hypothetical bond between two
    atoms; None means infinite.
    """
    if not (ratio < max_dist_ratio(atm1, atm2)):
        return None # too stretched to be permitted at all
    if ratio < 1.0:
        # short bond: weak preference for the smallest of small distances
        return ratio * 0.01
    # long bond -- note, long and short bond cost agree where they join at ratio == 1.0
    return 0.01 + DIST_COST_FACTOR * (ratio - 1.0) ** 2 # quadratic penalty for long bonds
_enegs = ['F', 'Cl', 'O', 'S', 'N', 'P']

def bond_element_cost(atm1, atm2, _enegs=_enegs):
    """
    Return 1.0 to penalize bonding a pair of electronegative atoms,
    otherwise 0.0.
    """
    sym1 = atm1.element.symbol
    sym2 = atm2.element.symbol
    if sym1 in _enegs and sym2 in _enegs:
        return 1.0
    return 0.0
def bond_cost(atm1, atm2):
    """
    Return total cost of a hypothetical new bond between two atoms, or None
    if the bond is not permitted or already exists.
    """
    # check valence of existing bonds
    if not bondable_atm(atm1):
        return None
    if not bondable_atm(atm2):
        return None
    if atoms_are_bonded(atm1, atm2): # already bonded? (redundant after list-potential-bonds) ###
        return None
    # note the assumption that we are talking about SINGLE bonds, which runs throughout this code
    # some day we should consider the possibility of higher-order bonds; a stab in this direction
    # is the bondtyp argument in make_bonds(), but that's really a kludge
    best_dist = idealBondLength(atm1, atm2)
    # previously: best_dist = covalent_radius(atm1) + covalent_radius(atm2)
    if not best_dist:
        return None # avoid ZeroDivision exception from pondering a He-He bond
    # best_dist is always a float, so this is never "integer division"
    ratio = atm_distance(atm1, atm2) / best_dist
    dist_cost = atm_distance_cost(atm1, atm2, ratio)
    if dist_cost is None:
        return None
    angle_cost = atm_angle_cost(atm1, atm2, ratio)
    if angle_cost is None:
        return None
    return angle_cost + dist_cost + bond_element_cost(atm1, atm2)
def list_potential_bonds(atmlist0):
    """
    Given a list of atoms, return a list of triples (cost, atm1, atm2) for all bondable pairs of atoms in the list.
    Each pair of atoms is considered separately, as if only it would be bonded, in addition to all existing bonds.
    In other words, the returned bonds can't necessarily all be made (due to atom valence), but any one alone can be made,
    in addition to whatever bonds the atoms currently have.
    Warning: the current implementation takes quadratic time in len(atmlist0). The return value will have reasonable
    size for physically realistic atmlists, but could be quadratic in size for unrealistic ones (e.g. if all atom
    positions were compressed into a small region of space).
    """
    atmlist = [atm for atm in atmlist0 if bondable_atm(atm)]
    maxBondLength = 2.0
    ngen = NeighborhoodGenerator(atmlist, maxBondLength)
    result = []
    for atm1 in atmlist:
        pos1 = atm1.posn()
        for atm2 in ngen.region(pos1):
            # consider each pair only once (and never an atom with itself)
            if atm2.key >= atm1.key:
                continue
            if not (vlen(pos1 - atm2.posn()) < max_dist_ratio(atm1, atm2) * idealBondLength(atm1, atm2)):
                continue # too far apart to consider bonding
            cost = bond_cost(atm1, atm2)
            if cost is not None:
                result.append((cost, atm1, atm2))
    result.sort() # least cost first
    return result
def make_bonds(atmlist, bondtyp = V_SINGLE):
    """
    Make some bonds between the given atoms. At any moment make the cheapest permitted unmade bond;
    stop only when no more bonds are permitted (i.e. all potential bonds have infinite cost).
    Assume that newly made bonds can never decrease the cost of potential bonds.
    (This is needed to justify the algorithm, which moves potential bonds later in an ordered list
    when their cost has increased since last checked;
    it's true since the bond cost (as defined elsewhere in this module) is a sum of terms,
    and adding a bond can add new terms but doesn't change the value of any existing terms.)

    @param atmlist: the atoms to consider bonding to one another
    @param bondtyp: bond order for every new bond (default V_SINGLE)
    @return: the number of bonds created.
    """
    # Implementation note: the only way I know of to do this efficiently is to use a linked list
    # (as the Lisp code did), even though this is less natural in Python.
    bondlst0 = list_potential_bonds(atmlist) # a list of triples (cost, atm1, atm2), sorted cheapest-first
    bondlst = linked_list(bondlst0, list) # arg2 (list) is a function to turn the triple (cost, atm1, atm2) into a list.
        # value is a list [[cost, atm1, atm2], next]; needs to be mutable in next and cost elements
        # (Note: Lisp code used a Lisp linked list of triples, (cost atm1 atm2 . next), but all Lisp lists are mutable.)
    res = 0
    while bondlst:
        entry, bondlst = bondlst # pop the cheapest remaining candidate
            # original code assumed this was too early to change bondlst -- we might insert a new element right after entry;
            # but I think that's not possible, so we can move forward now [bruce 050906]
        oldcostjunk, atm1, atm2 = entry
        cost = bond_cost(atm1, atm2) # might be different than last recorded cost
            #e optim: could invalidate changed costs, avoid recomputing some of them, incrementally adjust others
        if cost is not None:
            if (bondlst is None) or bondlst[0][0] >= cost:
                # if there's no next-best bond, or its cost is no better than this one's, make this bond
                bond_atoms_faster(atm1, atm2, bondtyp) # optimized bond_atoms, and doesn't make any open bonds
                res += 1
            else:
                # cost has increased beyond next bond in list -- update entry and move it down list
                entry[0] = cost
                curr = bondlst # loop variable - next possible list element after which we might insert entry
                while 1:
                    # (at this point, we know curr is not None, and we already compared cost to curr[0][0])
                    junk, next = curr
                    if (next is None) or next[0][0] >= cost:
                        break # found insertion point: right after curr, before next (next might be None)
                    curr = next
                assert curr[1] is next #e remove when works
                # insert entry after curr, before next
                curr[1] = [entry, next]
                pass
            pass
    return res
# end of translation of contributed code
# ==
def inferBonds(mol): #bruce 071030 moved this from bonds.py to bonds_from_atoms.py
    """
    Infer and make bonds among the atoms of chunk <mol>: first kill pairs of
    coincident bondpoints (singlets within maxBondLength of each other), then
    run make_bonds() over all of the chunk's atoms.
    """
    # not sure how big a margin we should have for "coincident"
    maxBondLength = 2.0
    # first remove any coincident singlets
    singlets = [a for a in mol.atoms.values() if a.is_singlet()]
    removable = {}
    sngen = NeighborhoodGenerator(singlets, maxBondLength)
    for sing1 in singlets:
        pos1 = sing1.posn()
        for sing2 in sngen.region(pos1):
            if sing1.key != sing2.key:
                # a distinct singlet is within range; mark both for removal
                removable[sing1.key] = sing1
                removable[sing2.key] = sing2
    for victim in removable.values():
        victim.kill()
    from operations.bonds_from_atoms import make_bonds
    make_bonds(mol.atoms.values())
    return
# ==
from utilities.debug import register_debug_menu_command
def remake_bonds_in_selection( glpane ):
    """
    Remake all bonds between selected atoms (or between atoms in selected chunks),
    in the given Selection object (produced by e.g. selection_from_part),
    by destroying all old bonds between selected atoms and all open bonds on them,
    changing all selected atoms to their default atomtype,
    and creating new single bonds using Eric Drexler's greedy algorithm which considers
    bond lengths and angles for sp3 atoms.
    Note: bonds between selected and unselected atoms are not altered, but are noticed
    when deciding what new bonds to make.
    Note: the current algorithm might make additional stretched bonds, in cases when
    it ought to infer non-sp3 atomtypes and make fewer bonds.
    """
    #bruce 071030 fixed several bugs in this function I wrote long ago;
    # evidently it never worked -- was it finished?? Now it works, at least
    # for the trivial test case of 2 nearby C(sp3) atoms.
    atmlist = glpane.assy.getSelectedAtoms()
        # notes: this includes atoms inside selected chunks;
        # it also includes a selected jig's atoms, unlike most atom operations.
    atmdict = {}
    for atm in atmlist:
        atmdict[atm.key] = atm # for efficiency of membership tests below
    n_atoms = len(atmlist)
    n_bonds_destroyed = 0
    n_atomtypes_changed = 0
    # Count the non-default atomtypes before changing them or destroying any
    # bonds, in case some atomtypes weren't initialized yet
    # (since their getattr method looks at number of bonds).
    for atm in atmlist:
        if atm.atomtype is not atm.element.atomtypes[0]:
            # this assumes all atoms will be changed to default atomtype, not an inferred one
            n_atomtypes_changed += 1
    for atm in atmlist:
        for bond in atm.bonds[:]:
            partner = bond.other(atm)
            if partner.key in atmdict:
                ###e to also zap singlets we'd need "or partner.element is Singlet" and to prevent bond.bust from remaking them!
                # (singlets can't be selected)
                bond.bust()
                n_bonds_destroyed += 1 # (count real bonds only)
        atm.set_atomtype(atm.element.atomtypes[0]) ###k this might remake singlets if it changes atomtype
    #e future optim: revise above to also destroy singlets and bonds to them
    # (btw I think make_bonds doesn't make any singlets as it runs)
    n_bonds_made = make_bonds(atmlist)
    #e it would be nice to figure out how many of these are the same as the ones we destroyed, etc
    for atm in atmlist:
        atm.remake_bondpoints()
    env.history.message(
        "on %d selected atoms, replaced %d old bond(s) with %d new (or same) bond(s); changed %d atomtype(s) to default" %
        (n_atoms, n_bonds_destroyed, n_bonds_made, n_atomtypes_changed)
    )
    #e note, present implem marks lots of atoms as changed (from destroying and remaking bonds) which did not change;
    # this only matters much for redrawing speed (of unchanged chunks) and since file is always marked as changed
    # even if nothing changed at all.
    return

register_debug_menu_command( "Remake Bonds", remake_bonds_in_selection )
#end
| NanoCAD-master | cad/src/operations/bonds_from_atoms.py |
# Copyright 2008 Nanorex, Inc. See LICENSE file for details.
"""
ops_debug.py -- various operations/commands for debugging
@author: Bruce
@version: $Id$
@copyright: 2008 Nanorex, Inc. See LICENSE file for details.
"""
import sys, os
import utilities.EndUser as EndUser
from utilities.constants import CAD_SRC_PATH
from utilities.debug import print_compact_traceback
from utilities.debug import register_debug_menu_command
from platform_dependent.PlatformDependent import find_or_make_Nanorex_subdir
from foundation.FeatureDescriptor import find_or_make_descriptor_for_possible_feature_object
from foundation.FeatureDescriptor import command_package_part_of_module_name
from foundation.FeatureDescriptor import otherCommandPackage_Descriptor
# ==
def _tempfilename( basename): # todo: rename, refile
    """
    Return a full pathname for basename inside the Nanorex temporary-files
    directory (creating that directory if necessary).

    @warning: does not check whether file already exists.
    """
    temporary_dir = find_or_make_Nanorex_subdir("TemporaryFiles") # also hardcoded elsewhere
    return os.path.join(temporary_dir, basename)
# ==
def import_all_modules_cmd(glpane): #bruce 080721
    """
    Debug command: import every Python source file under cad/src (as listed
    by tools/AllPyFiles.sh), printing a traceback for any module whose import
    fails, so import errors can be found without exercising the whole app.
    """
    del glpane
    _original_cwd = os.getcwd() # so we can restore it before returning
    try:
        os.chdir(CAD_SRC_PATH)
        # this doesn't work, don't know why:
        ## pipe = os.popen("./tools/AllPyFiles.sh")
        ## modules = pipe.readlines() # IOError: [Errno 4] Interrupted system call
        ## pipe.close()
        # so try this instead:
        tmpfile = _tempfilename( "_all_modules")
        os.system("./tools/AllPyFiles.sh > '%s'" % tmpfile)
        file1 = open(tmpfile, "rU") # was file(...), a builtin removed in Python 3
        try:
            modules = file1.readlines()
        finally:
            file1.close() # bugfix: was "file1.close" (missing parens), so the file was never closed
        os.remove(tmpfile)
        print("")
        print("will scan %d source files from AllPyFiles" % len(modules)) # 722 files as of 080721!
        modules.sort()
        SKIP_THESE = ("_import_roots", "main", "ExecSubDir")
        import_these = []
        cinit = 0
        for module in modules:
            module = module.strip()
            if module.startswith("./"):
                module = module[2:]
            basename = module
            assert os.path.exists(module), "should exist: %r" % (module,)
            assert module.endswith(".py"), "should end with .py: %r" % (module,)
            module = module[:-3]
            if module.endswith("/__init__"):
                # don't bother with this if its directory is empty;
                # otherwise assume it'll be imported implicitly
                cinit += 1
                continue
            if module in SKIP_THESE or ' ' in module or '-' in module:
                # those funny chars can happen when developers have junk files lying around
                # todo: do a real regexp match, permit identifiers and '/' only;
                # or, only do this for files known to svn?
                print("skipping import of %s" % basename)
                continue
            import_these.append(module.replace('/', '.'))
            continue
        if cinit:
            print("(skipping direct import of %d __init__.py files)" % cinit)
        print("")
        print("will import %d modules" % len(import_these))
        for module in import_these:
            statement = "import " + module
            try:
                exec(statement) # exec(...) form works in both Python 2 and 3
            except:
                print_compact_traceback("ignoring exception in %r: " % statement)
                pass
        print("done importing all modules")
        print("")
    except:
        print_compact_traceback("ignoring exception: ")
    os.chdir(_original_cwd)
    return # from import_all_modules_cmd
# ==
def export_command_table_cmd(glpane, _might_reload = True): #bruce 080721, unfinished
    """
    Debug command: scan all loaded modules for command/feature objects,
    build a descriptor for each, and print a sorted table of them.

    @param glpane: graphics pane (unused; present to match the debug-menu
        command signature).
    @param _might_reload: private. On the first call we try to reload this
        module and delegate to the reloaded version, so edits to this file
        take effect without restarting NE1.

    @note: since this only covers loaded commands, you might want to
           run "Import all source files" before running this.
    """
    if _might_reload:
        try:
            import operations.ops_debug as _this_module
                # (to be precise: new version of this module)
            reload(_this_module)
            _this_module.export_command_table_cmd # make sure it's there
        except:
            print_compact_traceback("fyi: auto-reload failed: ")
            pass
        else:
            # delegate to the freshly reloaded code, telling it not to reload again
            _this_module.export_command_table_cmd(glpane, _might_reload = False)
            return
        pass
    del glpane
    global_values = {} # id(val) -> val (not all vals are hashable)
    mcount = 0 # number of loaded modules scanned
    all_command_packages_dict = {}
    for module in sys.modules.itervalues():
        if module:
            # Note: this includes built-in and extension modules.
            # If they were easy to exclude, we'd exclude them here,
            # since we are only looking for objects defined in modules
            # in cad/src. I guess comparing __file__ and CAD_SRC_PATH
            # would not be too hard... so do that if the need arises.
            mcount += 1
            for name in dir(module):
                value = getattr(module, name)
                global_values[id(value)] = value # also store module and name?
            if 0:
                print module # e.g. <module 'commands.Move' from '/Nanorex/trunk/cad/src/commands/Move/__init__.pyc'>
                print getattr(module, '__file__', '<no file>') # e.g. /Nanorex/trunk/cad/src/commands/Move/__init__.pyc
                    # e.g. <module 'imp' (built-in)> has no file; name is 'imp'
                print getattr(module, '__name__', '<no name>') # e.g. commands.Move
                    # all modules have a name.
                print
            cp = command_package_part_of_module_name( module.__name__)
            if cp:
                all_command_packages_dict[ cp] = cp
            pass
        continue
    print "found %d distinct global values in %d modules" % ( len(global_values), mcount)
    if 1:
        # not really needed, just curious how many types of global values there are
        global_value_types = {} # maps type -> type (I assume all types are hashable)
        for v in global_values.itervalues():
            t = type(v)
            global_value_types[t] = t
        print "of %d distinct types" % len(global_value_types)
            # 745 types!
            # e.g. one class per OpenGL function, for some reason;
            # and some distinct types which print the same,
            # e.g. many instances of <class 'ctypes.CFunctionType'>.
        # print global_value_types.values() # just to see it...
    print
    print "found %d command_packages" % len(all_command_packages_dict) # a dict, from and to their names
    all_command_packages_list = all_command_packages_dict.values()
    all_command_packages_list.sort()
    # print "\n".join( all_command_packages_list)
    # print
    # find command descriptors in global_values
    descriptors = {}
    for thing in global_values.itervalues():
        d = find_or_make_descriptor_for_possible_feature_object( thing)
        if d is not None:
            descriptors[d] = d # duplicates can occur
    # add notes about the command packages in which we didn't find any commands,
    # and warn about those in which we found more than one
    command_packages_with_commands = {}
    for d in descriptors:
        cp = d.command_package
        if cp:
            if cp not in all_command_packages_dict:
                print "bug: command package not found in initial scan:", cp
            if command_packages_with_commands.has_key(cp):
                # this is normal now; todo: print at most once per cp
                print "fyi: command package with more than one command:", cp
            command_packages_with_commands[ cp] = cp
    for cp in all_command_packages_list:
        if not cp in command_packages_with_commands:
            d = otherCommandPackage_Descriptor(cp)
            descriptors[d] = d
    # change descriptors into a list, and sort it
    items = [ ( descriptor.sort_key(), descriptor) for descriptor in descriptors]
        ### or call sort_by or sorted_by, if it exists?
    items.sort()
    descriptors = [ descriptor for junk, descriptor in items ]
    # print results
    print "found %d commands:" % len(descriptors)
    print
    for descriptor in descriptors:
        descriptor.print_plain() # todo: add more info to that; print into a file
        print
    print "done"
    return # from export_command_table_cmd
# ==
def initialize(): # called from startup_misc.py
    """
    Register this module's debug-menu commands (developer builds only).
    """
    if not EndUser.enableDeveloperFeatures():
        return
    register_debug_menu_command( "Import all source files", import_all_modules_cmd )
    register_debug_menu_command( "Export command table", export_command_table_cmd )
    return
# end
| NanoCAD-master | cad/src/operations/ops_debug.py |
# Copyright 2004-2007 Nanorex, Inc. See LICENSE file for details.
"""
ops_connected.py -- operations on the connectivity of bond networks.
@author: Josh, Bruce
@version: $Id$
@copyright: 2004-2007 Nanorex, Inc. See LICENSE file for details.
History:
bruce 050507 made this by collecting appropriate methods (by Josh) from class Part.
bruce 050520 added new code (mostly in a separate new file) for Select Doubly.
bruce 050629 code cleanup.
"""
from utilities.Log import greenmsg, redmsg
import foundation.env as env
class ops_connected_Mixin:
"Mixin for providing Select Connected and Select Doubly methods to class Part"
#mark 060128 made this more general by adding the atomlist arg.
def selectConnected(self, atomlist = None):
"""
Selects any atom that can be reached from any currently
selected atom through a sequence of bonds.
@param atomlist: If supplied, use this list of atoms to select connected
atoms instead of the currently selected atoms.
@type atomlist: List of atoms.
@attention: Only correctly reports the number newly selected atoms.
"""
###@@@ should make sure we don't traverse interspace bonds, until all bugs creating them are fixed
cmd = greenmsg("Select Connected: ")
if atomlist is None and not self.selatoms:
msg = redmsg("No atoms selected")
env.history.message(cmd + msg)
return
if atomlist is None: # test for None since atomlist can be an empty list.
atomlist = self.selatoms.values()
catoms = self.getConnectedAtoms(atomlist)
if not len(catoms):
return
natoms = 0
for atom in catoms[:]:
if not atom.picked:
atom.pick()
if atom.picked:
# Just in case a selection filter was applied to this atom.
natoms += 1
else:
natoms += 1 # Counts atom that is already picked.
from platform_dependent.PlatformDependent import fix_plurals
info = fix_plurals( "%d new atom(s) selected." % natoms)
env.history.message( cmd + info)
self.o.gl_update()
def unselectConnected(self, atomlist=None):
"""
Unselect any atom that can be reached from any currently
selected atom through a sequence of bonds.
If <atomlist> is supplied, use it instead of the currently selected atoms.
"""
cmd = greenmsg("Unselect Connected: ")
if atomlist is None and not self.selatoms:
msg = redmsg("No atoms selected")
env.history.message(cmd + msg)
return
if atomlist is None: # test for None since atomlist can be an empty list.
atomlist = self.selatoms.values()
catoms = self.getConnectedAtoms(atomlist)
if not len(catoms): return
natoms = 0
for atom in catoms[:]:
if atom.picked:
atom.unpick()
if not atom.picked:
# Just in case a selection filter was applied to this atom.
natoms += 1
from platform_dependent.PlatformDependent import fix_plurals
info = fix_plurals( "%d atom(s) unselected." % natoms)
env.history.message( cmd + info)
self.o.gl_update()
def deleteConnected(self, atomlist=None): # by mark
"""
Delete any atom that can be reached from any currently
selected atom through a sequence of bonds, and that is acceptable to the current selection filter.
If <atomlist> is supplied, use it instead of the currently selected atoms.
"""
cmd = greenmsg("Delete Connected: ")
if atomlist is None and not self.selatoms:
msg = redmsg("No atoms selected")
env.history.message(cmd + msg)
return
if atomlist is None: # test for None since atomlist can be an empty list.
atomlist = self.selatoms.values()
catoms = self.getConnectedAtoms(atomlist)
if not len(catoms): return
natoms = 0
for atom in catoms[:]:
if atom.killed():
continue
#bruce 060331 precaution, to avoid counting bondpoints twice
# (once when atom is them, once when they die when we kill their base atom)
# if they can be in the passed-in list or the getConnectedAtoms retval
# (I don't know if they can be)
if atom.is_singlet():
continue #bruce 060331 precaution, related to above but different (could conceivably have valence errors w/o it)
if atom.filtered():
continue #bruce 060331 fix a bug (don't know if reported) by doing 'continue' rather than 'return'.
# Note, the motivation for 'return' might have been (I speculate) to not traverse bonds through filtered atoms
# (so as to only delete a connected set of atoms), but the old code's 'return' was not a correct
# implementation of that, in general; it might even have deleted a nondeterministic set of atoms,
# depending on python dict item order and/or their order of deposition or their order in the mmp file.
natoms += 1
atom.kill()
from platform_dependent.PlatformDependent import fix_plurals
info = fix_plurals( "%d connected atom(s) deleted." % natoms)
#bruce 060331 comment: this message is sometimes wrong, since caller has deleted some atoms on click 1 of
# a double click, and then calls us on click 2 to delete the atoms connected to the neighbors of those.
# To fix this, the caller ought to pass us the number of atoms it deleted, for us to add to our number,
# or (better) we ought to return the number we delete so the caller can print the history message itself.
env.history.message( cmd + info)
## self.o.gl_update()
self.w.win_update() #bruce 060331 possible bugfix (bug is unconfirmed) -- update MT too, in case some chunk is gone now
return
def selectDoubly(self):
"""
Select any atom that can be reached from any currently
selected atom through two or more non-overlapping sequences of
bonds. Also select atoms that are connected to this group by
one bond and have no other bonds.
"""
###@@@ same comment about interspace bonds as in selectConnected
cmd = greenmsg("Select Doubly: ")
if not self.selatoms:
msg = redmsg("No atoms selected")
env.history.message(cmd + msg)
return
alreadySelected = len(self.selatoms.values())
from operations.op_select_doubly import select_doubly # new code, bruce 050520
#e could also reload it now to speed devel!
select_doubly(self.selatoms.values()) #e optim
totalSelected = len(self.selatoms.values())
from platform_dependent.PlatformDependent import fix_plurals
info = fix_plurals("%d new atom(s) selected (besides the %d initially selected)." % \
(totalSelected - alreadySelected, alreadySelected) )
env.history.message( cmd + info)
if totalSelected > alreadySelected:
## otherwise, means nothing new selected. Am I right? ---Huaicai, not analyze the markdouble() algorithm yet
#self.w.win_update()
self.o.gl_update()
return
# == helpers for SelectConnected (for SelectDoubly, see separate file imported above)
def getConnectedAtoms(self, atomlist, singlet_ok = False, _return_marked = False):
    """
    Return a list of atoms reachable from all the atoms in atomlist,
    not following bonds which are "not really connected" (e.g. pseudo-DNA strand-axis bonds).
    Normally never returns singlets. Optional arg <singlet_ok> permits returning singlets.
    [Private option _return_marked just returns the internal marked dictionary
    (including singlets regardless of other options).]

    @param atomlist: seed atoms for the search (all are included in the result)
    @param singlet_ok: if True, bondpoints (singlets) are included in the result
    @param _return_marked: private -- return the id(atom) -> atom dict of
                           everything reached, singlets included
    """
    # Breadth-first traversal over bonds; keyed by id(atom) rather than
    # the atom itself.
    marked = {} # maps id(atom) -> atom, for processed atoms
    todo = atomlist # list of atoms we must still mark and explore (recurse on all unmarked neighbors)
    # from elements import Singlet
    for atom in todo:
        marked[id(atom)] = atom # since marked means "it's been appended to the todo list"
    while todo:
        newtodo = []
        for atom in todo:
            assert id(atom) in marked
            #e could optim by skipping singlets, here or before appending them.
            #e in fact, we could skip all univalent atoms here, but (for non-singlets)
            # only if they were not initially picked, so nevermind that optim for now.
            for b in atom.bonds:
                at1, at2 = b.atom1, b.atom2 # simplest to just process both atoms, rather than computing b.other(atom)
                really_connected = True # will be changed to False for certain bonds below.
                if not self.o.tripleClick:
                    # New feature:
                    # Don't consider PAM strand-axis bonds as really connected unless
                    # the user did a triple-click (on a PAM atom).
                    # (initial kluge for trying it out -- needs cleanup, generalization,
                    # optim (use element attrs, not lists [done now]), control by option
                    # of this method, and needs to also affect
                    # neighbors_of_last_deleted_atom() in selectMode.py ###e) [bruce 070411]
                    #
                    ###e really_connected should probably be an attr of each bond,
                    # renamed to b.connected, computed from its elements, via a global helper
                    # function in bond_constants.py which computes it for a pair of atoms.
                    # Someday we might have other kinds of non-connected bonds,
                    # e.g. hydrogen bonds, or higher-level analogues of that. [bruce 070411]
                    #
                    # update:
                    # Revised for new role 'unpaired-base' -- just select connected sets of
                    # the same role, or connected bondpoints. (We have to include those even
                    # though they're not selectable, since we have an option to include them.)
                    # (Future: maybe we could generalize this to "same role or any connected
                    # chemical atoms", but then chemical atoms could bridge otherwise-disconnected
                    # sets of different non-None roles. Doesn't matter yet, since chemical atoms
                    # (besides X) bonded to PAM atoms are not yet supported.)
                    # [bruce 080117]
                    if at1.element.role != at2.element.role:
                        if not at1.is_singlet() and not at2.is_singlet():
                            really_connected = False
                if really_connected:
                    if id(at1) not in marked: #e could also check for singlets here...
                        marked[id(at1)] = at1
                        newtodo.append(at1)
                    if id(at2) not in marked:
                        marked[id(at2)] = at2
                        newtodo.append(at2)
        todo = newtodo
    if _return_marked:
        return marked # KLUGE [bruce 070411], should split out a separate method instead
        # (but this form is safer for now -- cvs merge conflicts/errors are less likely this way)
    # Filter out singlets unless the caller asked for them.
    alist = []
    for atom in marked.itervalues():
        if singlet_ok:
            alist.append(atom)
        elif not atom.is_singlet():
            alist.append(atom)
    return alist
def getConnectedSinglets(self, atomlist):
    """
    Return a list of the singlets (bondpoints) reachable from all the
    atoms in atomlist, using the same notion of "connected" as
    getConnectedAtoms (via its private _return_marked option).
    """
    # [bruce 070411] piggyback on the sibling method so both share
    # one definition of "connected"/"reachable".
    reached = self.getConnectedAtoms( atomlist, _return_marked = True )
    return [atom for atom in reached.itervalues() if atom.is_singlet()]
pass # end of class ops_connected_Mixin
# end
| NanoCAD-master | cad/src/operations/ops_connected.py |
# Copyright 2006-2007 Nanorex, Inc. See LICENSE file for details.
"""
Sponsors.py - sponsors system, exporting PermissionDialog and SponsorableMixin
@author: Will
@version: $Id$
@copyright: 2006-2007 Nanorex, Inc. See LICENSE file for details.
Motivation and design rationale:
We want to recoup some of the costs of developing NanoEngineer-1 in a
way consistent with its GPL licensing. One way to do that is to have
sponsors, and to offer the sponsors advertising space in a way that
doesn't bother the user. Some UI dialogs will have buttons with
sponsor logos on them, and if you click on a sponsor logo button,
you'll get more information and maybe a link to their website. There
are no unsolicited pop-ups in this system.
We want to be able to update sponsor information without asking the
user to download a new version. So we have the program fetch recent
sponsor information from our server. We don't want this to annoy the
user, in terms of either network bandwidth or privacy concerns, so
we have a permission dialog that explains what we're doing and asks
the user for permission to do it.
Module classification:
Contains many levels of code, but exports only a widget and a widget-helper.
Still, functionally it may belong in its own toplevel package. [bruce 071217]
"""
import base64
import md5
import os
import random
import re
import socket
import string
import threading
import types
import urllib
from xml.dom.minidom import parseString
from PyQt4.Qt import QDialog
from PyQt4.Qt import QImage
from PyQt4.Qt import QPixmap
from PyQt4.Qt import QSize
from PyQt4.Qt import QIcon
from PyQt4.Qt import QGridLayout
from PyQt4.Qt import QTextBrowser
from PyQt4.Qt import QPushButton
from PyQt4.Qt import SIGNAL
import foundation.env as env
from utilities import debug_flags
from platform_dependent.PlatformDependent import find_or_make_Nanorex_subdir
from foundation.wiki_help import WikiHelpBrowser
from utilities.debug import print_compact_stack, print_compact_traceback
from utilities.qt4transition import qt4todo
from utilities.prefs_constants import sponsor_download_permission_prefs_key
from utilities.prefs_constants import sponsor_permanent_permission_prefs_key
from utilities.prefs_constants import sponsor_md5_mismatch_flag_key
from utilities.Log import redmsg, orangemsg, greenmsg
from utilities.icon_utilities import geticon
_sponsordir = find_or_make_Nanorex_subdir('Sponsors')
_sponsors = { }
# Include a trailing slash in the following sponsor server URLs.
_sponsor_servers = \
['http://nanoengineer-1.com/NE1_Sponsors/',
#'file:///transfers/',
]
def _fixHtml(rc): #bruce 071217 renamed this to be private
startUrl=re.compile('\[')
middleUrl=re.compile(' ')
finishUrl=re.compile('\]')
rc = string.replace(rc, '[P]', '<p>')
rc = string.replace(rc, '[p]', '<p>')
rc = string.replace(rc, '[ul]', '<ul>')
rc = string.replace(rc, '[/ul]', '</ul>')
rc = string.replace(rc, '[li]', '<li>')
while True:
m = startUrl.search(rc)
if m == None:
return rc
s, e = m.start(), m.end()
m = middleUrl.search(rc[e:])
s2, e2 = m.start() + e, m.end() + e
m = finishUrl.search(rc[e2:])
s3, e3 = m.start() + e2, m.end() + e2
mid = "<a href=\"%s\">%s</a>" % (rc[e:s2], rc[e2:s3])
rc = rc[:s] + mid + rc[e3:]
class _Sponsor: #bruce 071217 renamed this to be private
    """
    Record for a single sponsor: a display name, descriptive html text,
    and the path to a logo image file.
    """
    def __init__(self, name, text, imgfile):
        self.name = name
        self.text = text
        self.imgfile = imgfile

    def __repr__(self):
        return "<%s>" % self.name

    def configureSponsorButton(self, btn):
        """
        Load the image in the Sponsor button I{btn} that is displayed at the
        top of the Property Manager.

        @param btn: The sponsor button.
        @type btn: QToolButton
        """
        image = QImage(self.imgfile)
        logo = QPixmap.fromImage(image)
        btn.setIconSize(QSize(logo.width(), logo.height()))
        btn.setIcon(QIcon(logo))

    def wikiHelp(self):
        """
        Show this sponsor's descriptive text in a wiki-help browser window.
        """
        browser = WikiHelpBrowser(self.text,
                                  env.mainwindow(),
                                  caption = self.name)
        browser.show()
def _get_remote_file(filename, prefix):
    """
    Try to fetch I{filename} from each known sponsor server in turn.

    @param filename: the name of the file to get
    @type filename: string

    @param prefix: a short string that is expected at the beginning of the
                   file for the retrieval to be deemed successful
    @type prefix: string

    @return: (gotContents, fileContents) -- gotContents is True iff some
             server returned contents starting with prefix; fileContents
             is those contents (or "" / the last unsuccessful read).
    """
    # Try to connect for up to five seconds per host
    socket.setdefaulttimeout(5)
    contents = ""
    success = False
    for host in _sponsor_servers:
        try:
            stream = urllib.urlopen(host + filename)
            contents = stream.read()
            stream.close()
        except IOError:
            continue # Fail silently; try the next server
        if contents.startswith(prefix):
            success = True
            break
    return success, contents
def _download_xml_file(xmlfile):
    """
    Fetch sponsors.xml from the sponsor servers; on success, replace the
    local copy at I{xmlfile} with the downloaded contents. On failure any
    existing local copy is left untouched, so callers can fall back on it.
    """
    ok, contents = _get_remote_file("sponsors.xml", "<?xml")
    if not ok:
        # Nothing downloaded; keep whatever local copy may already exist.
        return
    if os.path.exists(xmlfile):
        os.remove(xmlfile)
    out = open(xmlfile, 'w')
    out.write(contents)
    out.close()
def _load_sponsor_info(xmlfile, win):
    """
    Parse the sponsors XML file at xmlfile (if it exists), populate the
    module-level _sponsors dict (keyword -> list of _Sponsor), writing out
    any sponsor logo images that are missing or older than the XML file,
    and finally refresh the sponsor buttons on win's sponsored dialogs.
    Any parse/IO trouble is logged and otherwise ignored.
    """
    def getXmlText(doc, tag):
        # Concatenate the text children of the first <tag> element in doc.
        parent = doc.getElementsByTagName(tag)[0]
        rc = ""
        for node in parent.childNodes:
            if node.nodeType == node.TEXT_NODE:
                rc = rc + node.data
        return rc
    if os.path.exists(xmlfile):
        try:
            f = open(xmlfile)
            r = f.read()
            f.close()
            info = parseString(r)
            for sp_info in info.getElementsByTagName('sponsor'):
                sp_name = getXmlText(sp_info, 'name')
                sp_imgfile = os.path.join(_sponsordir, 'logo_%s.png' % sp_name)
                sp_keywords = getXmlText(sp_info, 'keywords')
                sp_keywords = map(lambda x: x.strip(),
                                  sp_keywords.split(','))
                sp_text = _fixHtml(getXmlText(sp_info, 'text'))
                # (Re)write the logo file only if it's missing or older
                # than the xml file it came from.
                if not os.path.exists(sp_imgfile) or \
                   os.path.getctime(sp_imgfile) < os.path.getctime(xmlfile):
                    sp_png = base64.decodestring(getXmlText(sp_info, 'logo'))
                    open(sp_imgfile, 'wb').write(sp_png)
                sp = _Sponsor(sp_name, sp_text, sp_imgfile)
                for keyword in sp_keywords:
                    if not _sponsors.has_key(keyword):
                        _sponsors[keyword] = [ ]
                    _sponsors[keyword].append(sp)
        except:
            # deliberate catch-all: bad sponsor data must never break NE1;
            # both trouble reports are logged for developers
            print_compact_traceback("trouble getting sponsor info: ")
            print_compact_stack("trouble getting sponsor info: ")
    # Refresh sponsor buttons whether or not new info was loaded
    # (dialogs fall back to the default sponsor).
    for dialog in win.sponsoredList():
        try:
            dialog.setSponsor()
        except:
            # best-effort per dialog; a broken dialog shouldn't stop the rest
            pass
def _force_download():
    """
    Debugging helper: unconditionally download and load sponsors.xml --
    no MD5 comparison, no age check, and no respect for the user's
    privacy preferences. For developer use only.
    """
    env.history.message(orangemsg("FOR DEBUG ONLY! _force_download() "
                                  "does not respect user privacy preferences."))
    xmlfile = os.path.join(_sponsordir, 'sponsors.xml')
    _download_xml_file(xmlfile)
    if not os.path.exists(xmlfile):
        raise Exception('_force_download failed')
    _load_sponsor_info(xmlfile, env.mainwindow())
    env.history.message(greenmsg("_force_download() is finished"))
############################################
class PermissionDialog(QDialog, threading.Thread):
    """
    Modal dialog asking the user for permission to fetch sponsor
    information over the network; it is also the worker thread (via
    threading.Thread) which performs the download once the answer is
    known. The Qt widgets are only constructed when a refresh is wanted
    and no permanent preference already answers the question.
    """
    # Politely explain what we're doing as clearly as possible. This will be the
    # user's first experience of the sponsorship system and we want to use the
    # Google axiom of "Don't be evil".
    text = ("We would like to use your network connection to update a list of our " +
            "sponsors. This enables us to recoup some of our development costs " +
            "by putting buttons with sponsor logos on some dialogs. If you click " +
            "on a sponsor logo button, you will get a small window with some " +
            "information about that sponsor. May we do this? Otherwise we'll " +
            "just use buttons with our own Nanorex logo.")
    def __init__(self, win):
        """
        @param win: the main window, used later to refresh sponsor buttons.
        """
        self.xmlfile = os.path.join(_sponsordir, 'sponsors.xml')
        self.win = win
        self.needToAsk = False
        self.downloadSponsors = False
        threading.Thread.__init__(self)
        if not self.refreshWanted():
            # local sponsors.xml exists and its MD5 matched last time:
            # nothing to download, nothing to ask
            return
        if env.prefs[sponsor_permanent_permission_prefs_key]:
            # We have a permanent answer so no need for a dialog
            if env.prefs[sponsor_download_permission_prefs_key]:
                self.downloadSponsors = True
            return
        # No permanent answer: build the actual dialog and ask.
        self.needToAsk = True
        QDialog.__init__(self, None)
        self.setObjectName("Permission")
        self.setModal(True) #This fixes bug 2296. Mitigates bug 2297
        layout = QGridLayout()
        self.setLayout(layout)
        layout.setMargin(0)
        layout.setSpacing(0)
        layout.setObjectName("PermissionLayout")
        self.text_browser = QTextBrowser(self)
        self.text_browser.setObjectName("text_browser")
        layout.addWidget(self.text_browser,0,0,1,4)
        self.text_browser.setMinimumSize(400, 80)
        self.setWindowTitle('May we use your network connection?')
        self.setWindowIcon(geticon('ui/border/MainWindow.png'))
        self.text_browser.setPlainText(self.text)
        # Four choices: permanent yes, one-time yes, one-time no, permanent no.
        self.accept_button = QPushButton(self)
        self.accept_button.setObjectName("accept_button")
        self.accept_button.setText("Always OK")
        self.accept_once_button = QPushButton(self)
        self.accept_once_button.setObjectName("accept_once_button")
        self.accept_once_button.setText("OK now")
        self.decline_once_button = QPushButton(self)
        self.decline_once_button.setObjectName("decline_once_button")
        self.decline_once_button.setText("Not now")
        self.decline_always_button = QPushButton(self)
        self.decline_always_button.setObjectName("decline_always_button")
        self.decline_always_button.setText("Never")
        layout.addWidget(self.accept_button,1,0)
        layout.addWidget(self.accept_once_button,1,1)
        layout.addWidget(self.decline_once_button,1,2)
        layout.addWidget(self.decline_always_button,1,3)
        self.connect(self.accept_button,SIGNAL("clicked()"),self.acceptAlways)
        self.connect(self.accept_once_button,SIGNAL("clicked()"),self.acceptJustOnce)
        self.connect(self.decline_once_button,SIGNAL("clicked()"),self.declineJustOnce)
        self.connect(self.decline_always_button,SIGNAL("clicked()"),self.declineAlways)
    def acceptAlways(self):
        """
        Button slot: permanently grant download permission.
        """
        env.prefs[sponsor_download_permission_prefs_key] = True
        env.prefs[sponsor_permanent_permission_prefs_key] = True
        self.downloadSponsors = True
        self.close()
    def acceptJustOnce(self):
        """
        Button slot: grant download permission for this session only.
        """
        env.prefs[sponsor_permanent_permission_prefs_key] = False
        self.downloadSponsors = True
        self.close()
    def declineAlways(self):
        """
        Button slot: permanently deny download permission.
        """
        env.prefs[sponsor_download_permission_prefs_key] = False
        env.prefs[sponsor_permanent_permission_prefs_key] = True
        self.close()
    def declineJustOnce(self):
        """
        Button slot: deny download permission for this session only.
        """
        env.prefs[sponsor_permanent_permission_prefs_key] = False
        self.close()
    def run(self):
        #
        # Implements superclass's threading.Thread.run() function
        #
        # Background thread: download if permitted, load the sponsor
        # info, then record whether the local file's MD5 still differs
        # from the server's (so the next session knows whether a
        # refresh is wanted).
        if self.downloadSponsors:
            _download_xml_file(self.xmlfile)
        self.finish()
        env.prefs[sponsor_md5_mismatch_flag_key] = self.md5Mismatch()
    def refreshWanted(self):
        """
        Return True if a (re)download of sponsors.xml seems needed:
        the local copy is missing, or the last MD5 comparison with the
        server showed a mismatch.
        """
        if not os.path.exists(self.xmlfile):
            return True
        if env.prefs[sponsor_md5_mismatch_flag_key]:
            return True
        return False
    def md5Mismatch(self):
        # Check the MD5 hash - if it hasn't changed, then there is
        # no reason to download sponsors.xml.
        # Returns True ("refresh wanted") on any failure, including
        # network errors or an unreadable local file.
        try:
            (gotMD5_File, remoteDigest) = \
                _get_remote_file("sponsors.md5", "md5:")
            if gotMD5_File:
                m = md5.new()
                m.update(open(self.xmlfile).read())
                localDigest = "md5:" + base64.encodestring(m.digest())
                remoteDigest = remoteDigest.rstrip()
                localDigest = localDigest.rstrip()
                return (remoteDigest != localDigest)
            else:
                return True
        except:
            # deliberate catch-all: any trouble means "assume mismatch"
            return True
    def finish(self):
        """
        Load the (possibly just-downloaded) sponsor info and refresh
        the sponsor buttons on the main window's dialogs.
        """
        _load_sponsor_info(self.xmlfile, self.win)
    pass
###############################################
# This is the default sponsor image (and text) for the PM sponsor button.
_nanorexText = """\
Nanorex created NanoEngineer-1, the program you're using right now.
<p>
See the <A HREF="http://www.nanoengineer-1.net/">NanoEngineer-1 wiki </A>
for tutorials and other information.</p>
<p>
See <A HREF="http://www.nanorex.com/">www.nanorex.com</A> for more information
about Nanorex.</p>
"""
from utilities.icon_utilities import image_directory
_sponsorImagePath = os.path.join( image_directory(), "ui/sponsors/nanorex.png")
_defaultSponsor = _Sponsor('Nanorex',
_fixHtml(_nanorexText), _sponsorImagePath)
###############################################
class SponsorableMixin:
    """
    To use this mixin class, instances of a main class which inherits it
    (which is typically a QDialog or PM_Dialog) should provide:

    - an attribute sponsor_keyword, which can be None, or a keyword
      string, or a list or tuple of sponsor keyword strings.

    - an attribute sponsor_btn, which must be a QPushButton object
      whose pixmap will be replaced with a sponsor logo during this
      mixin's __init__ or setSponsor methods. This button must
      already exist when our __init__ method is called.

    This mixin class then provides:

    - an __init__ method (which should only be called when the above
      attributes are ready)

    - a setSponsor method which may be called at any time after that,
      to change sponsors (might be useful whether or not
      sponsor_keyword has changed, since sponsors are sometimes chosen
      at random based on it, and the info used to choose them might
      have been updated)

    - an action method meant to be used by the caller as Qt slot
      methods, which can be named either 'sponsor_btn_clicked' or
      'open_sponsor_homepage'.
    """
    sponsor_keyword = None # Nanorex is the default sponsor.
    def __init__(self):
        self.setSponsor()
    def setSponsor(self):
        """
        Pick a sponsor matching self.sponsor_keyword (choosing randomly
        among several keywords and/or several matching sponsors), falling
        back to the default Nanorex sponsor, and display its logo on
        self.sponsor_btn.
        """
        keyword = self.sponsor_keyword
        # Idiom fix: isinstance instead of comparing type() against the
        # deprecated types.ListType/TupleType (removed in Python 3);
        # this also correctly handles list/tuple subclasses.
        if isinstance(keyword, (list, tuple)):
            keyword = random.choice(keyword)
        try:
            sponsor = random.choice(_sponsors[keyword])
        except KeyError:
            # no sponsor registered for this keyword (or keyword is None)
            sponsor = _defaultSponsor
        self.__sponsor = sponsor
        sponsor.configureSponsorButton(self.sponsor_btn)
    def sponsor_btn_clicked(self):
        """
        Qt slot: show the current sponsor's information window.
        """
        self.__sponsor.wikiHelp()
    def open_sponsor_homepage(self):
        """
        Qt slot: alternate name for sponsor_btn_clicked.
        """
        self.__sponsor.wikiHelp()
    pass
# end
| NanoCAD-master | cad/src/sponsors/Sponsors.py |
NanoCAD-master | cad/src/sponsors/__init__.py |
|
# Copyright 2005-2008 Nanorex, Inc. See LICENSE file for details.
"""
version.py -- provide version information for NanoEngineer-1,
including author list, program name, release info, etc.
@author: Will, Mark
@version: $Id$
@copyright: 2005-2008 Nanorex, Inc. See LICENSE file for details.
NOTE: this is copied and imported by autoBuild.py in a directory
which contains no other files, so it needs to be completely self-contained.
(I.e. it should not import anything else in its source directory,
only builtin Python modules.)
Module classification, and refactoring needed:
This should be given a less generic name, and also split into a
high-level module (since it would differ for different applications
built from the same code base) and low-level global access to the
"currently running app's name and version info"
(perhaps implemented similarly to EndUser)
for uses like printing version info into output files
(as in our caller in files_pdb).
In the meantime, to avoid package import cycles, we pretend this is
entirely low-level and classify it as either utilities or constants.
[bruce 071215]
"""
import NE1_Build_Constants
# Note: __copyright__ and __author__ below are about NE1 as a whole,
# not about this specific file. (In some source files, __author__
# is defined and is about that specific file. That usage of
# __author__ is deprecated, superceded by an @author line in the
# module docstring. [bruce 071215 comment])
__copyright__ = "Copyright 2004-2008, Nanorex, Inc."
# Alphabetical by last name
__author__ = """
Damian Allis
K. Eric Drexler
Russ Fish
Josh Hall
Brian Helfrich
Eric Messick
Huaicai Mo
Tom Moore
Piotr Rotkiewicz
Ninad Sathaye
Mark Sims
Bruce Smith
Will Ware
"""
class Version:
    """
    Version information for NanoEngineer-1, shared across all instances
    via the Borg pattern (every instance uses the same state dict).

    Example usage:
      from utilities.version import Version
      v = Version()
      print v, v.product, v.authors
    """
    # Every instance of Version will share the same state.
    tmpary = NE1_Build_Constants.NE1_RELEASE_VERSION.split(".")
    __shared_state = {
        "major": int(tmpary[0]),
        "minor": int(tmpary[1]),
        "releaseType": "",
        "releaseCandidate": NE1_Build_Constants.NE1_OFFICIAL_RELEASE_CANDIDATE,
        "releaseDate": NE1_Build_Constants.NE1_RELEASE_DATE,
        "product": "NanoEngineer-1",
        "copyright": __copyright__,
        "authors": __author__
        }
    if len(tmpary) >= 3: # tiny and teensy are both optional in the version string
        __shared_state["tiny"] = int(tmpary[2])
    if len(tmpary) == 4:
        __shared_state["teensy"] = int(tmpary[3])
    def __init__(self):
        # Use Alex Martelli's singleton (Borg) recipe
        # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66531
        self.__dict__ = self.__shared_state
    def __setattr__(self, attr, value):
        # Attributes are write-protected; any assignment is an error.
        # (raise X(arg) form works in both Python 2 and 3, unlike the
        # old "raise X, arg" statement.)
        raise AttributeError(attr)
    def __repr__(self):
        # Renamed locals: previously the first branch's value was bound
        # to a local misleadingly named 'teensy', and the accumulator
        # shadowed the builtin 'str'.
        version = "%d.%d" % (self.__shared_state["major"],
                             self.__shared_state["minor"])
        if "tiny" in self.__shared_state:
            version += ".%d" % self.__shared_state["tiny"]
        if "teensy" in self.__shared_state:
            version += ".%d" % self.__shared_state["teensy"]
        return version
###############################
if __name__ == "__main__":
    # Self-test: print the version string and every attribute of the
    # shared state, then verify that instances are write-protected.
    v = Version()
    print v
    for x in dir(v):
        print x + ":", getattr(v, x)
    print
    # test write protection
    try:
        v.foo = "bar"
        print "WRITE PROTECTION IS BROKEN"
    except AttributeError:
        # expected: __setattr__ rejects all assignments
        pass
| NanoCAD-master | cad/src/utilities/version.py |
# Copyright 2007-2008 Nanorex, Inc. See LICENSE file for details.
"""
EndUser.py
Some program features are intended specifically for developers, and
are best left disabled for end users. Examples might be debugging
prints, and the ability to reload a changed module. Such code is
wrapped with a test which calls enableDeveloperFeatures() here.
Early in startup, setDeveloperFeatures() should be called after
detecting whether this is an end user or developer run. Until this is
called, enableDeveloperFeatures() will default to False, indicating
an end user run (but will print a bug warning if it's ever called then).
@author: Eric Messick
@version: $Id$
@copyright: 2007-2008 Nanorex, Inc. See LICENSE file for details.
@license: GPL
"""
# Module state: whether developer features are enabled, and whether
# setDeveloperFeatures() has been called yet this session.
_developerFeatures = False
_developerFeatures_set_yet = False

def enableDeveloperFeatures():
    """
    Return True if developer features should be enabled.

    Call this to see if you should enable a particular developer feature.
    Until setDeveloperFeatures() has been called this returns the default
    (False, i.e. an end-user run) and prints a bug warning.
    """
    if not _developerFeatures_set_yet:
        # Being queried before setup indicates a startup-order bug; warn
        # but still return the default value. The parenthesized print
        # form behaves identically under Python 2 and 3.
        print("bug: enableDeveloperFeatures() queried before "
              " setDeveloperFeatures() called; returning %r"
              % _developerFeatures)
    return _developerFeatures
def setDeveloperFeatures(developerFeatures):
    """
    Record whether this session is a developer run or an end-user run.
    Called once at startup, after that determination has been made.
    """
    global _developerFeatures, _developerFeatures_set_yet
    _developerFeatures_set_yet = True
    _developerFeatures = developerFeatures
    return
# Optional directory searched before main.py's own directory for imports;
# None until setAlternateSourcePath() records one.
_alternateSourcePath = None

def getAlternateSourcePath():
    """
    Return the alternate source directory (searched first for any
    imports), or None if none has been set. This lets users override
    released python files without modifying them.
    """
    return _alternateSourcePath
def setAlternateSourcePath(path):
    """
    Record the alternate source path. Called from main.py, after the
    path has been prepended to the import search path but before
    importing anything that might be affected by it; no other code
    should call this. Other code can then detect that this happened
    and read the value via getAlternateSourcePath().
    """
    global _alternateSourcePath
    # may only be set once per session
    assert _alternateSourcePath is None
    _alternateSourcePath = path
    return
# end
| NanoCAD-master | cad/src/utilities/EndUser.py |
# Copyright 2004-2008 Nanorex, Inc. See LICENSE file for details.
"""
debug_flags.py -- provide the atom_debug developer debugging flag,
and other debugging flags which might be changed at runtime.
TODO: rename atom_debug, and perhaps change some of its uses
into more specific debugging flags.
@author: Bruce
@version: $Id$
@copyright: 2004-2008 Nanorex, Inc. See LICENSE file for details.
Notes:
atom_debug can be set on startup from an environment variable,
or by a developer directly editing this file in their local copy.
(See this module's code for details.)
It can be changed during one session of NE1 using the ATOM_DEBUG
checkmark item in the GLPane debug menu.
It is not persistent between sessions.
TODO: document code usage notes -- how to use it, what to use it for.
"""
import os
# ==
# Developers who want atom_debug to start out True on every launch
# can uncomment the following line -- but don't commit that change!
## atom_debug = True
# WARNING: having atom_debug set from startup is currently broken,
# at least when ALTERNATE_CAD_SRC_PATH is being used. It causes code
# to run which doesn't otherwise run, and which fails with fatal tracebacks.
# I don't know why, or for how long this has been true. I used to start
# with it set all the time, but that was a few months back and before I
# routinely used ALTERNATE_CAD_SRC_PATH (in which state I've never before
# started with it set). [bruce 071018]
# ==
# Establish the atom_debug global. Precedence:
# 1. leave it alone if something (e.g. .atom-debug-rc) already set it;
# 2. otherwise take it from the ATOM_DEBUG environment variable;
# 3. otherwise default to 0. Finally normalize it to a real bool.
# (The bare excepts are deliberate: this is best-effort probing of
# possibly-missing names/env vars at import time.)
try:
    atom_debug # don't disturb it if already set (e.g. by .atom-debug-rc)
except:
    try:
        atom_debug = os.environ['ATOM_DEBUG'] # as a string; should be "1" or "0"
    except:
        atom_debug = 0
try:
    # the env var arrives as a string; "0" should mean False,
    # so convert to int when possible
    atom_debug = int(atom_debug)
except:
    pass
atom_debug = not not atom_debug # normalize to a genuine bool
if atom_debug:
    print "fyi: user has requested ATOM_DEBUG feature; extra debugging code enabled; might be slower"
# ==
# debug flags for dna updater, controlled by debug_prefs
# in dna_updater.dna_updater_prefs.
# The default values set here don't matter, afaik,
# since they are replaced by debug_pref values before use.
# [bruce 080228 moved these here]
DEBUG_DNA_UPDATER_MINIMAL = True
DEBUG_DNA_UPDATER = True
DEBUG_DNA_UPDATER_VERBOSE = False
DNA_UPDATER_SLOW_ASSERTS = True
# end
| NanoCAD-master | cad/src/utilities/debug_flags.py |
# Copyright 2007 Nanorex, Inc. See LICENSE file for details.
"""
Initialize.py
Routines for supporting initialization functions.
An initialization function should look like this::
def initialize():
if (Initialize.startInitialization(__name__)):
return
... initialization code here ...
Initialize.endInitialization(__name__)
This will prevent the initialization code from being run more than
once. Circular dependencies among initialization functions are also
detected. To break such loops, you can try dividing a single
initialization function into early and late parts. If you do this,
pass the same identifing string as the 'extra' argument to each of
startInitialization and endInitialization. [QUESTION: do you mean
the same string in a single initialize function, but different
strings in each initialize function, or the same string in both
initialize functions? Guess: the former. [bruce 070702]]
If you wish your initialization code to be rerun, you can call
forgetInitialization, which will cause startInitialization with the
same arguments to return False on its next call.
Note that nothing in this module calls initialization functions in
the first place, or helps determine when to call them. You must add
at least one call to each one to an appropriate place in the code,
and you are on your own to make sure it is called before its side
effects were first needed, but not too early to work or be legal.
(One way is to call it before every attempt to rely on its side
effects, but this might be inefficient.)
@author: Eric Messick
@version: $Id$
@copyright: 2007 Nanorex, Inc.
@license: GPL
"""
_RUNNING = "Running"
"Constant for storing in _ms_initializationStatus"
_ms_initializationStatus = {}
"""
Dictionary mapping name+extra to the state of the given function.
Takes on one of three values::
True: Function has already completed.
_RUNNING: Function is currently running.
False: Function has not started.
Note that a missing key means the same as False.
"""
_VERBOSE = False
import exceptions
class _InitializationLoop(exceptions.Exception):
def __init__(self, args):
self.args = args
def startInitialization(name, extra=""):
    """
    Called at the beginning of each initialization function.

    @param name: which module is being initialized (pass __name__).
    @type name: string
    @param extra: optional. which function in this module.
    @type extra: string
    @return: True if this function is either running or completed.
    @raise _InitializationLoop: if the function is currently running,
           i.e. a circular dependency among initializations exists.
    """
    key = name + extra
    # dict.get collapses the has_key test and the default into one step
    # (has_key was removed in Python 3; 'in'/.get work in both).
    currentState = _ms_initializationStatus.get(key, False)
    if currentState:
        if currentState is _RUNNING:
            # re-entered while still running: circular dependency
            raise _InitializationLoop(key)
        if _VERBOSE:
            print("initialize recalled: " + key)
        return True
    _ms_initializationStatus[key] = _RUNNING
    if _VERBOSE:
        print("initializing " + key)
    return False
def endInitialization(name, extra=""):
    """
    Called at the end of each initialization function.

    @param name: which module is being initialized (pass __name__).
    @type name: string
    @param extra: optional. which function in this module.
    @type extra: string
    """
    key = name + extra
    if _VERBOSE:
        # parenthesized print works identically in Python 2 and 3
        print("done initializing: " + key)
    # mark this function as completed
    _ms_initializationStatus[key] = True
def forgetInitialization(name, extra=""):
    """
    Allow an initialization function to run again: the next call to
    startInitialization() with the same arguments will return False.

    @param name: which module is being initialized (pass __name__).
    @type name: string
    @param extra: optional. which function in this module.
    @type extra: string
    """
    _ms_initializationStatus[name + extra] = False
| NanoCAD-master | cad/src/utilities/Initialize.py |
# Copyright 2004-2008 Nanorex, Inc. See LICENSE file for details.
"""
utilities/Log.py -- utility functions related to message logging
@author: Mark
@version: $Id$
@copyright: 2004-2008 Nanorex, Inc. See LICENSE file for details.
History:
Mark wrote these in HistoryWidget.py.
Bruce added quote_html and graymsg.
EricM moved them into a separate file in 2007.
"""
def greenmsg(text):
    """
    Wrap text in the html tags needed to display it in green
    in the HistoryWidget.
    """
    return "<span style=\"color:#006600\">%s</span>" % text
def redmsg(text):
    """
    Wrap text in the html tags needed to display it in red in the
    HistoryWidget. Used for serious error messages, bug reports, etc.
    """
    return "<span style=\"color:#ff0000\">%s</span>" % text
def orangemsg(text): # some redmsgs might need replacement with this
    """
    Wrap text in the html tags needed to display it in orange in the
    HistoryWidget. Used for warnings, and some info the user always
    needs to be aware of.
    """
    return '<span style="color:#e65900">' + text + '</span>'
def graymsg(text): #bruce 080201 added this to replace _graymsg
    """
    Wrap text in the html tags needed to display it in gray in the
    HistoryWidget. Used for developer/debug messages (should probably
    not be used unless debug_prefs or similar debugging options are set).
    """
    return '<span style="color:#808080">' + text + '</span>'
def _graymsg(text): # remove this when its uses are all converted to graymsg
    """
    Deprecated old name for graymsg (to which this simply delegates);
    new code should call graymsg directly.
    """
    return graymsg(text)
# ==
def quote_html(text):
    """
    Encode the html special characters '&', '<' and '>' in text, so that
    it is safe to display in the history widget, and so the special
    characters will look like their original forms.

    @note: if quote_html is used in conjunction with e.g. redmsg,
    it must be used inside it, or it will break the effect of redmsg.
    I.e. use them together like this: redmsg(quote_html(text)).
    """
    # Bug fix: the replacements must be the html entity strings
    # (&amp;, &lt;, &gt;); replacing each character with itself (as the
    # broken table did) made this function a no-op.
    # (Also renamed the loop variable, which shadowed the 'string' module.)
    for char, entity in [('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;')]:
        # note: & has to come first, so the ampersands introduced by the
        # other replacements are not re-escaped
        text = text.replace(char, entity)
    return text
# end
| NanoCAD-master | cad/src/utilities/Log.py |
# Copyright 2004-2009 Nanorex, Inc. See LICENSE file for details.
"""
constants.py -- constants and trivial functions used in multiple modules.
Everything defined here must require no imports except from builtin
modules or PyQt, and use names that we don't mind reserving throughout NE1.
(At the moment there are some exceptions to that, but these should be
cleaned up.)
(Ideally this module would also contain no state; probably we should move
gensym out of it for that reason.)
@version: $Id$
@copyright: 2004-2009 Nanorex, Inc. See LICENSE file for details.
"""
from PyQt4.Qt import Qt
import os
# ==
#Urmi 20080617: grid origin related constants: UM 20080616
PLANE_ORIGIN_LOWER_LEFT = 0
PLANE_ORIGIN_LOWER_RIGHT = 1
PLANE_ORIGIN_UPPER_LEFT = 2
PLANE_ORIGIN_UPPER_RIGHT = 3
LABELS_ALONG_ORIGIN = 0
LABELS_ALONG_PLANE_EDGES = 1
MULTIPANE_GUI = True
# enable some code which was intended to permit the main window to contain
# multiple PartWindows. Unfortunately we're far from that being possible,
# but we're also (I strongly suspect, but am not sure) now dependent on
# this value being True, having not maintained the False case for a long
# time. If this is confirmed, we should remove the code for the False case
# and remove this flag, and then decide whether the singleton partWindow
# should continue to exist. [bruce 071008, replacing a debug_pref with
# this flag]
DIAMOND_BOND_LENGTH = 1.544
#bruce 051102 added this based on email from Damian Allis:
# > The accepted bond length for diamond is 1.544 ("Interatomic
# > Distances,' The Chemical Society, London, 1958, p.M102)....
# ==
RECENTFILES_QSETTINGS_KEY = '/Nanorex/NE1/recentFiles'
# ==
shiftModifier = 33554432
cntlModifier = 67108864
# note: in Qt/Mac, this flag indicates the command key, not the control key.
altModifier = 134217728
# note: in Qt/Mac, this flag indicates the Alt/Option modifier key.
# Todo: it would be better if we replaced the above by the equivalent
# named constants provided by Qt.
# [namely, Qt.ShiftModifier, Qt.ControlModifier, Qt.AltModifier.]
# [later, before 080728: This has been done for all of their uses in the code
# except in the definition of debugModifiers (below), probably during the port
# to Qt4.]
#
# Before doing this, we should find out how they correspond on each platform --
# for example, I don't know whether Qt's named constant for the control key
# will have the same numeric value on Windows and Mac, as our own named constant
# 'cntlModifier' does. So no one should replace the above numbers by
# Qt's declared names before they check this out on each platform.
# [bruce 040916]
# debugModifiers should be an unusual combination of modifier keys, used
# to bring up an undocumented debug menu intended just for developers
# (if a suitable preference is set). The following value is good for
# the Mac; someone on Windows or Linux can decide what value would be
# good for those platforms, and make this conditional on the
# platform. (If that's done, note that sys.platform on Mac might not
# be what you'd guess -- it can be either "darwin" or "mac" (I think),
# depending on the python installation.) -- bruce 040916
debugModifiers = cntlModifier | shiftModifier | altModifier
# note: on the mac, this really means command-shift-alt
# ==
# Trivial functions that might be needed early during app startup
# (good to put here to avoid recursive import problems involving other modules)
# or in many modules.
# (Only a very few functions are trivial enough to be put here,
# and their names always need to be suitable for using up in every module.)
def noop(*args, **kws):
    """
    Accept any arguments and do nothing; handy as a placeholder callback.
    """
    return None
def intRound(num): #bruce 080521
    """
    Round a number (int or float) to the closest int.

    @warning: int(num + 0.5) is *not* a correct formula for this
              (when num is negative), since Python int() rounds
              towards zero, not towards negative infinity
              [http://docs.python.org/lib/built-in-funcs.html].
    """
    nearest = round(num)
    return int(nearest)
def str_or_unicode(qstring): #bruce 080529
    """
    Coerce qstring to str; if str() raises UnicodeEncodeError,
    fall back to unicode() instead.

    @param qstring: anything, but typically a QString object.
    """
    try:
        result = str(qstring)
    except UnicodeEncodeError:
        result = unicode(qstring)
    return result
def genKey(start = 1):
    #bruce 050922 moved this here from chem.py and Utility.py, added start arg
    """
    Produce a generator that counts indefinitely,
    yielding start, start + 1, start + 2, ...
    """
    value = start
    while True:
        yield value
        value = value + 1
    pass
atKey = genKey(start = 1)
# generator for atom.key attribute, also used for fake atoms.
# [moved here from chem.py to remove import cycle, bruce 080510]
# As of bruce 050228, we now make use of the fact that this produces keys
# which sort in the same order as atoms are created (e.g. the order they're
# read from an mmp file), so we now require this in the future even if the
# key type is changed. [Note: this comment appears in two files.]
# ==
_gensym_counters = {} # holds last-used value for each fixed prefix (default 0)
def _fix_gensym_prefix(prefix): #bruce 070604
"""
[private helper function for gensym and relatives]
"""
assert type(prefix) in (type(""), type(u""))
if prefix and prefix[-1].isdigit():
# This special behavior guarantees that every name gensym returns is
# unique. As of bruce 070603, I think it never happens, based on the
# existing calls of gensym.
# Note: someday we might change the added char to ' ' if prefix
# contains ' ', and/or also do this if prefix ends with a letter (so
# most gensym callers can rely on this rule rather than adding '-'
# themselves).
# NOTE: this special rule is still needed, even if we never want
# default new node names to contain '-' or ' '. It's ok since they
# also won't include digits in the prefix, so this rule won't happen.
# [bruce 080407 comment]
prefix = prefix + '-'
return prefix
def gensym(prefix, assy = None):
    #bruce 070603 rewrite, improved functionality (replaces three separate
    # similar definitions)
    #bruce 080407: new option to pass assy, used for names_to_avoid
    """
    Return prefix with a number appended, where the number is 1 more
    than the last time we were called for the same prefix, or 1 the first time
    we see that prefix. Note that this means we maintain an independent counter
    for each different prefix we're ever called with.

    In order to ensure that every name we ever return is unique (in spite of our
    independent counters reusing the same values for different prefixes), we append
    '-' to prefix if it ends with a digit already, before looking up and appending
    the counter for that prefix.

    (The prefix is typically related to a Node classname, but can be more or less
    specialized, e.g. when making chunks of certain kinds (like DNA) or copying nodes
    or library parts.)

    @param prefix: prefix for generated name.
    @param assy: if provided and not None, don't duplicate any node name
                 presently used in assy.
    """
    # REVIEW: maybe: move this code into a new method in class Assembly
    prefix = _fix_gensym_prefix(prefix)
    names_to_avoid = {}
        # maps name -> anything, for 0 or more names we don't want to generate
    # fill names_to_avoid with node names from assy, if provided
    if assy is not None:
        def avoid_my_name(node):
            # record each existing node name, so the loop below skips it
            names_to_avoid[node.name] = None
        assy.root.apply2all( avoid_my_name )
            # could optim and not delve inside nodes that hide contents
    new_value = _gensym_counters.get(prefix, 0) + 1
    name = prefix + str(new_value)
    # bump the counter past any candidate names already used in assy
    while name in names_to_avoid:
        new_value += 1
        name = prefix + str(new_value)
    _gensym_counters[prefix] = new_value # persist last-used value per prefix
    return name
def permit_gensym_to_reuse_name(prefix, name): #bruce 070604
    """
    This gives gensym permission to reuse the given name which it returned
    based on the given prefix, if it can do this and still follow its other
    policies. It is not obligated to do this.

    (In practice: if name is the most recent name gensym generated for
    prefix, roll the per-prefix counter back by one; otherwise do nothing.)
    """
    prefix = _fix_gensym_prefix(prefix)
    last_used_value = _gensym_counters.get(prefix, 0)
    last_used_name = prefix + str(last_used_value)
    if name == last_used_name:
        # this is the only case in which we can safely do anything.
        corrected_last_used_value = last_used_value - 1
        assert corrected_last_used_value >= 0
            # can't happen if called on names actually returned from gensym
        _gensym_counters[prefix] = corrected_last_used_value
    return
# ==
def average_value(seq, default = 0.0):
    """
    Return the numerical average value of seq (a Python sequence or
    equivalent), or default (0.0 unless overridden) if seq is empty.

    Note: Numeric contains a function named average, which is why we
    don't use that name.
    """
    #bruce 070412; renamed and moved from selectMode.py to constants.py 070601
    # WARNING: this uses <built-in function sum>, not Numeric.sum.
    if not seq:
        return default
    total = sum(seq)
    return total / len(seq)
# ==
def common_prefix( seq1, *moreseqs ):
    """
    Given one or more python sequences (as separate arguments),
    return the initial common prefix of them (determined by != of elements),
    as a slice of the first sequence (so its type matches seq1's type;
    whether it might be the same mutable object as seq1 is undefined).
    """
    #bruce 080626, modified from a version in node_indices.py
    prefix_len = len(seq1)
    for other in moreseqs:
        if len(other) < prefix_len:
            prefix_len = len(other)
        # scan for the first mismatch within the still-possible prefix
        i = 0
        while i < prefix_len:
            if seq1[i] != other[i]:
                prefix_len = i
                break
            i += 1
    return seq1[0:prefix_len] # might be all or part of seq1, or 0-length
# ==
# Display styles (aka display modes)
# TODO: this entire section ought to be split into its own file,
# or perhaps more than one (constants, globals, helper functions).
# (Two helper functions have since been moved to mmp_dispnames.py --
# perhaps more defs or code should join them there, not sure. [bruce 090116])
#
# BUT, the loop below, initializing ATOM_CONTENT_FOR_DISPLAY_STYLE,
# needs to run before dispNames, or (preferably) on a copy of it
# from before it's modified, by external init code. [bruce 080324 comment]
# [that has a grammar error, not quite sure what was intended -- bruce 090116]
# The global variables are: remap_atom_dispdefs, dispNames, new_dispNames,
# dispLabel.
remap_atom_dispdefs = {} #bruce 080324 moved this here from displaymodes.py
# These are arranged in order of increasing thickness of the bond
# representation. They are indices of dispNames and dispLabel. [Josh 11/2]
diDEFAULT = 0 # the fact that diDEFAULT == 0 is public. [bruce 080206]
diINVISIBLE = 1
diTrueCPK = 2 # CPK
# [renamed from old name diVDW, bruce 060607; corresponding UI change was
# by mark 060307] (This is not yet called diCPK, to avoid confusion, since
# that name was used for diBALL until today. After some time goes by, we
# can rename this to just diCPK.)
diLINES = 3
diBALL = 4 # "Ball and Stick"
# [renamed from old incorrect name diCPK, bruce 060607; corresponding UI
# change was by mark 060307]
diTUBES = 5
# WARNING (kluge):
# the order of the following constants has to match how the lists dispNames and
# dispLabel (defined below) are extended by side effects of imports of
# corresponding display styles in startup_misc.py. (Needs cleanup.)
# [bruce 080212 comment; related code has comments with same signature]
diDNACYLINDER = 6
diCYLINDER = 7
diSURFACE = 8
diPROTEIN = 9
# note: some of the following lists are extended later at runtime
# [as of bruce 060607]
dispNames = ["def", "inv", "vdw", "lin", "cpk", "tub"]
# these dispNames can't be easily revised, since they are used in mmp files;
# cpk and vdw are misleading as of 060307.
# NOTE: as of bruce 080324, dispNames is now private.
# Soon it will be renamed and generalized to permit aliases for the names.
# Then it will become legal to read (but not yet to write) the new forms
# of the names which are proposed in bug 2662.
new_dispNames = ["def", "Invisible", "CPK", "Lines", "BallAndStick", "Tubes"]
#bruce 080324 re bug 2662; permit for reading, but don't write them yet
# Note: some defs were moved from here to mmp_dispnames.py [bruce 090116]
# <properDisplayNames> used by write_qutemol_pdb_file() in qutemol.py only.
# Set _qxDNACYLINDER to "def" until "dnacylinder" is supported in QuteMolX.
_qxDNACYLINDER = "def"
properDisplayNames = ["def", "inv", "cpk", "lin", "bas", "tub", _qxDNACYLINDER]
#dispLabel = ["Default", "Invisible", "VdW", "Lines", "CPK", "Tubes"]
dispLabel = ["Default", "Invisible", "CPK", "Lines", "Ball and Stick", "Tubes"]
# Changed "CPK" => "Ball and Stick" and "VdW" => "CPK". mark 060307.
def _f_add_display_style_code( disp_name, disp_label, allowed_for_atoms):
    """
    [friend function for displaymodes.py]

    Register a new display style: append its mmp code disp_name to both
    dispNames and new_dispNames, and its UI label disp_label to dispLabel,
    keeping those three global lists in lockstep; return the new style's
    index (the internal value used by setDisplayStyle).

    If allowed_for_atoms is false, the new index is remapped to diDEFAULT
    via remap_atom_dispdefs, so the style is not applied to atoms.
    """
    #bruce 080324 split this out of displaymodes.py, to permit making
    # these globals (dispNames, dispLabel) private soon
    if disp_name in dispNames:
        # this is only legal [nim] if the classname is the same;
        # in that case, we ought to replace things (useful for reload
        # during debugging)
        assert 0, "reload during debug for display modes " \
               "is not yet implemented; or, non-unique " \
               "mmp_code %r (in dispNames)" % (disp_name,)
    if disp_name in new_dispNames: #bruce 080415
        # same comment applies as above
        assert 0, "reload during debug for display modes " \
               "is not yet implemented; or, non-unique " \
               "mmp_code %r (in new_dispNames)" % (disp_name,)
    assert len(dispNames) == len(dispLabel)
    assert len(dispNames) == len(new_dispNames) #bruce 080415
    dispNames.append(disp_name)
    new_dispNames.append(disp_name) #bruce 080415 fix bug 2809 in
        # saving nodes with "chunk display styles" set (not in .rc1)
    dispLabel.append(disp_label)
    ind = dispNames.index(disp_name) # internal value used by setDisplayStyle
        # note: this always works, since we appended the same disp_name to
        # *both* dispNames and new_dispNames [bruce 080415 comment]
    if not allowed_for_atoms:
        remap_atom_dispdefs[ind] = diDEFAULT # kluge?
    return ind
# ==
# display style for new glpanes (#e should be a user preference) [bruce 041129]
# Now in user prefs db, set in GLPane.__init__ [Mark 050715]
# WARNING: this is no longer used as the default global display style,
# and now has a different value from that, but it is still used in other ways,
# which would need analysis in order to determine whether they can be replaced
# with the actual default global display style. Needs cleanup.
# [bruce 080606 comment]
default_display_mode = diTUBES
TubeRadius = 0.3 # (i.e. "TubesSigmaBondRadius")
diBALL_SigmaBondRadius = 0.1
diDNACYLINDER_SigmaBondRadius = 1.3
# ==
# atom content flags [bruce 080306]
# (so far, we only have these for display style, but more might be added
# for other aspects of atoms, such as kind of element, whether selected,
# whether highlighted, whether has error, etc;
# in spite of the term "atom content" we might also add some for nodes,
# e.g. all the same ones mentioned for atoms.)
ATOM_CONTENT_FOR_DISPLAY_STYLE = []
# modified by the loop below to be same length as dispNames
AC_HAS_INDIVIDUAL_DISPLAY_STYLE = 1
AC_INVISIBLE = 1 << diINVISIBLE
# note: fewer bits than ATOM_CONTENT_FOR_DISPLAY_STYLE[diINVISIBLE]
for _disp in range(len(dispNames)):
# WARNING:
# - this must run before dispNames is modified by external code
# - it assumes no styles defined in displaymodes.py can apply to atoms
if not _disp:
assert _disp == diDEFAULT
_content_for_disp = 0
elif _disp == diINVISIBLE:
# don't redundantly count this as "individual display style"
_content_for_disp = AC_INVISIBLE
else:
_content_for_disp = \
(AC_HAS_INDIVIDUAL_DISPLAY_STYLE + (1 << _disp))
# this uses bits 1 through len(dispNames) - 1,
# plus bit 0 for "any of those"
ATOM_CONTENT_FOR_DISPLAY_STYLE.append(_content_for_disp)
# ==
# constants related to bounding boxes containing atoms and bonds [piotr 080402]
# The estimated maximum sphere radius in any display style.
# The maximum VdW atom radius is 5.0 A.
# It can be increased by 25% in User Preferences.
# Highlighting increases this radius by 0.2A.
# Total = 5.0A * 1.25 + 0.2A = 6.2A
MAX_ATOM_SPHERE_RADIUS = 6.2
# Margin value for bounding box (used in BoundingBox.py)
BBOX_MARGIN = 1.8
# The minimal bounding sphere radius for a single atom of VdW radius = 0.0,
# calculated as follows: BB_MIN_RADIUS = sqrt(3 * (BBOX_MARGIN) ^ 2)
BBOX_MIN_RADIUS = 3.118
# ==
# PAM models. (Possible values of atom.element.pam, besides None,
# and of some "info chunk" attributes in the mmp format, besides "".
# Values must never be changed (unless info chunk read/write code
# is revised to hide the change), since they are part of mmp format.)
#
# [bruce 080321]
MODEL_PAM3 = 'PAM3'
MODEL_PAM5 = 'PAM5'
PAM_MODELS = (MODEL_PAM3, MODEL_PAM5)
MODEL_MIXED = 'PAM_MODEL_MIXED' # review: good this is not in PAM_MODELS?
# Dna constants presently needed outside of dna package.
# After sufficient refactoring, these could be moved inside it.
Pl_STICKY_BOND_DIRECTION = 1 # should be 1 or -1;
# the bond direction from Pl to the Ss it wants to stay with when possible.
# (This value (1) is consistent with which strand-ends get Pls
# in the PAM5 generator as of 080312, and with other evidence #doc)
# [bruce 080118/080326] #e rename?
# ==
# constants for bondpoint_policy [bruce 080603]
BONDPOINT_LEFT_OUT = "BONDPOINT_LEFT_OUT"
BONDPOINT_UNCHANGED = "BONDPOINT_UNCHANGED" # not yet specifiable
BONDPOINT_ANCHORED = "BONDPOINT_ANCHORED" # not yet specifiable
BONDPOINT_REPLACED_WITH_HYDROGEN = "BONDPOINT_REPLACED_WITH_HYDROGEN"
# ==
# constants for readmmp
SUCCESS = 'SUCCESS'
ABORTED = 'ABORTED'
READ_ERROR = 'READ ERROR'
# ==
def filesplit(pathname):
    """
    Split pathname into directory part (not ending with '/'),
    basename, and extension (including '.', or possibly "")
    and return them as a 3-tuple.

    For example, filesplit('~/foo/bar/gorp.xam') ==> ('~/foo/bar', 'gorp', '.xam').
    Compare with _fileparse (deprecated), whose returned dir ends with '/'.
    """
    #bruce 050413 _fileparse variant: no '/' at end of dirname
    #bruce 071030 moved this from movieMode to constants
    dirname, filename = os.path.split(pathname)
    basename, extension = os.path.splitext(filename)
    return dirname, basename, extension
# ==
def remove_prefix(str1, prefix):
    # TODO: put this into a new file, utilities.string_utils?
    """
    Remove an optional prefix from a string:
    if str1 starts with prefix, remove it (and return the result),
    otherwise return str1 unchanged.

    @param str1: a string that may or may not start with prefix.
    @type str1: string

    @param prefix: a string to remove if it occurs at the beginning of str1.
    @type prefix: string

    @return: a string, equal to str1 with prefix removed, or to str1.
    """
    if not str1.startswith(prefix):
        return str1
    return str1[len(prefix):]
# ==
# ave_colors() logically belongs in some "color utilities file",
# but is here so it is defined early enough for use in computing
# default values of user preferences in prefs_constants.py.
def ave_colors(weight, color1, color2):
    """
    Return a weighted average of two colors,
    where weight gives the amount of color1 to include.
    (E.g., weight of 1.0 means use only color1, 0.0 means use only color2,
    and ave_colors(0.8, color, black) makes color slightly darker.)

    Color format is a 3-tuple of RGB components from 0.0 to 1.0
    (e.g. black is (0.0, 0.0, 0.0), white is (1.0, 1.0, 1.0)).
    This is also the standard format for colors in our preferences database
    (which contains primitive Python objects encoded by the shelve module).

    Input color components can be ints, but those are coerced to floats,
    NOT treated as in the range [0,255] like some other color-related functions do.
    Output components are always floats.

    Input colors can be any 3-sequences (including Numeric arrays);
    output color is always a tuple.
    """
    #bruce 050805 moved this here from handles.py, and revised it
    #e (perhaps we could optimize this using some Numeric method)
    w = float(weight)
    blended = []
    for c1, c2 in zip(color1, color2):
        blended.append(w * c1 + (1 - w) * c2)
    return tuple(blended)
def colors_differ_sufficiently(color1, color2, minimum_difference = 0.51 ):
    """
    Return True if the difference between color1 and color2
    (as vectors in an RGB unit cube) is greater than minimum_difference
    (0.51 by default). Otherwise, return False.
    """
    # [probably by Mark, circa 080710]
    # [revised by bruce 080711 to remove import cycle involving VQT]
    # bruce 080910 renamed this from color_difference, since it does
    # not return the color difference, and revised default value since
    # all calls were passing the same value of minimum_difference.
    distance_squared = 0.0
    for i in (0, 1, 2):
        delta = color2[i] - color1[i]
        distance_squared += delta * delta
    return distance_squared > minimum_difference ** 2
def getTextHaloColor(textColor):
    """
    @return: a good halo color, given a text color.

    The halo color will be either light gray or dark gray
    and will not be too close to textColor.
    """
    # prefer the light halo; fall back to dark when the text color is
    # too close to lightgray for sufficient contrast
    return lightgray if colors_differ_sufficiently(lightgray, textColor) else darkgray
# colors
# [note: some of the ones whose names describe their function
# are default values for user preferences]
black = (0.0, 0.0, 0.0)
white = (1.0, 1.0, 1.0)
darkblue = (0.0, 0.0, 0.6) # Was blue. Mark 2008-06-27
blue = (0.0, 0.0, 1.0)
aqua = (0.15, 1.0, 1.0)
orange = (1.0, 0.25, 0.0)
darkorange = (0.6, 0.3, 0.0)
red = (1.0, 0.0, 0.0)
lightred_1 = (0.99, 0.501, 0.505) #reddish pink color
yellow = (1.0, 1.0, 0.0)
green = (0.0, 1.0, 0.0)
lightgreen = (0.45, 0.8, 0.45) # bruce 080206
lightgreen_2 = (0.596, 0.988, 0.596)
darkgreen = (0.0, 0.6, 0.0)
magenta = (1.0, 0.0, 1.0)
cyan = (0.0, 1.0, 1.0)
lightgray = (0.8, 0.8, 0.8)
gray = (0.5, 0.5, 0.5)
darkgray = (0.3, 0.3, 0.3)
navy = (0.0, 0.09, 0.44)
darkred = (0.6, 0.0, 0.2)
violet = (0.6, 0.1, 0.9)
purple = (0.4, 0.0, 0.6)
darkpurple = (0.3, 0.0, 0.3)
pink = (0.8, 0.4, 0.4)
olive = (0.3, 0.3, 0.0)
steelblue = (0.3, 0.4, 0.5)
brass = (0.5, 0.5, 0.0)
copper = (0.3, 0.3, 0.1)
mustard = (0.78, 0.78, 0.0)
applegreen = (0.4, 0.8, 0.4)
banana = (0.8901, 0.8117, 0.3411)
silver = (0.7529, 0.7529, 0.7529)
gold = (1, 0.843, 0)
ivory = (1, 1, 0.9411)
#ninad20060922 using it while drawing origin axis
lightblue = ave_colors(0.03, white, blue)
# Note: that color is misnamed -- it's essentially just blue.
# Or maybe the definition has a typo?
# This needs cleanup... in the meantime,
# consider also lighterblue
lighterblue = ave_colors( 0.5, white, blue)
# Following color is used to draw the back side of a reference plane.
#Better call it brownish yellow or greenish brown?? lets just call it brown
#(or suggest better name by looking at it. ) - ninad 20070615
brown = ave_colors(0.5, black, yellow)
# The background gradient types/values.
# Gradient values are one more than the gradient constant values in
# Preferences.py (i.e. bgBLUE_SKY = BG_BLUE_SKY + 1).
bgSOLID = 0
bgBLUE_SKY = 1
bgEVENING_SKY = 2
bgSEAGREEN = 3
# GLPane "Blue Sky" gradient
bluesky = (0.9, 0.9, 0.9), (0.9, 0.9, 0.9), (0.33, 0.73, 1.0), (0.33, 0.73, 1.0)
# GLPane "Evening Sky" gradient
eveningsky = (0.0, 0.0, 0.0), (0.0, 0.0, 0.0), (0.0, 0.0, 0.3), (0.0, 0.0, 0.3)
# GLPane "Sea Green" gradient
bg_seagreen = ((0.905, 0.905, 0.921),
(0.905, 0.905, 0.921),
(0.6, 0.8, 0.8),
(0.6, 0.8, 0.8) )
bg_seagreen_UNUSED_FOR_DEBUG = (0.894, 0.949, 0.894), (0.862, 0.929, 0.862), \
(0.686, 0.843, 0.843), (0.905, 0.905, 0.921), \
(0.862, 0.929, 0.862), (0.839, 0.921, 0.839), \
(0.67, 0.835, 0.835), (0.686, 0.843, 0.843), \
(0.686, 0.843, 0.843), (0.67, 0.835, 0.835), \
(0.6, 0.8, 0.8), (0.6, 0.8, 0.8), \
(0.905, 0.905, 0.921), (0.686, 0.843, 0.843), \
(0.6, 0.8, 0.8), (0.701, 0.85, 0.85)
## PickedColor = (0.0, 0.0, 1.0) # no longer used as of 080603
ErrorPickedColor = (1.0, 0.0, 0.0) # for atoms with wrong valence, etc
elemKeyTab = [('H', Qt.Key_H, 1),
('B', Qt.Key_B, 5),
('C', Qt.Key_C, 6),
('N', Qt.Key_N, 7),
('O', Qt.Key_O, 8),
('F', Qt.Key_F, 9),
('Al', Qt.Key_A, 13),
('Si', Qt.Key_Q, 14),
('P', Qt.Key_P, 15),
('S', Qt.Key_S, 16),
('Cl', Qt.Key_L, 17)]
# ==
# values for assy.selwhat variable [moved here from assembly.py by bruce 050519]
# bruce 050308 adding named constants for selwhat values;
# not yet uniformly used (i.e. most code still uses hardcoded 0 or 2,
# and does boolean tests on selwhat to see if chunks can be selected);
# not sure if these would be better off as assembly class constants:
# values for assy.selwhat: what to select: 0=atoms, 2 = molecules.
# SELWHAT_NAMES is for use in human-readable messages.
SELWHAT_ATOMS = 0
SELWHAT_CHUNKS = 2
SELWHAT_NAMES = {SELWHAT_ATOMS: "Atoms", SELWHAT_CHUNKS: "Chunks"}
# mark 060206 adding named constants for selection shapes.
SELSHAPE_LASSO = 'LASSO'
SELSHAPE_RECT = 'RECTANGLE'
# mark 060206 adding named constants for selection logic.
SUBTRACT_FROM_SELECTION = 'Subtract Inside'
OUTSIDE_SUBTRACT_FROM_SELECTION = 'Subtract Outside'
# OUTSIDE_SUBTRACT_FROM_SELECTION is used only in CrystalShape.py
ADD_TO_SELECTION = 'Add'
START_NEW_SELECTION = 'New'
DELETE_SELECTION = 'Delete'
# ==
DEFAULT_COMMAND = 'SELECTMOLS' # commandName of default command
# command level constants [bruce 080725]
CL_DEFAULT_MODE = 'CL_DEFAULT_MODE'
CL_ENVIRONMENT_PROVIDING = 'CL_ENVIRONMENT_PROVIDING'
CL_MISC_TOPLEVEL = 'CL_MISC_TOPLEVEL'
CL_SUBCOMMAND = 'CL_SUBCOMMAND'
CL_EDIT_GENERIC = 'CL_EDIT_GENERIC'
CL_EXTERNAL_ACTION = 'CL_EXTERNAL_ACTION'
CL_GLOBAL_PROPERTIES = 'CL_GLOBAL_PROPERTIES'
CL_VIEW_CHANGE = 'CL_VIEW_CHANGE'
CL_REQUEST = 'CL_REQUEST' # for a request command (only one level?)
CL_ABSTRACT = 'CL_ABSTRACT' # for abstract command classes
# (warning if instantiated directly)
CL_UNUSED = 'CL_UNUSED' # for command classes thought to be presently unused
# (warning if instantiated directly, or (if practical) if a subclass is
# instantiated)
# ==
# The far clipping plane normalized z value, actually it's a little closer
# than the actual far clipping plane to the eye. This is used to draw the blue
# sky backround polygon, and also used to check if user click on empty space
# on the screen.
GL_FAR_Z = 0.999
# ==
# Determine CAD_SRC_PATH.
# For developers, this is the directory containing
# NE1's toplevel Python code, namely .../cad/src;
# for users of a built release, this is the directory
# containing the same toplevel Python modules as that does,
# as they're built into NE1 and importable while running it.
# Note that if ALTERNATE_CAD_SRC_PATH is defined, it will influence
# the value of this constant (i.e. this constant always honors
# that value).
# [bruce 080111, comment revised 080721]
try:
__file__
except:
# CAD_SRC_PATH can't be determined (by the present code)
# (does this ever happen?)
print "can't determine CAD_SRC_PATH"
CAD_SRC_PATH = None
else:
CAD_SRC_PATH = os.path.dirname(__file__)
assert os.path.basename(CAD_SRC_PATH) == "utilities"
CAD_SRC_PATH = os.path.dirname(CAD_SRC_PATH)
#print "CAD_SRC_PATH = %r" % CAD_SRC_PATH
# [review: in a built Mac release, CAD_SRC_PATH might be
# .../Contents/Resources/Python/site-packages.zip, or a related pathname
# containing one more directory component; but an env var RESOURCEPATH
# (spelling?) should also be available (only in a release and only on Mac),
# and might make more sense to use then.]
pass
# end
| NanoCAD-master | cad/src/utilities/constants.py |
# Copyright 2006-2007 Nanorex, Inc. See LICENSE file for details.
"""
parse_utils.py -- utilities for general parsing, and for parsing streams of python tokens.
Also a prototype "description class" which can be used to represent results of parsing a "description".
Also an example grammar, which can be used for parsing "parameter-set description files".
(FYI: All these things are used to parse "parameter dialog description files", *.desc.)
@author: Bruce
@version: $Id$
@copyright: 2006-2007 Nanorex, Inc. See LICENSE file for details.
TODO:
This ought to be split into several files, and generalized, and ThingData renamed
and cleaned up. And optimized (easy), since the parser is probably quadratic time
in input file size, at least when used in the usual way, on a list that comes
from generate_tokens.
"""
from tokenize import generate_tokens
from tokenize import tok_name
import sys
# == basic general parser
debug_grammar = False # set to True for more info about grammar in syntax error messages
class ParseFail(Exception): pass #e make it a more specific kind of exception?
class SyntaxError(Exception): pass
def parse(pat, rest):
    """
    Parse the input `rest` according to pattern `pat`.

    Either return (res, newrest) -- the parse result plus the remaining,
    unconsumed input -- or raise ParseFail or SyntaxError.

    If pat carries a 'result' keyword option (a postprocessing function),
    it is applied to res before returning.
    """
    ## if type(pat) == type(""):
    ##     pass
    #e other python types with special meanings, like list?
    try:
        retval = "<none yet>" # for debugging
        retval = pat.parse(rest)
        res, newrest = retval
    except ParseFail:
        # normal control flow: this alternative didn't match; let caller try others
        raise
    except SyntaxError:
        raise
    except:
        # any other exception means the pattern itself misbehaved;
        # convert it to a SyntaxError carrying position info
        print "retval at exception: %r" % (retval,)
        raise SyntaxError, "exception %s %s at %s" % (sys.exc_info()[0], sys.exc_info()[1], describe_where(rest)) ###k ##e need more info, about pat?
    ##e maybe also postprocess the results using another method of pat, or call pat.parse1 to do it all,
    # to support uniform keyword args for postprocess funcs (with access to rest's env)
    try:
        resultfunc = pat.kws.get('result') # this would be cleaner if some pat method did the whole thing...
    except:
        # pat has no .kws attribute (not a ParseRule instance) -- no postprocessing
        return res, newrest
    if resultfunc:
        try:
            return resultfunc(res), newrest
        except:
            print "resultfunc %r failed on res %r" % (resultfunc, res) # this seems often useful
            raise
    return res, newrest
def parse_top(pat, rest):
    """
    Top-level parsing entry point: like parse(), but never raises.
    On failure, return a string describing the error in place of the
    parse result, paired with None in place of the remaining input.
    """
    try:
        return parse(pat, rest)
    except ParseFail:
        return "ParseFail", None
    except SyntaxError, e:
        return 'SyntaxError: ' + str(e), None
    pass
class ParseRule:
    """
    Subclasses are specific parse-rule constructors; their instances are
    therefore parse rules.
    """
    def __init__(self, *args, **kws):
        # positional args are the rule's subpatterns (or other rule-specific
        # data); keyword args may include options such as 'result', a
        # postprocessing function applied by parse()
        self.args = args
        self.kws = kws
        self.validate()
        return
    def validate(self):
        """
        Subclasses can have this check for errors in args and kws,
        preprocess them, etc.
        """
        #e the subclasses in this example might not bother to define this as an error checker
        pass
    #e need __str__ for use in syntax error messages
    pass
class Seq(ParseRule):
    """
    Match each arg pattern in sequence; the result is the list of their
    individual results.

    A ParseFail on the *first* element fails the whole Seq cleanly
    (re-raised, so the caller can try alternatives); a ParseFail after at
    least one element matched is promoted to SyntaxError, since the input
    is then committed to this rule.
    """
    def parse(self, rest):
        res = []
        for arg in self.args:
            try:
                res0, rest = parse(arg, rest)
            except ParseFail:
                if not res:
                    raise
                # this is mostly useless until we have sensible __str__ (and even then, is mainly only useful for debugging grammar):
                if debug_grammar:
                    msg = "Can't complete %s, stuck at arg %d = %s\nat %s" % (self, len(res) + 1, arg, describe_where(rest))
                else:
                    msg = "%s" % (describe_where(rest),)
                raise SyntaxError, msg
            res.append(res0)
        return res, rest
    pass
class Alt(ParseRule):
    """
    Match the first of several alternative patterns that succeeds;
    raise ParseFail if none of them does.
    """
    def parse(self, rest):
        for alternative in self.args:
            try:
                result = parse(alternative, rest)
            except ParseFail:
                # this alternative didn't match; try the next one
                continue
            return result
        raise ParseFail
    pass
# == higher-level general parser utilities
class ForwardDef(ParseRule):
    """
    For defining placeholders for recursive patterns;
    by convention, arg0 (optional) is some sort of debug name;
    self.pat must be set by caller before use
    """
    def parse(self, rest):
        # delegate entirely to the pattern installed after construction
        return parse(self.pat, rest)
    pass
def ListOf(pat):
    """
    Return a pattern matching 0 or more occurrences of pat;
    the result is a single flat list of the individual results.
    """
    # built via the recursive grammar: ListOf(pat) ::= [ pat ListOf(pat) ]
    res = ForwardDef()
    res.pat = Optional(Seq(pat, res,
        result = lambda (p,r): [p] + r # fix retval format to be a single list (warning: quadratic time)
            # note: this has value None: [p].extend(r)
            # (and it too would make entire ListOf take quadratic time if it worked)
        ))
    return res
def Optional(pat):
    """
    Return a pattern matching pat if possible, or nothing (zero tokens).
    """
    return Alt(pat, Nothing)
class NothingClass(ParseRule):
    """
    A pattern which always matches, consuming no input.
    """
    def parse(self, rest):
        return [], rest # the fact that this is [] (rather than e.g. None) is depended on by ListOf's result lambda
    pass

# singleton instance; use this rather than constructing NothingClass again
Nothing = NothingClass()
# == some things that are specific for rest being a list of 5-tuples coming from tokenize.generate_tokens
##e ideally, these would be methods of a class which represented this kind of input, and had an efficient .next method,
# and methods for other kinds of access into it; the current implem might be quadratic time in the number of tokens,
# depending on how python lists implement the [1:] operation.
def describe_where(rest):
    """
    Return a human-readable, multi-line description of the position of
    the first token in rest (a list of token 5-tuples as returned by
    tokenize.generate_tokens), or "end of input" if rest is empty.
    """
    if not rest:
        return "end of input"
    first_token = rest[0]
    srow, scol = first_token[2]
    line = first_token[4]
    header = "line %d, column %d:" % (srow, scol) # tested! exactly correct (numbering columns from 0, lines from 1)
    shown_line = "*******>" + line.rstrip() ##e should also turn tabs to spaces -- until we do, use a prefix of length 8
    pointer = "*******>" + ' ' * scol + '^'
    return '\n'.join([header, shown_line, pointer])
def token_name(rest):
    """
    Return the tokenize type-name (e.g. 'NAME', 'OP') of the first token
    in rest, or None if rest is empty.
    """
    if rest:
        return tok_name[rest[0][0]]
    return None
IGNORED_TOKNAMES = ('NL', 'COMMENT') # this is really an aspect of our specific grammar
# note: NL is a continuation newline, not a syntactically relevant newline
# (for that see Newline below, based on tokentype NEWLINE)
class TokenType(ParseRule):
    """
    Match one token from a tokenize.generate_tokens 5-tuple stream, by
    token-type name(s) (arg0, one name as a string or several as a list)
    and an optional predicate on the result (arg1).

    Tokens whose type-name is in IGNORED_TOKNAMES are skipped first.
    The result is the token string, or the pair (tokname, tokstring) if
    the 'want_tokname' option is true (its default is true iff arg0 was
    given as a list).
    """
    def validate(self):
        # preprocess args: normalize toknames to a list, record options
        toknames = self.args[0]
        # let this be a string (one name) or a list (multiple names) (list has been tested but might not be currently used)
        want_tokname_dflt = True
        if type(toknames) == type(""):
            want_tokname_dflt = False
            toknames = [toknames]
        self.want_tokname = self.kws.get('want_tokname', want_tokname_dflt)
        assert type(toknames) == type([])
        for tokname in toknames:
            assert type(tokname) == type("") and tokname in tok_name.itervalues(), \
                   "illegal tokname: %r (not found in %r)" % \
                   ( tokname, tok_name.values() )
        self.toknames = toknames
        try:
            self.cond = self.args[1]
        except IndexError:
            # no predicate given -- accept any token of a matching type
            self.cond = lambda tokstring: True
    def parse(self, rest):
        """
        assume rest is a list of token 5-tuples as returned by generate_tokens
        """
        while rest and token_name(rest) in IGNORED_TOKNAMES:
            rest = rest[1:] # this might be inefficient for long inputs, and for that matter, so might everything else here be
            # note, this filtering is wasted (redone many times at same place) if we end up parsefailing, but that's tolerable for now
        if not rest or token_name(rest) not in self.toknames:
            raise ParseFail
        tokstring = rest[0][1]
        if self.want_tokname:
            res = (token_name(rest), tokstring)
        else:
            res = tokstring
        if not self.cond(res):
            raise ParseFail
        return res, rest[1:]
    pass
def Op( opstring):
    """
    Return a parse rule matching a single OP token whose string equals opstring.
    """
    def _is_this_op(token):
        return token == opstring
    return TokenType('OP', _is_this_op)

### REVIEW: why doesn't this closure need the "opstring = opstring" kluge?
# Has not needing it been tested? [bruce comment 070918]
# (the question applies equally to the original lambda form and to the
#  named closure above -- both capture opstring as a free variable)
# == the specific grammar of "parameter-set description files"
# TODO: split this grammar (including IGNORED_TOKNAMES above, somehow) into its own file
# thing = name : arglist
# optional indented things
# ignore everything else (some are errors) (easiest: filter the ok to ignore, stop at an error, print it at end)
def make_number(token, sign = 1): # other signs untested
    """
    Convert a numeric token string to a Python number (int if possible,
    otherwise float), multiplied by sign.

    Notes on changes from the original version:
    - the separate long() pass was redundant, since in Python 2 int()
      automatically returns a long for out-of-range literals; dropping it
      also keeps this code portable to Python 3;
    - the loop variable no longer shadows the builtin 'type';
    - the bare except is narrowed to the exceptions a failed numeric
      conversion can actually raise.

    @raise SyntaxError: if token is not a legal number.
       ### flaw: this doesn't include desc of where it happened...
    """
    for converter in (int, float):
        try:
            return converter(token) * sign
        except (ValueError, TypeError, OverflowError):
            pass
    raise SyntaxError("illegal number: %r" % (token,))
# == token-type singletons used by the grammar below
Name = TokenType('NAME')
Colon = Op(':')
Minus = Op('-')
End = TokenType('ENDMARKER')
Newline = TokenType('NEWLINE')
# Arg = TokenType(['NUMBER', 'STRING', 'NAME'])
Number = TokenType('NUMBER', result = make_number)
# (a second, redundant "Name = TokenType('NAME')" assignment which
#  appeared here was removed -- it was identical to the one above)
String = TokenType('STRING', result = eval)
    # eval is to turn '"x"' into 'x'; it's safe since the tokenizer promises this is a string literal
# String, Name
# Arg: one argument value -- a number, string, name, or negated number
Arg = Alt( Number,
           String,
           Name, #e do STRING & NAME results need to be distinguished?? We'll see...
           Seq( Minus, Number, result = lambda (m,n): - n )
           # note: tuple-parameter lambdas like the above are Python-2-only syntax
          )

Arglist = ListOf(Arg) # not comma-sep; whitespace sep is ok (since ws is ignored by tokenizer) ##k test that!

def Indented(pat):
    # match pat wrapped in an INDENT/DEDENT pair, keeping only pat's result
    return Seq(TokenType('INDENT'), pat, TokenType('DEDENT'), result = lambda (i,p,d): p )

# Thing: the main recursive construct -- "name : arglist" followed by an
# optional indented list of sub-Things; ForwardDef permits the self-reference
Thing = ForwardDef("Thing")
Thing.pat = Seq( Name, Colon, Arglist, Newline, Optional(Indented(ListOf(Thing))),
                 result = lambda (name,c,args,nl,subthings): makeThing(name, args, subthings)
                )

# Whole: the entire input -- a list of Things followed by ENDMARKER
Whole = Seq(ListOf(Thing), End, result = lambda (lt,e): lt )
# ==
# Description objects (prototype)
class attr_interface_to_dict:
    """
    Wrap a dictionary to provide read-only attribute access to its items:
    obj.foo returns dict['foo'], raising AttributeError when missing.

    Names starting with '_' (which includes all of our own methods and
    data) always raise AttributeError, so dict contents can never shadow
    Python special attribute names and change our semantics. If we decide
    _dict1 should be able to contain some names of this form, we could
    make the prohibition tighter, as long as it still covered all Python
    special attrnames and our own attr/method names.
    """
    # make sure all of our methods and data start with '_'!
    def __init__(self, _dict1):
        self._dict1 = _dict1
    def __getattr__(self, attr): # in class attr_interface_to_dict
        if attr.startswith('_'):
            # modernized from "raise AttributeError, attr" -- identical
            # behavior in Python 2, and also valid in Python 3
            raise AttributeError(attr)
        try:
            return self._dict1[attr]
        except KeyError:
            raise AttributeError(attr)
    pass
class Info:
    """
    Lightweight base class which stores positional arguments in
    self._args and keyword arguments in self.kws, then calls self.init()
    as a hook for subclass initialization.
    (Sort of like ParseRule -- unify them?)
    """
    def init(self):
        # default: nothing extra to initialize; subclasses override
        pass
    def __init__(self, *_args, **kws):
        self._args = _args
        self.kws = kws
        self.init()
    def __repr__(self):
        ##k crude approximation of a useful repr
        return "%s%r" % (self.__class__.__name__, self._args)
    pass
class ThingData(Info):
    """
    #doc...
    the data in a thing

    Holds one parsed "name : args" node: self.name (a string),
    self.thingargs / self.args (the argument values), self.subthings
    (child ThingData nodes -- both real subobjects and option settings),
    and self.options / self.option_attrs (option settings collected from
    subthings, as a dict and as an attribute interface to that dict).
    """# could this be Thing -- that symbol's value would be both a constructor and a parserule... not sure...
    options = {} # save RAM & time by sharing this when surely empty... could cause bugs if it's altered directly by outside code
    option_attrs = attr_interface_to_dict(options) # also shared, also must be replaced if nonempty
    def init(self):
        self.name, self.thingargs, self.subthings = self._args # possible name conflict: .args vs .thingargs
        #070330 improving the internal documentation:
        ## print "debug ThingData: name = %r, thingargs = %r, subthings = %r" % self._args
        # for an option setting like "max = 9999.0":
        #   name = 'max', thingargs = [9999.0], subthings = []
        #   so name is the option name, thingargs contains one member which is the value, subthings is empty.
        # for a subobject:
        #   name = 'parameter', thingargs = ['L2'], subthings = [ThingData()...]
        #   so name is the type (or used by the parent to choose the type), thingargs has one (optional?) member which is the name,
        #   and subthings contains both actual subobjects and option settings.
        # for widget: combobox, two kluges are used: it acts as both a subthing and an option setting,
        # and its own subthings, which look like option settings, also have an order which is preserved (I think).
        # Both kluges apply to everything -- all option settings stay around in the subthings list,
        # and (I think) all subthing typenames get treated as options set to the subthing name.
        self.args = self.thingargs # already assumed twice, in the using code for desc... should translate remaining thingargs -> args
        if self.subthings:
            # replace the (shared, deliberately empty) class-level dict
            # with per-instance ones before storing anything in them
            self.options = {}
            self.option_attrs = attr_interface_to_dict(self.options)
            ## self.optattrs = AttrHolder() # not yet needed... maybe better to make an obj providing attr interface to self.options
            for sub in self.subthings:
                sub.maybe_set_self_as_option_in(self.options)
            ## print "options:",self.options
        return
    def maybe_set_self_as_option_in(self, dict1):
        """
        If self is an option setting, set it in dict1
        """
        #e in future might need more args, like an env, or might need to store a formula
        # (which might indicate switching to getattr interface?)
        if not self.subthings and len(self.thingargs) == 1:
            dict1[self.name] = self.thingargs[0]
        elif len(self.thingargs) == 1:
            # this is the "simplest hack that could possibly work" to let widget=combobox work as a condition
            # even though it has subthings which further define the combobox. we'll see if doing it generally
            # causes any trouble and/or is useful in other cases. Note that we stored only 'combobox' as the value,
            # not a combobox datum extended with those subthings (its items). (As if the cond was really about the class of value?)
            dict1[self.name] = self.thingargs[0]
    def pprint(self, indent = ""):
        # print an indented, human-readable rendering of self and (recursively) its subthings
        name, args, subthings = self._args
        print indent + name + ': ' + ', '.join(map(repr,args)) # works decently except for 1.4099999999999999 rather than 1.41
        for sub in subthings:
            sub.pprint( indent + ' ' )
    def kids(self, kinds):
        """
        Return the subthings whose name is in kinds (a string, or a
        sequence of strings).
        """
        # kinds could already be a list
        if type(kinds) == type(''):
            kinds = (kinds,)
        res = []
        for kid in self.subthings:
            # could be a real kid, or just an assignment; use kinds to tell (assume caller knows what it's doing)
            #e (maybe we even permit multiple assignments and let the last one done *before a kid is made* control that kid????)
            if kid.name in kinds:
                res.append(kid)
        return res
    def isa(self, kind, **conds):
        """
        Predicate: are we of this kind, and do we match conditions like xxx=yyy for our option xxx being yyy?
        """
        #### LOGIC BUG: for symbolic options, the stored value is a string, the match is a string, all ok.
        # but for booleans, the stored val is 'true' or 'false' -- not ok. How do we get best of all these worlds?? ####
        if self.name != kind:
            return False
        for param, val in conds.items():
            #### LOGIC BUG 2 - do we match if we don't store param at all? who supplies defaults?
            # for now: let missing param be the same as a value of None. (this is used in matching widget = ('lineedit',None), etc)
            if self.matches( self.options.get(param, None), val):
                pass
            else:
                return False
            continue
        return True
    def matches(self, paramval, valpattern):
        # equality match, or membership when valpattern is a tuple of alternatives
        return paramval == valpattern or (type(valpattern) == type(()) and paramval in valpattern)
            # note: 'in' is not using recursive match, just ==
    def as_expr(self):
        """
        Return an Expr form of self. (Only call after it's fully parsed, since parsing is destructive.)
        """
        # 070330, experimental. Will require exprs module. Not yet called. For now, advise don't call except when a debug_pref is set.
        #e name -> exprhead? using an env? via a Symbol?
        pass
    pass
def makeThing(name, args, subthings):
    """
    #doc...
    Construct a ThingData from a parsed "name : args" line and its subthings.

    Note: we don't yet know if the ThingData we return will end up as a subobject
    or an option-value-setting of its parent... its parent will call
    thingdata.maybe_set_self_as_option_in(parent) to make and execute that decision.
    """
    if not args and not subthings:
        # a bare "name:" line is suspicious but not treated as fatal
        print "warning: \"%s:\" with no args or subthings" % (name,)
    return ThingData(name, args, subthings)
# == test code (might not be up to date)

if __name__ == '__main__':
    from pprint import pprint
    ## filename = "testui.txt"
    filename = "../plugins/CoNTub/HJ-params.desc"
    file = open(filename, 'rU')
    gentok = generate_tokens(file.readline)
    # print gentok # a generator object
    # pprint( list(gentok) ) # works
    if 0: # works
        # debugging aid (disabled): dump the raw token stream, sorted
        res = []
        for toktype, tokstring, (srow, scol), (erow, ecol), line in gentok:
            # print toktype, tokstring
            res.append( (toktype, tok_name[toktype], tokstring) )
        res.sort()
        pprint(res)
    res, newrest = parse_top(Whole, list(gentok))
    # note: backquotes below are Python-2-only shorthand for repr()
    print len(` res `), 'chars in res' #3924
    print res # might be an error message
    if newrest is not None:
        print newrest
    print res[0].pprint() #k
    print "test done"

# that might be working... now what?
# the language has an ambiguity... exprhead:args {moreargs}, vs option:val meaning option=val.
# we'll turn things into objects, recognize some subthings as those objects and others as assigments (perhaps with decorations).
# (or, we'll decide that all assignments use '=' not ':'. Tried it... decided too hard to casually write the file this way.)

#### possible bugs:
# - I never tried a negative or explicit-positive number -- now, negative works, explicit-positive doesn't but should ###
# - Won't work for args sep by comma or in parens (doesn't yet matter)

# end
| NanoCAD-master | cad/src/utilities/parse_utils.py |
NanoCAD-master | cad/src/utilities/__init__.py |
|
# Copyright 2006-2008 Nanorex, Inc. See LICENSE file for details.
"""
exception_classes.py -- exception classes for general use
@author: Will
@version: $Id$
@copyright: 2006-2008 Nanorex, Inc. See LICENSE file for details.
Note:
This module can't be called exceptions.py, since that confuses
some other modules which try to import the Python builtin module
of the same name.
History:
Will wrote these in GeneratorBaseClass.py for its own use.
Since then they have become used by unrelated code,
so Bruce 080730 moved them into their own file.
"""
# REVIEW: AbstractMethod should ideally be merged with the other
# 2 or 3 variants of this idea, or replaced with the exception built into
# Python for this purpose.
# [070724 code review]
class AbstractMethod(Exception):
    """
    Raised when an abstract method which subclasses must overload is called.
    """
    def __init__(self):
        message = "Abstract method - must be overloaded"
        Exception.__init__(self, message)
    pass
# REVIEW:
# The following should be taught to help print messages about themselves,
# so that handlePluginExceptions (in GeneratorBaseClass) doesn't need to
# catch each one individually. This should be revisited after our overall
# error handling code is revised.
# [070724 code review]
#
# REVIEW: I suspect these exceptions are not handled in the best way, and in
# particular, I am not sure it's useful to have a CadBug exception class,
# given that any unexpected exception (of any class) also counts as a "bug
# in the cad code".
# [bruce 070719 comments]
#
# The docstrings are also not good enough (all the same).
class CadBug(Exception):
    """
    Exception reporting a bug in the cad code -- one of three classes
    (CadBug, PluginBug, UserError) used to distinguish cad bugs,
    plugin errors, and user errors.
    """
    def __init__(self, arg = None):
        if arg is None:
            Exception.__init__(self)
        else:
            Exception.__init__(self, arg)
    pass
class PluginBug(Exception):
    """
    Exception reporting an error in the plugin -- one of three classes
    (CadBug, PluginBug, UserError) used to distinguish cad bugs,
    plugin errors, and user errors.
    """
    def __init__(self, arg = None):
        if arg is None:
            Exception.__init__(self)
        else:
            Exception.__init__(self, arg)
    pass
class UserError(Exception):
    """
    Exception reporting a user error -- one of three classes
    (CadBug, PluginBug, UserError) used to distinguish cad bugs,
    plugin errors, and user errors.
    """
    def __init__(self, arg = None):
        if arg is None:
            Exception.__init__(self)
        else:
            Exception.__init__(self, arg)
    pass
# end
| NanoCAD-master | cad/src/utilities/exception_classes.py |
# Copyright 2006-2008 Nanorex, Inc. See LICENSE file for details.
"""
GlobalPreferences.py
Routines that test for various global user preferences.
Note: this module is likely to be imported early, and should be
considered a low level support module. As such, importing it should
not drag in much else. As of 2007/09/05, that's probably not true
yet. [That goal may be impractical and not really necessary, given
the kinds of things in it so far -- bruce 080220 comment]
@author: Eric Messick
@version: $Id$
@copyright: 2006-2008 Nanorex, Inc. See LICENSE file for details.
"""
from utilities.prefs_constants import permit_atom_chunk_coselection_prefs_key
from utilities.debug_prefs import debug_pref, Choice_boolean_False, Choice_boolean_True
from utilities.debug import print_compact_traceback
import sys
# ==
# debug flags for investigating bug 2606 [bruce 080129];
# both should be disabled (False) for commits
DEBUG_BAREMOTION = False #bruce 080129, for bug 2606; should be disabled for commits
DEBUG_BAREMOTION_VERBOSE = False

# ==

# module-level cache for usePyrexAtomsAndBonds(): the decision whether to
# use pyrex atoms is made at most once per session
_pyrex_atoms_failed = False
_pyrex_atoms_succeeded = False
_pyrex_atoms_unwanted_this_session = False
def usePyrexAtomsAndBonds(): #bruce 080218, revised/renamed 080220
    """
    Should we, and if so can we successfully, import the necessary symbols
    from atombase (compiled from atombase.pyx and associated files)
    for using the "Pyrex atoms" C/Pyrex code to optimize classes Atom and Bond?

    @return: True if pyrex atoms were requested (via debug_pref) and the
             atombase import succeeded; False otherwise. The decision is
             cached in module globals so it is made at most once per session.
    """
    global _pyrex_atoms_failed, _pyrex_atoms_succeeded, _pyrex_atoms_unwanted_this_session
    # return the cached decision, if one was already made this session
    if _pyrex_atoms_failed or _pyrex_atoms_unwanted_this_session:
        return False
    if _pyrex_atoms_succeeded:
        return True
    res = debug_pref("Enable pyrex atoms in next session?",
                     Choice_boolean_False,
                     ## non_debug = True, # revised this option and menu text (thus prefs key), bruce 080221
                         # make this ATOM_DEBUG only for release (since it's a slowdown), bruce 080408
                     prefs_key = True)
    # uncomment the following line to temporarily override the above debug_pref,
    # e.g. to recover from trying it out and having it abort NE1 on startup
    # (hypothetical error, not known to happen):
    ## res = False # do not commit with this line active
    if res:
        # make sure it works, before telling caller to use it
        try:
            _test_atombase()
        except:
            # note: the known possible exceptions would be caught by
            # "except (ImportError, ValueError):"
            _pyrex_atoms_failed = True # don't try it again
            msg = "exception importing atombase as requested -- won't use it: "
            print_compact_traceback(msg)
            import foundation.env as env # import cycle??
            # note: in present implem of history [080220], this is printed too
            # early to show up in the widget, but hopefully that will be fixed
            env.history.redmsg("ERROR: unable to use experimental Pyrex Atoms and Bonds from atombase module; see console prints")
            res = False
        else:
            _pyrex_atoms_succeeded = True
            # for now, we need a can't miss note for success, as well (red, though not an error):
            print "\nNOTE: using experimental Pyrex Atoms and Bonds from atombase module\n"
            import foundation.env as env # import cycle??
            env.history.redmsg("NOTE: using experimental Pyrex Atoms and Bonds from atombase module")
        pass
    if not res:
        _pyrex_atoms_unwanted_this_session = True # might be because it failed
    assert _pyrex_atoms_failed or _pyrex_atoms_succeeded or _pyrex_atoms_unwanted_this_session
        # be sure we make up our mind whether to use them only once per session
        # (so debug pref change does not take effect until we rerun NE1)
    return res
def _test_atombase():
    """
    Try importing atombase and the specific names we need from it;
    raises an exception (presumably ImportError or ValueError -- see
    caller comment) on failure.
    """
    import atombase # this must not be made into a toplevel import!
    from atombase import AtomBase, AtomDictBase, BondBase, BondDictBase
    return
def debug_pyrex_atoms():
    """
    Debug pref: whether to debug pyrex atoms.
    """
    # kept ATOM_DEBUG only for release (not useful enough for non_debug),
    # bruce 080408; pass non_debug = True to restore wider visibility
    return debug_pref("debug pyrex atoms?",
                      Choice_boolean_False,
                      prefs_key = True )
# ==
# bruce 060721; was intended to become constant True for A9; as of 080320 it's not planned for A10
# but might be good to try to get to afterwards
def permit_atom_chunk_coselection():
    """
    Debug pref: whether to permit atom/chunk coselection.
    """
    # use Choice_boolean_True once this has no obvious bugs;
    # kept ATOM_DEBUG only for release (since maybe unsafe,
    # not useful since unsupported), bruce 080408
    return debug_pref("permit atom/chunk coselection?",
                      Choice_boolean_False,
                      prefs_key = permit_atom_chunk_coselection_prefs_key )
# ==
def disable_do_not_draw_open_bonds():
    """
    Whether to disable all behavior which under some conditions
    refrains from drawing open bonds or bondpoints
    which would be drawn according to "traditional" rules
    (those in place before 2007).

    Can be useful for debugging, if the developer remembers it's enabled.
    """
    # the menu text starts with "DNA" because the special rules this turns
    # off only affect DNA; left visible w/o ATOM_DEBUG for release
    # [bruce 080406/080408]
    return debug_pref("DNA: draw all open bonds?",
                      Choice_boolean_False,
                      non_debug = True,
                      prefs_key = True)
# ==
def _debug_pref_use_dna_updater(): #bruce 080320 moved this here from master_model_updater.py, made private
    """
    Debug pref: whether the dna updater is enabled.
    """
    # menu text, default (False -> True) and prefs_key revised by bruce
    # 080317; kept ATOM_DEBUG only for release (since unsafe to change
    # (undo bugs), not useful since off is more and more unsupported),
    # bruce 080408
    return debug_pref("DNA: enable dna updater?",
                      Choice_boolean_True,
                      prefs_key = "A10/DNA: enable dna updater?"
                      )
def dna_updater_is_enabled(): #bruce 080320
    """
    Public query: is the dna updater enabled (per the debug pref above)?
    """
    return _debug_pref_use_dna_updater()
# ==
def debug_pref_enable_pam_convert_sticky_ends(): #bruce 080514; remove when this feature fully works
    """
    Debug pref: whether converting sticky ends to PAM5 makes ghost bases.
    """
    # menu text revised bruce 080529; default value & prefs_key revised
    # bruce 080602
    return debug_pref("DNA: ghost bases when converting sticky ends to PAM5?",
                      Choice_boolean_True,
                      non_debug = True, #bruce 080529
                      prefs_key = "v1.1/DNA: PAM3+5 make ghost bases for sticky ends?"
                      )

debug_pref_enable_pam_convert_sticky_ends() # exercise it at import time
def debug_pref_remove_ghost_bases_from_pam3(): #bruce 080602
    """
    Debug pref: whether converting to PAM3 removes ghost bases.
    """
    return debug_pref("DNA: remove ghost bases when converting to PAM3?",
                      Choice_boolean_True, # because they mess up DNA ui ops
                      non_debug = True, # because you should keep them for more accurate repeated Minimize
                      prefs_key = "v1.1/DNA: remove ghost bases when converting to PAM3?"
                      )

debug_pref_remove_ghost_bases_from_pam3() # exercise it at import time
# ==
def debug_pref_write_bonds_compactly(): #bruce 080328
    """
    Debug pref: whether mmp files are written with dna bonds in compact form.
    """
    # note: reading code for this was made active in same commit, 080328.
    # note: this could be used for non-dna single bond chains too,
    # so the function name, prefs_key, and associated abstract methods
    # needn't contain the term "dna", though the menu text is clearer
    # by containing it.
    # default will change to True as soon as all developers
    # have the necessary reading code
    return debug_pref("mmp format: write dna bonds compactly?",
                      Choice_boolean_False,
                      non_debug = True,
                      prefs_key = "A10/mmp format: write bonds compactly? "
                      )
def debug_pref_read_bonds_compactly(): #bruce 080328
    """
    Debug pref: whether mmp reading code accepts compact dna bond records.
    """
    # use Choice_boolean_False to simulate old reading code for testing;
    # kept ATOM_DEBUG only for release (not useful enough for non_debug),
    # bruce 080408
    return debug_pref("mmp format: read dna bonds compactly?",
                      Choice_boolean_True,
                      prefs_key = True # temporary
                      )

# exercise them at import time, to put them in the menu
debug_pref_write_bonds_compactly()
debug_pref_read_bonds_compactly()
# ==
def debug_pref_write_new_display_names(): #bruce 080328
    """
    Debug pref: whether mmp files are written with the new display names.
    """
    # note: reading code for this was made active a few days before 080328;
    # this affects *all* mmp files we write (for save, ND1, NV1).
    # default changed to True 080410, once all developers had the
    # necessary reading code
    return debug_pref("mmp format: write new display names?",
                      Choice_boolean_True,
                      non_debug = True,
                      prefs_key = "A10/mmp format: write new display names?"
                      )
def debug_pref_read_new_display_names(): #bruce 080328
    """
    Debug pref: whether mmp reading code accepts the new display names.
    """
    # use Choice_boolean_False to simulate old reading code for testing;
    # kept ATOM_DEBUG only for release (not useful enough for non_debug),
    # bruce 080408
    return debug_pref("mmp format: read new display names?",
                      Choice_boolean_True,
                      prefs_key = True # temporary
                      )

# exercise them at import time, to put them in the menu
debug_pref_write_new_display_names()
debug_pref_read_new_display_names()
# ==
def use_frustum_culling(): #piotr 080401
    """
    If enabled, perform frustum culling in GLPane.
    """
    # left visible w/o ATOM_DEBUG for release [bruce 080408]
    return debug_pref("GLPane: enable frustum culling?",
                      Choice_boolean_True,
                      non_debug = True,
                      prefs_key = "A10/GLPane: enable frustum culling?")
# ==
def pref_MMKit_include_experimental_PAM_atoms(): #bruce 080412
    """
    Debug pref: whether the MMKit includes experimental PAM atoms
    (takes effect next session).
    """
    # not on by default, and not visible without ATOM_DEBUG,
    # since these elements would confuse users
    return debug_pref("MMKit: include experimental PAM atoms (next session)?",
                      Choice_boolean_False,
                      prefs_key = "A10/MMKit: include experimental PAM atoms?" )
# ==
def pref_drop_onto_Group_puts_nodes_at_top(): #bruce 080414; added after 1.0.0rc0 was made
    """
    If enabled, nodes dropped directly onto Groups in the Model Tree
    are placed at the beginning of their list of children,
    not at the end as was done before.
    """
    # default True fixes a longstanding NFR;
    # left visible w/o ATOM_DEBUG for release [bruce 080414]
    return debug_pref("Model Tree: drop onto Group puts nodes at top?",
                      Choice_boolean_True,
                      non_debug = True,
                      prefs_key = "A10/Model Tree: drop onto Group puts nodes at top?")

pref_drop_onto_Group_puts_nodes_at_top()
    # exercise it at startup to make sure it's in the debug prefs menu

# TODO: have an init function in this file, run after history is available ###
# (not sure if first import of this file is after that)
# ==
def _kluge_global_mt_update():
    """
    Update the main window's model tree.
    """
    import foundation.env as env
        # note: this doesn't cause a module import cycle,
        # but it might be an undesirable inter-package import.
        # (it's also done in a few other places in this file.)
    env.mainwindow().mt.mt_update()
    return
def pref_show_node_color_in_MT():
    """
    If enabled, show node colors in the Model Tree.

    [bruce 080507, mainly for testing new MT method repaint_some_nodes;
     won't yet work for internal groups that act like MT leaf nodes
     such as DnaStrand]
    """
    return debug_pref("Model Tree: show node colors?",
                      Choice_boolean_False,
                      prefs_key = True,
                      call_with_new_value = (lambda val: _kluge_global_mt_update())
                      )
def pref_show_highlighting_in_MT():
    """
    If enabled, highlighting objects in GLPane causes corresponding
    highlighting in the MT of their containing nodes,
    and (in future) mouseovers in MT may also cause highlighting
    in both places. [bruce 080507]
    """
    return debug_pref("Model Tree: show highlighted objects?",
                      Choice_boolean_True,
                      non_debug = True,
                      prefs_key = True,
                      call_with_new_value = (lambda val: _kluge_global_mt_update())
                      )
# ==
# bondpoint_policy helper function and preferences.
# A future refactoring might make this a method or class,
# but for now it's unclear how to do that (sim_aspect
# needs this before it has a writemmp_mapping to ask it of),
# and there's only one global policy ever used (derived from prefs),
# so this way is easiest for now.
# [bruce 080507/080603]
def pref_minimize_leave_out_PAM_bondpoints(): #bruce 080507
    """
    If enabled, bondpoints on PAM atoms are left out of simulations
    and minimizations, rather than being converted to H (as always occurred
    until now) or anchored (not yet possible) or left unchanged.

    @warning: not yet fully implemented.
    """
    # default False: not yet safe or tested (and partly nim);
    # non_debug omitted since this won't be implemented for v1.1
    return debug_pref("Minimize: leave out PAM bondpoints? (partly nim)",
                      Choice_boolean_False,
                      prefs_key = True
                      )

pref_minimize_leave_out_PAM_bondpoints() # exercise it at import time
def pref_minimize_leave_PAM_bondpoints_unchanged(): #bruce 080603
    """
    If enabled, bondpoints on PAM atoms are left unchanged during simulations
    and minimizations, rather than being converted to H (as always occurred
    until now) or anchored (not yet possible) or left out (not yet correctly
    implemented).
    """
    return debug_pref("Minimize: leave PAM bondpoints unchanged?",
                      Choice_boolean_True,
                      non_debug = True, # should be easy to test or change
                      prefs_key = True
                      )

pref_minimize_leave_PAM_bondpoints_unchanged() # exercise it at import time
from utilities.constants import BONDPOINT_LEFT_OUT
from utilities.constants import BONDPOINT_UNCHANGED
from utilities.constants import BONDPOINT_ANCHORED # not yet specifiable
from utilities.constants import BONDPOINT_REPLACED_WITH_HYDROGEN
def bondpoint_policy(bondpoint, sim_flag): #bruce 080507/080603
"""
Determine how to treat the given bondpoint,
perhaps influenced by debug_prefs and/or whether we're writing
to a simulation that wants bondpoints modified (sim_flag).
@return: one of these constants:
BONDPOINT_LEFT_OUT,
BONDPOINT_UNCHANGED,
BONDPOINT_ANCHORED,
BONDPOINT_REPLACED_WITH_HYDROGEN.
@see: nsinglets_leftout in class sim_aspect
@see: sim attribute in class writemmp_mapping
"""
## assert bondpoint.element is Singlet # (no need, and avoid import)
if not sim_flag:
return BONDPOINT_UNCHANGED
if len(bondpoint.bonds) != 1:
# should never happen
print "bug: %r has %d bonds, namely %r" % \
(bondpoint, len(bondpoint.bonds), bondpoint.bonds)
## someday: return BONDPOINT_LEFT_OUT # or BONDPOINT_UNCHANGED??
# for now, only this is safe:
return BONDPOINT_UNCHANGED
other = bondpoint.bonds[0].other(bondpoint)
if other.element.pam:
if pref_minimize_leave_out_PAM_bondpoints():
return BONDPOINT_LEFT_OUT # BUG: not yet fully implemented by callers
elif pref_minimize_leave_PAM_bondpoints_unchanged():
return BONDPOINT_UNCHANGED
else:
return BONDPOINT_REPLACED_WITH_HYDROGEN
else:
return BONDPOINT_REPLACED_WITH_HYDROGEN
pass
# ==
def pref_create_pattern_indicators():
    """
    If enabled, each run of Minimize or Simulate
    (in any form, e.g. Adjust or Adjust Atoms too)
    creates graphical pattern indicators,
    and records extra atom and bond tooltip info,
    to show details of how the force field is implemented
    on the current model. [bruce 080520]
    """
    return debug_pref("Minimize: force field graphics?",
                      Choice_boolean_False,
                      non_debug = True,
                      prefs_key = True
                      )

pref_create_pattern_indicators() # exercise it at import time
# ==
def pref_skip_redraws_requested_only_by_Qt():
    """
    If enabled, GLPane paintGL calls not requested by our own gl_update calls
    are skipped, as an optimization. See comments where used for details
    and status. Default value depends on platform as of 080516.
    """
    #bruce 080516 moved this here, revised default to be off on Windows
    if sys.platform == "win32":
        # Windows -- enabling this causes bugs on at least one system
        default_choice = Choice_boolean_False
    else:
        # non-Windows -- no known bugs as of 080516
        #bruce 080512 made this True, revised prefs_key
        default_choice = Choice_boolean_True
    return debug_pref("GLPane: skip redraws requested only by Qt?",
                      default_choice,
                      non_debug = True, #bruce 080130
                      prefs_key = "GLPane: skip redraws requested only by Qt?"
                      )

pref_skip_redraws_requested_only_by_Qt() # exercise it at import time
# ==
def debug_pref_support_Qt_4point2(): #bruce 080725
    """
    Debug pref: whether to support Qt 4.2 (takes effect next session).
    """
    return debug_pref("support Qt 4.2 (next session)?",
                      Choice_boolean_False,
                      prefs_key = True
                      )
# ==
# whether the Proteins feature set is enabled; evaluated once at import
# time, so changing the debug pref takes effect only in the next session
ENABLE_PROTEINS = debug_pref("Enable Proteins? (next session)",
                             Choice_boolean_True,
                             non_debug = True,
                             prefs_key = "v1.2/Enable Proteins?"
                             )
# ==
def _debug_pref_keep_signals_always_connected(): #Ninad 2008-08-13
    """
    Debug pref: if True (the default), property manager signals are
    connected when the PM is created; if False, the signals are connected
    in the PM's show() method and disconnected in its close() method.
    Takes effect next session.

    [Based on Bruce's comment 2008-09-23: a bug from signals remaining
    connected to an old command object (e.g. after file->close or
    file->open while in a command with signals such as extrude) is
    unlikely, because presumably the user can never see the old PM
    object; the underlying call is more of a memory leak issue than
    anything else (harder to fix).]
    """
    return debug_pref("Keep signals always connected (next session)?",
                      #bruce 080925 revised menu text
                      Choice_boolean_True,
                      prefs_key = True
                      )

KEEP_SIGNALS_ALWAYS_CONNECTED = _debug_pref_keep_signals_always_connected()
# ==
def _debug_pref_break_strands_feature(): #Ninad 2008-08-18
    """
    Debug pref for experimental code Ninad is working on
    (various break strands options); takes effect next session.
    Note that this flag is also used in BreakStrand_Command.
    """
    # NOTE(review): an original UPDATE comment (2008-08-19) claimed this
    # preference was set to True by default, but the default passed here
    # is Choice_boolean_False -- confirm which is intended
    return debug_pref("DNA: debug new break strands options feature (next session)",
                      Choice_boolean_False,
                      prefs_key = True
                      )

DEBUG_BREAK_OPTIONS_FEATURE = _debug_pref_break_strands_feature()
# end
| NanoCAD-master | cad/src/utilities/GlobalPreferences.py |
# Copyright 2005-2008 Nanorex, Inc. See LICENSE file for details.
"""
prefs_constants.py
Constants and utilities related to user preferences,
which need to be defined immediately upon startup.
@author: Mark, Bruce, Ninad
@version: $Id$
@copyright: 2005-2008 Nanorex, Inc. See LICENSE file for details.
History:
Mark 050629 moved some A6 prefs keys he had earlier defined and organized
in UserPrefs.py, into constants.py.
Bruce 050805 moved those into this new file, and added more.
Module classification:
"utilities" or perhaps "constants" for now, even though it can be
thought of as containing app-specific knowledge; for reasons and caveats
and desirable refactoring, see preferences.py docstring. The reason it
is even lower than foundation is to avoid package import cycles, e.g. if
foundation -> io -> this, or if utilities.GlobalPreferences imports this.
[bruce 071215]
Refactoring needed:
- See preferences.py docstring.
- Has a few functions that ought to be split out, like
getDefaultWorkingDirectory.
"""
### do no imports that would not be ok for constants.py to do! ###
from utilities.constants import yellow, pink, red, black, magenta, mustard
from utilities.constants import blue, gray, white, green, orange
from utilities.constants import lightgray, lightblue, lightgreen
from utilities.constants import darkred, darkblue, darkgreen
from utilities.constants import ave_colors, diBALL, bgBLUE_SKY
import sys, os # for getDefaultWorkingDirectory
# ==
# constants related to user chosen hover highlight and selection color
# HH = Hover Highlighting (color) Style
HHS_HALO = 'HALO'# default
HHS_SOLID = 'SOLID'
HHS_SCREENDOOR1 = 'SCREENDOOR1'
HHS_CROSSHATCH1 = 'CROSSHATCH1'
HHS_BW_PATTERN = 'BW_PATTERN'
HHS_POLYGON_EDGES = 'POLYGON_EDGES'
HHS_DISABLED = 'DISABLED'
# HHS_INDEXES and HHS_OPTIONS are presumably parallel lists (same order,
# one UI label per style constant) -- keep both in sync when editing.
HHS_INDEXES = [HHS_HALO, HHS_SOLID, HHS_SCREENDOOR1, HHS_CROSSHATCH1,
               ## russ 080604 NIMs: HHS_BW_PATTERN, HHS_POLYGON_EDGES, HHS_DISABLED]
               HHS_POLYGON_EDGES]
HHS_OPTIONS = ["Colored halo (default)",
               "Solid color",
               "Screendoor pattern",
               "Crosshatch pattern",
               ## russ 080604 NIM: "Black-and-white pattern",
               "Colored polygon edges",
               ## russ 080604 NIM: "Disable highlighting"
               ]
# SS = Selection (color) Style
SS_HALO = 'HALO'# default
SS_SOLID = 'SOLID'
SS_SCREENDOOR1 = 'SCREENDOOR1'
SS_CROSSHATCH1 = 'CROSSHATCH1'
SS_BW_PATTERN = 'BW_PATTERN'
SS_POLYGON_EDGES = 'POLYGON_EDGES'
# SS_INDEXES and SS_OPTIONS are parallel in the same way as the HHS lists.
SS_INDEXES = [SS_HALO, SS_SOLID, SS_SCREENDOOR1, SS_CROSSHATCH1,
              ## russ 080604 NIM: SS_BW_PATTERN,
              SS_POLYGON_EDGES]
SS_OPTIONS = ["Colored halo (default)",
              "Solid color",
              "Screendoor pattern",
              "Crosshatch pattern",
              ## russ 080604 NIM: "Black-and-white pattern",
              "Colored polygon edges"]
# Compass position constants. These are used to preserve the preference value
# for the compass position and relate directly to the radio button group values for the options
# presented in the Preferences/General dialog. Do not change the value of these 4 constants!
# Mark 050919.
# (Note: they are also used for other, analogous purposes. [bruce 071215 comment])
# UPPER_RIGHT will conflict with the Confirmation Corner when it is implemented in A10.
UPPER_RIGHT = 0 # May need to remove this option in A10. Mark 2007-05-07.
UPPER_LEFT = 1
LOWER_LEFT = 2 # New default. Mark 2007-05-07
LOWER_RIGHT = 3
# View Projection Types
PERSPECTIVE = 0
ORTHOGRAPHIC = 1
# Grid Plane Grid Line Types
NO_LINE = 0
SOLID_LINE = 1
DASHED_LINE = 2
DOTTED_LINE = 3
# Grid Plane Grid Types
SQUARE_GRID = 0
SiC_GRID = 1
# these match the indexes in minimize_engine_combobox in
# MinimizeEnergyPropDialog and UserPrefsDialog
MINIMIZE_ENGINE_UNSPECIFIED = -1
MINIMIZE_ENGINE_ND1_FOREGROUND = 0
MINIMIZE_ENGINE_GROMACS_FOREGROUND = 1
MINIMIZE_ENGINE_GROMACS_BACKGROUND = 2
# ==
# Keys for user preferences
# (the string constants should start with the first released version they'll appear in)
# General prefs
displayCompass_prefs_key = 'A6/Display Compass'
displayCompassLabels_prefs_key = 'A7/Display Compass Label'
compassPosition_prefs_key = 'A6/Compass Position'
displayOriginAxis_prefs_key = 'A6/Display Origin Axis'
originAxisColor_prefs_key = 'V111/Origin Axis Color'
displayPOVAxis_prefs_key = 'A6/Display POV Axis'
povAxisColor_prefs_key = 'V111/Point of View Axis Color'
# NOTE(review): the key string below says 'Display POV Axis' -- this looks
# like a copy/paste slip (cf. displayPOVAxis_prefs_key above). Changing the
# string now would orphan users' saved settings, so it is left as-is; confirm
# whether it should be migrated to a 'Display Confirmation Corner' key.
displayConfirmationCorner_prefs_key = 'V111/Display POV Axis'
enableAntiAliasing_prefs_key = 'V120/Full screen anti-aliasing'
defaultProjection_prefs_key = 'A7/Default Projection'
animateHighQualityGraphics_prefs_key = 'A7/Animate with High Quality Graphics' #mark 060315. NIY.
animateStandardViews_prefs_key = 'A7/Animate Standard Views'
animateMaximumTime_prefs_key = 'A7/Maximum Animation Time'
workingDirectory_prefs_key = 'WorkingDirectory' # Moved here from startup_funcs.py. Mark 060726.
mouseSpeedDuringRotation_prefs_key = 'A9/Mouse Speed During Rotation' #Ninad 060906
displayOriginAsSmallAxis_prefs_key = 'A9/Display Origin As Small Axis' #Ninad 060920
displayRulers_prefs_key = 'A10/Display rulers'
displayVertRuler_prefs_key = 'A10/Display vertical ruler'
displayHorzRuler_prefs_key = 'A10/Display horizontal ruler'
rulerPosition_prefs_key = 'A10/Ruler Position'
rulerColor_prefs_key = 'A10/Ruler Color'
rulerOpacity_prefs_key = 'A10/Ruler Opacity'
showRulersInPerspectiveView_prefs_key = 'A10/Show Rulers In Perspective View'
fogEnabled_prefs_key = "V110/Enable fog"
# Cursor text prefs on "Graphics Area" page.
cursorTextFontSize_prefs_key = "V120/Cursor text font size"
cursorTextColor_prefs_key = "V120/Cursor text color"
# General preferences for copy-paste operation (see ops_copy_mixin._pasteGroup
# for details). Feature introduced in v1.1.0, on 2008-06-06.
pasteOffsetScaleFactorForChunks_prefs_key = 'V110/Scale factor is used to offset chunks to be pasted w.r.t. original chunks'
pasteOffsetScaleFactorForDnaObjects_prefs_key = 'V110/Scale factor is used to offset dna objects to be pasted w.r.t. original dna objects'
# Color prefs (for "Color" page).
backgroundColor_prefs_key = 'A9/Background Color'
backgroundGradient_prefs_key = 'V1.2.0.0/Background Gradient'
hoverHighlightingColorStyle_prefs_key = 'V110/3D hover highlighting color style rev1'
hoverHighlightingColor_prefs_key = 'V110/3D hover highlighting color'
selectionColorStyle_prefs_key = 'V110/3D selection color style rev1'
selectionColor_prefs_key = 'V110/3D selection color'
haloWidth_prefs_key = 'V110/halo width in pixels'
# Special colors pref key(s).
# DarkBackgroundContrastColor_prefs_key provides a dark color (black or
# some dark shade of gray) that is guaranteed to contrast well with the current
# background color.
DarkBackgroundContrastColor_prefs_key = 'V111/Dark Background Contrast Color'
# LightBackgroundContrastColor_prefs_key provides a light color (white or
# some light shade of gray) that is guaranteed to contrast well with the current
# background color.
LightBackgroundContrastColor_prefs_key = 'V111/Light Background Contrast Color'
# Mouse wheel Prefs
mouseWheelDirection_prefs_key = 'A10/Mouse Wheel Direction'
zoomInAboutScreenCenter_prefs_key = 'A10/Mouse Wheel Zoom In To Screen Center'
zoomOutAboutScreenCenter_prefs_key = 'A10/Mouse Wheel Zoom Out To Screen Center'
mouseWheelTimeoutInterval_prefs_key = 'V110/Mouse Wheel Event Timeout Interval'
# Pan settings
panArrowKeysDirection_prefs_key = 'V1.2.0.0/Pan Arrow Keys Direction'
# GLPane scale preferences. As of 2008-04-07, the GLPane_scale_* preferences
# can not be set by the user. They are just used internally.
# @see: GLPane_Minimial.__init__() and GLPane._adjust_GLPane_scale_if_needed()
# for more implementation details
# The GLPane scale set up initially (at startup), i.e. while starting a new
# (empty) model
startup_GLPane_scale_prefs_key = 'A10/GLPane Scale at startup'
# Preferred GLPane scale if user starts with an empty model and directly enters
# commands such as Build Atoms.
GLPane_scale_for_atom_commands_prefs_key = 'A10/ Initial GLPane scale for Atom commands'
# Preferred GLPane scale if user starts with an empty model and, without changing
# the initial scale, directly enters a Dna command (such as BuildDna)
GLPane_scale_for_dna_commands_prefs_key = 'A10/Initial GLPane Scale for Dna commands'
# ToolTip Prefs
dynamicToolTipWakeUpDelay_prefs_key = 'A9/DynamicToolTip Wake Up Delay'
dynamicToolTipAtomDistancePrecision_prefs_key = 'A9/DynamicToolTip Atom Distance Precision'
dynamicToolTipBendAnglePrecision_prefs_key = 'A9/DynamicToolTip Bend Angle Precision'
dynamicToolTipTorsionAnglePrecision_prefs_key = 'A9/DynamicToolTip Torsion Angle Precision'
dynamicToolTipAtomChunkInfo_prefs_key = 'A9/DynamicToolTip Atom Chunk Info'
dynamicToolTipBondChunkInfo_prefs_key = 'A9/DynamicToolTip Bond Chunk Info'
dynamicToolTipAtomPosition_prefs_key = 'A9/DynamicToolTip Atom Position'
dynamicToolTipAtomDistanceDeltas_prefs_key = 'A9/DynamicToolTip Atom Distance Deltas'
dynamicToolTipBondLength_prefs_key = 'A9/DynamicToolTip Bond Length'
dynamicToolTipAtomMass_prefs_key = 'A9/DynamicToolTip Atom Mass'
dynamicToolTipVdwRadiiInAtomDistance_prefs_key = 'A10/tooltip Vdw Radii In Atom Distance'
# Minimize prefs for Adjust All and Adjust Selection (presently on General prefs pane)
# (note, Adjust Atoms does not yet have its own prefs -- its values are derived from these
# but differently than for Adjust All/Sel)
#mark 060627, revised by bruce 060628, 060705 for A8
Adjust_watchRealtimeMinimization_prefs_key = 'A7/Watch Realtime Minimization' # same key as in A7
Adjust_endRMS_prefs_key = 'A8/End RMS Adjust'
Adjust_endMax_prefs_key = 'A8/End Max Adjust'
Adjust_cutoverRMS_prefs_key = 'A8/Cutover RMS Adjust'
Adjust_cutoverMax_prefs_key = 'A8/Cutover Max Adjust'
Adjust_minimizationEngine_prefs_key = 'A10/Adjust Minimization Engine'
# Ninad 20070509: Adjust, Minimize and Simulation (Dynamics) preferences for the
# DNA reduced model (enable or disable electrostatics).
# (Note the leading spaces inside some of these key strings -- they are part of
# the stored key and must not be "fixed".)
electrostaticsForDnaDuringAdjust_prefs_key = 'A9/ Electrostatics for Dna During Adjust'
electrostaticsForDnaDuringMinimize_prefs_key = 'A9/ Electrostatics For Dna During Minimize'
electrostaticsForDnaDuringDynamics_prefs_key = 'A9/ Electrostatics For Dna During Simulation'
neighborSearchingInGromacs_prefs_key = 'A110/Neighbor Searching in GROMACS' # Eric M 20080515
# Minimize prefs for Minimize Energy dialog (independent settings, different defaults) [bruce 060705]
Minimize_watchRealtimeMinimization_prefs_key = 'A8/Watch Realtime Minimization Minimize'
Minimize_endRMS_prefs_key = 'A8/End RMS Minimize'
Minimize_endMax_prefs_key = 'A8/End Max Minimize'
Minimize_cutoverRMS_prefs_key = 'A8/Cutover RMS Minimize'
Minimize_cutoverMax_prefs_key = 'A8/Cutover Max Minimize'
Minimize_minimizationEngine_prefs_key = 'A10/Minimize Minimization Engine'
# Pref to add potential energy to trace file
Potential_energy_tracefile_prefs_key = 'A8/Potential energy checkbox'
# Atom prefs
atomHighlightColor_prefs_key = 'A6/Atom Highlight Color'
deleteAtomHighlightColor_prefs_key = 'A10/Delete Atom Highlight Color'
bondpointHighlightColor_prefs_key = 'A7/Bondpoint Highlight Color'
bondpointHotspotColor_prefs_key = 'A6/Atom Hotspot Color'
startupGlobalDisplayStyle_prefs_key = 'A6/Default Display Mode'
diBALL_AtomRadius_prefs_key = 'A7/CPK Atom Radius Percentage' # this is about diBALL which as of 060307 is called Ball and Stick in UI
#bruce 060607 renamed cpkAtomRadius_prefs_key -> diBALL_AtomRadius_prefs_key ###DOIT
cpkScaleFactor_prefs_key = 'A7/CPK Scale Factor' # this is about diTrueCPK which as of 060307 is called CPK in UI
levelOfDetail_prefs_key = 'A7/Level Of Detail'
keepBondsDuringTransmute_prefs_key = 'A9/Keep Bonds During Transmute'
reshapeAtomsSelection_prefs_key = 'A10/Reshape Atoms Selection in Build Atoms'
indicateOverlappingAtoms_prefs_key = "A10/GLPane: indicate overlapping atoms? "
# Bond prefs
bondHighlightColor_prefs_key = 'A6/Bond Highlight Color'
deleteBondHighlightColor_prefs_key = 'A10/Delete Bond Highlight Color'
bondStretchColor_prefs_key = 'A6/Bond Stretch Color'
bondVaneColor_prefs_key = 'A6/Bond Vane Color'
diBALL_bondcolor_prefs_key = 'A6/Bond CPK Color' # this is about diBALL, not CPK [bruce 060607 comment]
#bruce 060607 renamed bondCPKColor_prefs_key -> diBALL_bondcolor_prefs_key ###DOIT
showBondStretchIndicators_prefs_key = 'A9/ Show Bond Stretch Indicators'
pibondStyle_prefs_key = 'A6/Pi Bond Style'
pibondLetters_prefs_key = 'A6/Pi Bond Letters'
showValenceErrors_prefs_key = 'A6/Show Valence Errors'
# display lines mode line thickness, mark 050831
linesDisplayModeThickness_prefs_key = 'A7/Line Thickness for Lines Display Mode'
# CPK cylinder radius (percentage), mark 051003
diBALL_BondCylinderRadius_prefs_key = 'A7/CPK Cylinder Radius Percentage' # about diBALL, called Ball and Stick as of 060307
#bruce 060607 renamed cpkCylinderRadius_prefs_key -> diBALL_BondCylinderRadius_prefs_key ###DOIT
diDNACYLINDER_BondCylinderRadius_prefs_key = 'A10/DNA Cylinder Bond Radius Percentage'
#== DNA PREFERENCES ============================================================
adnaBasesPerTurn_prefs_key = 'A10/A-DNA bases per turn' # Twist computed from this.
adnaRise_prefs_key = 'A10/A-DNA rise step'
bdnaBasesPerTurn_prefs_key = 'A10/B-DNA bases per turn' # Twist computed from this.
bdnaRise_prefs_key = 'A10/B-DNA rise step'
zdnaBasesPerTurn_prefs_key = 'A10/Z-DNA bases per turn' # Twist computed from this.
zdnaRise_prefs_key = 'A10/Z-DNA rise step'
dnaDefaultStrand1Color_prefs_key = 'V110/DNA default strand1 color'
dnaDefaultStrand2Color_prefs_key = 'V110/DNA default strand2 color'
dnaDefaultSegmentColor_prefs_key = 'A10/DNA default segment color'
dnaStrutScaleFactor_prefs_key = 'A10/DNA strut scale factor'
# NOTE: several key strings below contain leading/embedded spaces or missing
# spaces; they are part of the stored keys and must not be "fixed".
arrowsOnBackBones_prefs_key = 'A9/ Show arrows on all directional bonds'
arrowsOnThreePrimeEnds_prefs_key = 'A9/ Show three prime ends as out arrow heads'
arrowsOnFivePrimeEnds_prefs_key = 'A9/ Show five prime ends as in arrow heads'
useCustomColorForThreePrimeArrowheads_prefs_key = 'A111/ Use custom color for three-prime arrowheads/spheres'
dnaStrandThreePrimeArrowheadsCustomColor_prefs_key = 'A111/ Custom color for strand three-prime arrowheads/spheres'
useCustomColorForFivePrimeArrowheads_prefs_key = 'A111/ Use custom color for five-prime arrowheads/spheres'
dnaStrandFivePrimeArrowheadsCustomColor_prefs_key = 'A111/ Custom color for five-prime strand arrowheads/spheres'
# Join strands command prefs
joinStrandsCommand_arrowsOnThreePrimeEnds_prefs_key = 'A110/ While in Join strands command, show three prime ends as out arrow heads'
joinStrandsCommand_arrowsOnFivePrimeEnds_prefs_key = 'A110/ While in Join strands command, show five prime ends as in arrow heads'
joinStrandsCommand_useCustomColorForThreePrimeArrowheads_prefs_key = 'A110/ While in Join strands command, use custom color for three-prime arrowheads/spheres'
joinStrandsCommand_dnaStrandThreePrimeArrowheadsCustomColor_prefs_key = 'A110/ While in Join strands command, Custom color for strand three-prime arrowheads/spheres'
joinStrandsCommand_useCustomColorForFivePrimeArrowheads_prefs_key = 'A110/ While in Join strands command,use custom color for five-prime arrowheads/spheres'
joinStrandsCommand_dnaStrandFivePrimeArrowheadsCustomColor_prefs_key = 'A110/ While in Join strands command, Custom color for strand five-prime arrowheads/spheres'
joinStrandsCommand_clickToJoinDnaStrands_prefs_key = 'V112/ Click on a strand to join it with the nearest strand on the same segment'
joinStrandsCommand_recursive_clickToJoinDnaStrands_prefs_key = 'V112/ Recursively join the DNAStrands three prime end with a neighboring five prime end.'
# Urmi 20080617: display grid in Plane Property Manager pref keys
PlanePM_showGrid_prefs_key = 'V111/Show Grid on the Plane'
PlanePM_showGridLabels_prefs_key = 'V111/Show Grid Labels on the Plane'
# Break strands command prefs
breakStrandsCommand_arrowsOnThreePrimeEnds_prefs_key = 'A110/ While in Break strands command, show three prime ends as out arrow heads'
breakStrandsCommand_arrowsOnFivePrimeEnds_prefs_key = 'A110/ While in Break strands command, show five prime ends as in arrow heads'
breakStrandsCommand_useCustomColorForThreePrimeArrowheads_prefs_key = 'A110/ While in Break strands command, use custom color for three-prime arrowheads/spheres'
breakStrandsCommand_dnaStrandThreePrimeArrowheadsCustomColor_prefs_key = 'A110/ While in Break strands command, Custom color for strand three-prime arrowheads/spheres'
breakStrandsCommand_useCustomColorForFivePrimeArrowheads_prefs_key = 'A110/ While in Break strands command,use custom color for five-prime arrowheads/spheres'
breakStrandsCommand_dnaStrandFivePrimeArrowheadsCustomColor_prefs_key = 'A110/ While in Break strands command, Custom color for strand five-prime arrowheads/spheres'
breakStrandsCommand_numberOfBasesBeforeNextBreak_prefs_key = 'A111/Number of bases before the next break site'
# Dna base number label prefs
dnaBaseNumberLabelChoice_prefs_key = 'V120/display dna base numbering using the choice given by this keys value'
dnaBaseNumberingOrder_prefs_key = 'V120/ display dna base numbering labels using the ordering choice given by this keys value'
dnaBaseNumberLabelColor_prefs_key = 'V120/ display dna base numbering labels using color given by this keys value'
# Various cursor text prefs =======================
dnaDuplexEditCommand_showCursorTextCheckBox_prefs_key = 'A110/Show cursor text while drawing the duplex'
dnaDuplexEditCommand_cursorTextCheckBox_numberOfBasePairs_prefs_key = 'A110/Show number of basepair info in cursor text while in DnaDulex_Editcommand'
dnaDuplexEditCommand_cursorTextCheckBox_numberOfTurns_prefs_key = 'A110/Show number of turns info in cursor text while in DnaDulex_Editcommand'
dnaDuplexEditCommand_cursorTextCheckBox_length_prefs_key = 'A110/Show duplex length info in cursor text while in DnaDulex_Editcommand'
dnaDuplexEditCommand_cursorTextCheckBox_angle_prefs_key = 'A110/Show angle info in cursor text while in DnaDulex_Editcommand'
dnaSegmentEditCommand_showCursorTextCheckBox_prefs_key = 'A110/Show cursor text while drawing the duplex in DnaSegment EditCommand'
dnaSegmentEditCommand_cursorTextCheckBox_numberOfBasePairs_prefs_key = 'A110/Show number of basepair info in cursor text while in DnaSegment_Editcommand'
dnaSegmentEditCommand_cursorTextCheckBox_length_prefs_key = 'A110/Show duplex length info in cursor text while in DnaSegment_Editcommand'
dnaSegmentEditCommand_cursorTextCheckBox_changedBasePairs_prefs_key = 'A110/Show changed number of basepairs info in cursor text while in DnaSegment_Editcommand'
# DnaSegment_ResizeHandle preferences
dnaSegmentResizeHandle_discRadius_prefs_key = 'V111/Radius of the disc component of the DnaSegment resize handle'
dnaSegmentResizeHandle_discThickness_prefs_key = 'V111/Thickness of the disc component of the DnaSegment resize handle'
dnaStrandEditCommand_showCursorTextCheckBox_prefs_key = 'A110/Show cursor text while drawing the duplex in DnaStrand_EditCommand'
dnaStrandEditCommand_cursorTextCheckBox_numberOfBases_prefs_key = 'A110/Show number of bases info in cursor text while in DnaStrand_Editcommand'
dnaStrandEditCommand_cursorTextCheckBox_changedBases_prefs_key = 'A110/Show changed number of basepairs info in cursor text while in DnaStrand_Editcommand'
# DNA strand or segment search type preference
dnaSearchTypeLabelChoice_prefs_key = 'V112/Dna Strand or sgment search type choice'
makeCrossoversCommand_crossoverSearch_bet_given_segments_only_prefs_key = 'A110/search for crossover sites between the given dna segments only'
# DNA Minor Groove Error Indicator prefs
dnaDisplayMinorGrooveErrorIndicators_prefs_key = 'A10/Display DNA minor groove error indicators'
dnaMinMinorGrooveAngle_prefs_key = 'A10/DNA minimum minor groove angle'
dnaMaxMinorGrooveAngle_prefs_key = 'A10/DNA maximum minor groove angle'
dnaMinorGrooveErrorIndicatorColor_prefs_key = 'A10/DNA minor groove error indicator color'
# DNA renditions prefs. Mark 2008-05-15
dnaRendition_prefs_key = 'A110/DNA rendition'
# DNA style prefs piotr 080310
dnaStyleStrandsShape_prefs_key = 'A10/DNA style strands shape'
dnaStyleStrandsColor_prefs_key = 'A10/DNA style strands color'
dnaStyleStrandsScale_prefs_key = 'A10/DNA style strands scale'
dnaStyleStrandsArrows_prefs_key = 'A10/DNA style strands arrows'
dnaStyleAxisShape_prefs_key = 'A10/DNA style axis shape'
dnaStyleAxisColor_prefs_key = 'A10/DNA style axis color'
dnaStyleAxisScale_prefs_key = 'A10/DNA style axis scale'
dnaStyleAxisEndingStyle_prefs_key = 'A10/DNA style axis ending style'
dnaStyleStrutsShape_prefs_key = 'A10/DNA style struts shape'
dnaStyleStrutsColor_prefs_key = 'A10/DNA style struts color'
dnaStyleStrutsScale_prefs_key = 'A10/DNA style struts scale'
dnaStyleBasesShape_prefs_key = 'A10/DNA style bases shape'
dnaStyleBasesColor_prefs_key = 'A10/DNA style bases color'
dnaStyleBasesScale_prefs_key = 'A10/DNA style bases scale'
assignColorToBrokenDnaStrands_prefs_key = 'A10/Assign color to broken DNA strands'
# DNA labels and base orientation preferences. 080325 piotr
dnaStrandLabelsEnabled_prefs_key = 'A10/DNA strand labels enabled'
dnaStrandLabelsColor_prefs_key = 'A10/DNA strand labels color'
dnaStrandLabelsColorMode_prefs_key = 'A10/DNA strand labels color mode'
dnaBaseIndicatorsEnabled_prefs_key = 'A10/DNA base orientation indicators enabled'
dnaBaseInvIndicatorsEnabled_prefs_key = 'A10/DNA base inverse orientation indicators enabled'
dnaBaseIndicatorsAngle_prefs_key = 'A10/DNA base orientation indicators angle'
dnaBaseIndicatorsColor_prefs_key = 'A10/DNA base orientation indicators color'
dnaBaseInvIndicatorsColor_prefs_key = 'A10/DNA base inverse orientation indicators color'
dnaBaseIndicatorsDistance_prefs_key = 'A10/DNA base orientation indicators distance'
dnaStyleBasesDisplayLetters_prefs_key = 'A10/DNA base letters enabled'
dnaBaseIndicatorsPlaneNormal_prefs_key = 'V110/DNA base orientation indicators plane option'
# Nanotube cursor texts ============
insertNanotubeEditCommand_showCursorTextCheckBox_prefs_key = 'A110/Show cursor text while drawing the nanotube in InsertNanotube_EditCommand'
insertNanotubeEditCommand_cursorTextCheckBox_length_prefs_key = 'A110/Show nanotube length info in cursor text while in InsertNanotube_Editcommand'
insertNanotubeEditCommand_cursorTextCheckBox_angle_prefs_key = 'A110/Show angle info in cursor text while in InsertNanotube_Editcommand'
editNanotubeEditCommand_showCursorTextCheckBox_prefs_key = 'A110/Show cursor text while resizing the nanotube in EditNanotube_EditCommand'
editNanotubeEditCommand_cursorTextCheckBox_length_prefs_key = 'A110/Show nanotube length info in cursor text while in EditNanotube_EditCommand'
# stereo view preferences [added by piotr 080516]
# NOTE(review): unlike the other keys in this file, these lack a version
# prefix (e.g. 'A10/'); presumably intentional -- confirm before changing.
stereoViewMode_prefs_key = 'Stereo view mode'
stereoViewSeparation_prefs_key = 'Stereo view separation'
stereoViewAngle_prefs_key = 'Stereo view angle'
# Modes prefs [added by mark 050910]
# The background style and color for each mode is initialized in init_prefs()
# of the superclass basicMode (modes.py).
## startupMode_prefs_key = 'A7/Startup Mode' #bruce 080709 commented out, not used since A9
## defaultMode_prefs_key = 'A7/Default Mode'
buildModeAutobondEnabled_prefs_key = 'A7/Build Mode Autobond Enabled' # mark 060203.
buildModeWaterEnabled_prefs_key = 'A7/Build Mode Water Enabled' # mark 060203.
buildModeHighlightingEnabled_prefs_key = 'A7/Build Mode Highlighting Enabled' # mark 060203.
buildModeSelectAtomsOfDepositedObjEnabled_prefs_key = 'A7/Build Mode Select Atoms of Deposited Obj Enabled' # mark 060304.
# Selection Behavior
permit_atom_chunk_coselection_prefs_key = 'A9 devel2/permit_atom_chunk_coselection'
# Lighting prefs [most added by mark 051124 or later]
## old_glpane_lights_prefs_key = "glpane lighting" #bruce 051206 moved this here from GLPane;
# it was hardcoded in two methods in GLPane; maybe dates from before prefs_constants module;
# in the next commit it was abandoned and changed as a fix of bug 1181; see comments near its uses in GLPane.
glpane_lights_prefs_key = 'A7/glpane lighting' #bruce 051206 introduced this key to fix bug 1181
light1Color_prefs_key = 'A7/Light1 Color' #bruce 051206 comment: this looks redundant with elements in GLPane._lights; why?
light2Color_prefs_key = 'A7/Light2 Color'
light3Color_prefs_key = 'A7/Light3 Color'
material_specular_highlights_prefs_key = 'A7/Material Specular Highlights'
material_specular_finish_prefs_key = 'A7/Material Specular Finish'
material_specular_shininess_prefs_key = 'A7/Material Specular Shininess'
material_specular_brightness_prefs_key = 'A7/Material Specular Brightness'
# File management / filename / URL preferences [tentative category, added by bruce 051130, more comments below]
wiki_help_prefix_prefs_key = 'A7/Wiki Help Prefix'
# Plug-ins prefs [added by mark 050918]
qutemol_path_prefs_key = 'A9/QuteMol Path'
qutemol_enabled_prefs_key = 'A9/QuteMol Enabled'
nanohive_path_prefs_key = 'A7/Nano-Hive Executable Path'
nanohive_enabled_prefs_key = 'A7/Nano-Hive Enabled'
povray_path_prefs_key = 'A8/POV-Ray Executable Path'
povray_enabled_prefs_key = 'A8/POV-Ray Enabled'
megapov_path_prefs_key = 'A8/MegaPOV Executable Path'
megapov_enabled_prefs_key = 'A8/MegaPOV Enabled'
povdir_path_prefs_key = 'A8/POV Include Directory' # only in Mac A8, for Windows will be in A8.1 (Linux??) [bruce 060710]
povdir_enabled_prefs_key = 'A8/POV Include Directory Enabled' # ditto, and might not end up being used [bruce 060710]
gmspath_prefs_key = 'A6/GAMESS Path'
gamess_enabled_prefs_key = 'A7/GAMESS Enabled'
gromacs_path_prefs_key = 'A10/GROMACS Path'
gromacs_enabled_prefs_key = 'A10/GROMACS Enabled'
rosetta_path_prefs_key = 'V111/Rosetta Path'
rosetta_enabled_prefs_key = 'V111/Rosetta Enabled'
rosetta_dbdir_prefs_key = 'V111/Rosetta Database'
rosetta_database_enabled_prefs_key = 'V111/Rosetta Database Enabled'
rosetta_backrub_enabled_prefs_key = 'V120/Rosetta Backrub Enabled'
cpp_path_prefs_key = 'A10/cpp Path'
cpp_enabled_prefs_key = 'A10/cpp Enabled'
nv1_path_prefs_key = 'A10/NanoVision-1 Path'
nv1_enabled_prefs_key = 'A10/NanoVision-1 Enabled'
# Undo and History prefs
undoRestoreView_prefs_key = 'A7/Undo Restore View'
undoAutomaticCheckpoints_prefs_key = 'A7/Undo Automatic Checkpoints'
undoStackMemoryLimit_prefs_key = 'A7/Undo Stack Memory Limit'
historyHeight_prefs_key = 'A6/History Height'
historyMsgSerialNumber_prefs_key = 'A6/History Message Serial Number'
historyMsgTimestamp_prefs_key = 'A6/History Message Timestamp'
# Window prefs (used to be called Caption prefs)
rememberWinPosSize_prefs_key = "A7/Remember Window Pos and Size" #mark 060315. NIY.
mainwindow_geometry_prefs_key_prefix = "main window/geometry" #bruce 051218 moved this from debug.py
captionPrefix_prefs_key = 'A6/Caption Prefix'
captionSuffix_prefs_key = 'A6/Caption Suffix'
captionFullPath_prefs_key = 'A6/Caption Full Path'
useSelectedFont_prefs_key = 'A9/Use Selected Font'
displayFont_prefs_key = 'A9/Display Font'
displayFontPointSize_prefs_key = 'A9/Display Font Point Size'
mtColor_prefs_key = 'A9/Model Tree Background Color' # Not yet in Preferences. Mark 2007-06-04
toolbar_state_prefs_key = 'V111/ Toolbar State ' #this was introduce in A10 but the value (string) changed in V111 to fix a bug in Qt4.3.5
displayReportsWidget_prefs_key = 'A10/Display Reports Widget'
#colorTheme_prefs_key = 'A9/Color Theme'
# Sponsor prefs
sponsor_download_permission_prefs_key = 'A8/Sponsor download permission'
sponsor_permanent_permission_prefs_key = 'A8/Sponsor download permission is permanent'
# The following key is not a user preference, it's a state variable that is used
# to keep track of when the sponsor logos files change. This will go away once
# Sponsors.py is re-written to incorporate a thread-safe main program
# event/command queue that can be utilized to throw up a download-permission
# dialog at the same time new logos files are detected.
#
sponsor_md5_mismatch_flag_key = 'A9/Sponsor md5 file mismatch'
# Protein display style preferences
# piotr 080625
proteinStyle_prefs_key = 'V111/Protein display style'
proteinStyleSmooth_prefs_key = 'V111/Protein display style smoothness'
proteinStyleQuality_prefs_key = 'V111/Protein display style quality'
proteinStyleScaling_prefs_key = 'V111/Protein scaling'
proteinStyleScaleFactor_prefs_key = 'V111/Protein scale factor'
proteinStyleColors_prefs_key = 'V111/Protein colors'
proteinStyleAuxColors_prefs_key = 'V111/Protein aux colors'
proteinStyleCustomColor_prefs_key ='V111/Protein custom color'
proteinStyleAuxCustomColor_prefs_key ='V111/Protein aux custom color'
proteinStyleColorsDiscrete_prefs_key = 'V111/Protein discrete colors'
proteinStyleHelixColor_prefs_key ='V111/Protein helix color'
proteinStyleStrandColor_prefs_key ='V111/Protein strand color'
proteinStyleCoilColor_prefs_key ='V111/Protein coil color'
proteinCustomDescriptors_prefs_key ='V111/Protein custom mutation descriptors'
#==
# List of prefs keys (strings, not _prefs_key global variable names)
# which got stored into developers or users prefs dbs (since they were saved in code committed to cvs),
# but are no longer used now.
# This list is not yet used by the code, and when it is, its format might be revised,
# but for now, make sure each line has a comment which gives complete info
# about whether or not a released version ever stored prefs using the given keys
# (and if so, exactly which released versions);
# also, each line should be signed with a name and date of the abandonment of that key.
###@@@ THIS IS NOT COMPLETE since I didn't have time to add the ones I removed from cvs rev 1.62 just before A8.
# I also forgot to remove some recently when I renamed them from A8 devel to A8 devel2. -- bruce 060705
_abandoned_prefs_keys = [
    'A7/Specular Highlights', # never released, superseded by 'A7/Material Specular Highlights' [mark 051205]
    'A7/Whiteness', # never released, superseded by 'A7/Material Specular Finish' [mark 051205]
    'A7/Shininess', # never released, superseded by 'A7/Material Specular Shininess' [mark 051205]
    'A7/Material Brightness', # never released, superseded by 'A7/Material Specular Brightness' [mark 051205]
    'glpane lighting', # was released in A6 and maybe some prior versions; superseded by 'A7/glpane lighting' [bruce 051206]
    'A7/Selection Behavior', # only released in pre-release snapshots of A7. [mark 060304]
    'A7/Select Atoms Mode Highlighting Enabled' # only released in pre-release snapshots of A7. [mark 060404]
    ]
#==
# Table of internal attribute names, default values, types, and prefs-db formats for some of these preferences.
# (This needs to be defined in a central place, and set up by code in preferences.py
# before any code can ask for preference values, so the default values can come from here.)
# computed default values; some of these names are also directly used by external code
# which is not yet fully revised to get the values from the prefs db.
# HICOLOR = highlight color (see the color constants imported at top of file).
_default_HICOLOR_real_atom = yellow
_default_HICOLOR_real_bond = yellow
_default_HICOLOR_delete_atom = red
_default_HICOLOR_delete_bond = red
_default_HICOLOR_bondpoint = pink
_default_toolong_color = ave_colors( 0.8, red, black) #bruce 050727 changed this from pure red; 050805 even for lines mode
_default_toolong_hicolor = ave_colors( 0.8, magenta, black) ## not yet in prefs db
_default_strandLabelsColor = black # piotr 080325 added these default colors
_default_baseIndicatorsColor = white
_default_baseInvIndicatorsColor = black
def _compute_default_bondVaneColor():
    """
    Return the default bond-vane color: a 50/50 blue/gray blend,
    darkened toward black.
    """
    # Fixed blend weight; this was ord_pi when the color varied with pi-bond
    # order. If a per-order color is ever wanted again, define two endpoint
    # colors here and interpolate between them instead.
    blend_weight = 0.5
    base = ave_colors(blend_weight, blue, gray)
    return ave_colors(0.8, base, black)
# Computed once at import time.
_default_bondVaneColor = _compute_default_bondVaneColor()
_default_bondColor = (0.25, 0.25, 0.25) # RGB triple, components in 0.0-1.0
# Do not move getDefaultWorkingDirectory() to platform.py since it might
# create a recursive import problem. [Mark 060730.]
# [However, it probably doesn't belong in this file either.
# Sometime try putting it into a file in a platform-dependent package.
# bruce 071215 comment]
def getDefaultWorkingDirectory():
    """
    Get the default Working Directory.
    @return: The default working directory, which is platform dependent:
             - Windows: $HOME\My Documents
             - MacOS and Linux: $HOME
             If the default working directory doesn't exist, return ".".
    @rtype: string
    """
    wd = ""
    if sys.platform == 'win32': # Windows
        # e.g. "C:\Documents and Settings\Mark\My Documents"
        wd = os.path.normpath(os.path.expanduser("~/My Documents"))
        # Check <wd> since some Windows OSes (i.e. Win95) may not have "~\My Documents".
        if not os.path.isdir(wd):
            wd = os.path.normpath(os.path.expanduser("~"))
    else: # Linux and MacOS
        # e.g. "/usr/mark"
        wd = os.path.normpath(os.path.expanduser("~"))
    if os.path.isdir(wd):
        return wd
    else:
        # Fall back to the current directory, with a console warning
        # (Python 2 print statement).
        print "getDefaultWorkingDirectory(): default working directory [", \
              wd , "] does not exist. Setting default working directory to [.]"
        return "."
# Computed once at import time; used as the default value in prefs_table below.
_default_workingDirectory = getDefaultWorkingDirectory()
# the actual table (for doc, see the code that interprets it, in preferences.py)
prefs_table = (
# entries are: (attribute name, prefs type-and-db-format code, prefs key, optional default value)
##e add categories or tags?
# General preferences [added to this table by mark 050919]
('display_compass', 'boolean', displayCompass_prefs_key, True),
('display_compass_labels', 'boolean', displayCompassLabels_prefs_key, True),
('display_position', 'int', compassPosition_prefs_key, LOWER_LEFT), # Mark 2007-0507.
('display_origin_axis', 'boolean', displayOriginAxis_prefs_key, True),
('', 'color', originAxisColor_prefs_key, lightblue),
('display_pov_axis', 'boolean', displayPOVAxis_prefs_key, False),
('', 'color', povAxisColor_prefs_key, darkgreen),
('', 'boolean', displayConfirmationCorner_prefs_key, True),
('', 'boolean', enableAntiAliasing_prefs_key, False),
('default_projection', 'int', defaultProjection_prefs_key, ORTHOGRAPHIC), # Changed to Ortho. Mark 051029.
('animate_high_quality', 'boolean', animateHighQualityGraphics_prefs_key, True), # Mark 060315. NIY.
('animate_std_views', 'boolean', animateStandardViews_prefs_key, True), # Mark 051110.
('animate_max_time', 'float', animateMaximumTime_prefs_key, 1.0), # 1 second. Mark 060124.
('working_directory', 'string', workingDirectory_prefs_key, _default_workingDirectory ), # Mark 060726.
('startup_display_style', 'int', startupGlobalDisplayStyle_prefs_key, diBALL), # Mark 060815 diTUBES; revised Ninad 080423 diBALL
('mouse_speed_during_rotation', 'float', mouseSpeedDuringRotation_prefs_key, 0.6), # Ninad 060906.
('display origin as small axis', 'boolean', displayOriginAsSmallAxis_prefs_key, True), #Ninad 060920
# Cursor text preferences (located on "Graphics Area" page).
('', 'int', cursorTextFontSize_prefs_key, 11),
('', 'color', cursorTextColor_prefs_key, black ),
#Paste offset scale factor preferences (see Ops_copy_Mixin._pasteGroup)
('paste offset scale for chunks', 'float',
pasteOffsetScaleFactorForChunks_prefs_key, 0.1),
('paste offset scale for dna objects' , 'float',
pasteOffsetScaleFactorForDnaObjects_prefs_key, 3.0),
# Color (page) preferences
('', 'int', backgroundGradient_prefs_key, bgBLUE_SKY),
('', 'color', backgroundColor_prefs_key, white),
('', 'string', hoverHighlightingColorStyle_prefs_key, HHS_HALO),
('', 'color', hoverHighlightingColor_prefs_key, yellow),
('', 'string', selectionColorStyle_prefs_key, SS_HALO),
('', 'color', selectionColor_prefs_key, darkgreen),
('', 'int', haloWidth_prefs_key, 5),
# Special colors. Mark 2008-07-10
# DarkBackgroundContrastColor_prefs_key is the default color used for
# lassos and other line drawing colors the first time NE1 is run.
# These are both recomputed each time the background color is changed.
('', 'color', DarkBackgroundContrastColor_prefs_key, gray),
('', 'color', LightBackgroundContrastColor_prefs_key, gray),
# stereo view settings added by piotr 080516
('stereo_view_mode', 'int', stereoViewMode_prefs_key, 1),
('stereo_view_separation', 'int', stereoViewSeparation_prefs_key, 50),
('stereo_view_angle', 'int', stereoViewAngle_prefs_key, 50),
# Fog setting. Mark 2008-05-21
('', 'boolean', fogEnabled_prefs_key, False),
#GLPane scale preferences . As of 2008-04-07, the GLPane_scale_* preferece
#can not be set by the user. Its just used internally.
#@see: GLPane_Minimial.__init__() and GLPane._adjust_GLPane_scale_if_needed()
#for more implementation details
('', 'float', startup_GLPane_scale_prefs_key, 10.0),
('', 'float', GLPane_scale_for_atom_commands_prefs_key, 10.0),
('', 'float', GLPane_scale_for_dna_commands_prefs_key, 50.0),
# Mouse wheel prefs. Mark 2008-04-07
('', 'int', mouseWheelDirection_prefs_key, 0),
('', 'int', zoomInAboutScreenCenter_prefs_key, 0),
('', 'int', zoomOutAboutScreenCenter_prefs_key, 1),
('', 'float', mouseWheelTimeoutInterval_prefs_key, 0.5),
# Pan settings prefs. Mark 2008-12-12
('', 'int', panArrowKeysDirection_prefs_key, 1),
# Ruler prefs. Mark 2008-02-12
# Ruler constants defined in Constants_Rulers.py.
('', 'boolean', displayRulers_prefs_key, True),
('', 'boolean', displayVertRuler_prefs_key, True),
('', 'boolean', displayHorzRuler_prefs_key, True),
('', 'string', rulerPosition_prefs_key, 0), # 0 = lower left
('', 'color', rulerColor_prefs_key, mustard),
('', 'float', rulerOpacity_prefs_key, 0.7),
('', 'boolean', showRulersInPerspectiveView_prefs_key, False),
#Ninad 20070509 Adjust,Minimize and Simulation(Dynamics)preferences for DNA
#reduced model(enable or disable elecrostatics)
('Electrostatics for Dna During Adjust','boolean',
electrostaticsForDnaDuringAdjust_prefs_key, False),
('Electrostatics For Dna During Minimize', 'boolean',
electrostaticsForDnaDuringMinimize_prefs_key, True),
('Electrostatics For Dna During Simulation', 'boolean',
electrostaticsForDnaDuringDynamics_prefs_key, True),
('', 'boolean', neighborSearchingInGromacs_prefs_key, True), # Eric M 20080515
# Minimize prefs (some are in General prefs pane, some are in dialogs)
# [mark 060627, revised & extended by bruce 060628, 060705 for A8]
# (none yet are specific to Adjust Atoms aka Local Minimize)
('', 'boolean', Adjust_watchRealtimeMinimization_prefs_key, True),
('', 'float', Adjust_endRMS_prefs_key, 100.0), # WARNING: this value may also be hardcoded in runSim.py
('', 'float', Adjust_endMax_prefs_key, -1.0), # -1.0 means blank lineedit widget, and actual value is computed from other prefs
('', 'float', Adjust_cutoverRMS_prefs_key, -1.0),
('', 'float', Adjust_cutoverMax_prefs_key, -1.0),
('', 'int', Adjust_minimizationEngine_prefs_key, MINIMIZE_ENGINE_ND1_FOREGROUND),
('', 'boolean', Minimize_watchRealtimeMinimization_prefs_key, True),
('', 'float', Minimize_endRMS_prefs_key, +1.0), # WARNING: this value may also be hardcoded in runSim.py
('', 'float', Minimize_endMax_prefs_key, -1.0),
('', 'float', Minimize_cutoverRMS_prefs_key, -1.0),
('', 'float', Minimize_cutoverMax_prefs_key, -1.0),
('', 'int', Minimize_minimizationEngine_prefs_key, MINIMIZE_ENGINE_ND1_FOREGROUND),
# preference for adding potential energy to trace file
('', 'boolean', Potential_energy_tracefile_prefs_key, False),
# Atom preferences - colors (other than element colors, handled separately)
('atom_highlight_color', 'color', atomHighlightColor_prefs_key, _default_HICOLOR_real_atom ),
('delete_atom_highlight_color', 'color', deleteAtomHighlightColor_prefs_key, _default_HICOLOR_delete_atom ),
('bondpoint_highlight_color', 'color', bondpointHighlightColor_prefs_key, _default_HICOLOR_bondpoint),
('bondpoint_hotspot_color', 'color', bondpointHotspotColor_prefs_key, ave_colors( 0.8, green, black) ), #bruce 050808
## ('openbond_highlight_color', 'color', xxx_prefs_key, HICOLOR_singlet ), ## pink [not yet in prefs db]
# Atom preferences - other
('', 'float', diBALL_AtomRadius_prefs_key, 1.0), #mark 051003 [about Ball and Stick]
('cpk_scale_factor', 'float', cpkScaleFactor_prefs_key, 0.775), #mark 060307 [about diTrueCPK, called CPK in UI as of now]
('level_of_detail', 'int', levelOfDetail_prefs_key, -1), # -1 = Variable . mark & bruce 060215.
# Preference to force to keep bonds while transmuting atoms
('keep_bonds_during_transmute', 'boolean', keepBondsDuringTransmute_prefs_key, False),
('', 'boolean', reshapeAtomsSelection_prefs_key, False), # --Mark 2008-04-06
('', 'boolean', indicateOverlappingAtoms_prefs_key, False),
# Bond preferences - colors
('bond_highlight_color', 'color', bondHighlightColor_prefs_key, _default_HICOLOR_real_bond),
('delete_bond_highlight_color', 'color', deleteBondHighlightColor_prefs_key, _default_HICOLOR_delete_bond),
('bond_stretch_color', 'color', bondStretchColor_prefs_key, _default_toolong_color),
## ('bond_stretch_highlight_color', 'color', xxx_prefs_key, _default_toolong_hicolor), ## [not yet in prefs db]
('pi_vane_color', 'color', bondVaneColor_prefs_key, _default_bondVaneColor),
('', 'color', diBALL_bondcolor_prefs_key, _default_bondColor),
# Bond preferences - other
#ninad 070430 Enable or disable display of bond stretch indicators --
('show_bond_stretch_indicators', 'boolean',
showBondStretchIndicators_prefs_key, True),
('pi_bond_style', ['multicyl','vane','ribbon'], pibondStyle_prefs_key, 'multicyl' ),
('pi_bond_letters', 'boolean', pibondLetters_prefs_key, False ),
('show_valence_errors', 'boolean', showValenceErrors_prefs_key, True ), #bruce 050806 made this up
('', 'int', linesDisplayModeThickness_prefs_key, 1),
('', 'float', diBALL_BondCylinderRadius_prefs_key, 1.0),
('', 'float', diDNACYLINDER_BondCylinderRadius_prefs_key, 1.0),
# DNA preferences
# All DNA default values need to be confirmed by Eric D and Paul R.
# Mark 2008-01-31.
('', 'float', adnaBasesPerTurn_prefs_key, 10.0),
('', 'float', adnaRise_prefs_key, 3.391),
('', 'float', bdnaBasesPerTurn_prefs_key, 10.0),
('', 'float', bdnaRise_prefs_key, 3.180),
('', 'float', zdnaBasesPerTurn_prefs_key, 10.0),
('', 'float', zdnaRise_prefs_key, 3.715),
('', 'color', dnaDefaultStrand1Color_prefs_key, darkred),
('', 'color', dnaDefaultStrand2Color_prefs_key, darkblue),
('', 'color', dnaDefaultSegmentColor_prefs_key, gray),
('', 'float', dnaStrutScaleFactor_prefs_key, 1.0),
('', 'int', dnaSearchTypeLabelChoice_prefs_key, 0),
# Strand arrowheads display option prefs.
('', 'boolean', arrowsOnBackBones_prefs_key, True),
('', 'boolean', arrowsOnThreePrimeEnds_prefs_key, True),
('', 'boolean', arrowsOnFivePrimeEnds_prefs_key, False),
#custom color for arrowheads -- default changed to False in v1.1.0
('', 'boolean', useCustomColorForThreePrimeArrowheads_prefs_key, False),
('', 'color', dnaStrandThreePrimeArrowheadsCustomColor_prefs_key, green),
('', 'boolean', useCustomColorForFivePrimeArrowheads_prefs_key, False),
('', 'color', dnaStrandFivePrimeArrowheadsCustomColor_prefs_key, green),
#Join strands command arrowhead display pref.(should it override global pref)
('', 'boolean', joinStrandsCommand_arrowsOnThreePrimeEnds_prefs_key, True),
('', 'boolean', joinStrandsCommand_arrowsOnFivePrimeEnds_prefs_key, True),
('', 'boolean', joinStrandsCommand_useCustomColorForThreePrimeArrowheads_prefs_key, True),
('', 'color', joinStrandsCommand_dnaStrandThreePrimeArrowheadsCustomColor_prefs_key, green),
('', 'boolean', joinStrandsCommand_useCustomColorForFivePrimeArrowheads_prefs_key, True),
('', 'color', joinStrandsCommand_dnaStrandFivePrimeArrowheadsCustomColor_prefs_key, green),
('', 'boolean', joinStrandsCommand_clickToJoinDnaStrands_prefs_key, False),
('', 'boolean', joinStrandsCommand_recursive_clickToJoinDnaStrands_prefs_key, True),
#Dna base number label prefs
('', 'int', dnaBaseNumberLabelChoice_prefs_key, 0),
('', 'int', dnaBaseNumberingOrder_prefs_key, 0),
('', 'color', dnaBaseNumberLabelColor_prefs_key, green),
#Urmi 20080617: Plane_PM display grid prefs
('','boolean',PlanePM_showGrid_prefs_key, False),
('','boolean',PlanePM_showGridLabels_prefs_key, False),
#Break strands command preferences
#=== arrowhead display pref.(should it override global pref)
('', 'boolean', breakStrandsCommand_arrowsOnThreePrimeEnds_prefs_key, True),
('', 'boolean', breakStrandsCommand_arrowsOnFivePrimeEnds_prefs_key, True),
('', 'boolean', breakStrandsCommand_useCustomColorForThreePrimeArrowheads_prefs_key, True),
('', 'color', breakStrandsCommand_dnaStrandThreePrimeArrowheadsCustomColor_prefs_key, green),
('', 'boolean', breakStrandsCommand_useCustomColorForFivePrimeArrowheads_prefs_key, True),
('', 'color', breakStrandsCommand_dnaStrandFivePrimeArrowheadsCustomColor_prefs_key, green),
('', 'int', breakStrandsCommand_numberOfBasesBeforeNextBreak_prefs_key, 5),
#DNA cursor text preferences
#Cursor text prefs while in InsertDna_EditCommand
('', 'boolean',
dnaDuplexEditCommand_showCursorTextCheckBox_prefs_key, True),
('', 'boolean',
dnaDuplexEditCommand_cursorTextCheckBox_numberOfBasePairs_prefs_key, True),
('', 'boolean',
dnaDuplexEditCommand_cursorTextCheckBox_numberOfTurns_prefs_key, True),
('', 'boolean',
dnaDuplexEditCommand_cursorTextCheckBox_length_prefs_key, True),
('', 'boolean',
dnaDuplexEditCommand_cursorTextCheckBox_angle_prefs_key, True),
#DnaSegment_EditCommand
('', 'boolean',
dnaSegmentEditCommand_showCursorTextCheckBox_prefs_key, True),
('', 'boolean',
dnaSegmentEditCommand_cursorTextCheckBox_numberOfBasePairs_prefs_key, True),
('', 'boolean',
dnaSegmentEditCommand_cursorTextCheckBox_length_prefs_key, True),
('', 'boolean',
dnaSegmentEditCommand_cursorTextCheckBox_changedBasePairs_prefs_key, True),
#DnaSegment_ResizeHandle preferences
('', 'float', dnaSegmentResizeHandle_discRadius_prefs_key, 12.5),
('', 'float', dnaSegmentResizeHandle_discThickness_prefs_key, 0.25),
#DnaStrand_EditCommand
('', 'boolean',
dnaStrandEditCommand_showCursorTextCheckBox_prefs_key, True),
('', 'boolean',
dnaStrandEditCommand_cursorTextCheckBox_numberOfBases_prefs_key, True),
('', 'boolean',
dnaStrandEditCommand_cursorTextCheckBox_changedBases_prefs_key, True),
#Make crossovers command
('', 'boolean',
makeCrossoversCommand_crossoverSearch_bet_given_segments_only_prefs_key,
True),
#Nanotube cursor text prefs
('', 'boolean',
insertNanotubeEditCommand_cursorTextCheckBox_angle_prefs_key, True),
('', 'boolean',
insertNanotubeEditCommand_cursorTextCheckBox_length_prefs_key, True),
('', 'boolean',
insertNanotubeEditCommand_showCursorTextCheckBox_prefs_key, True),
#EditNanotube_EditCommand cursor texts
('', 'boolean',
editNanotubeEditCommand_cursorTextCheckBox_length_prefs_key, True),
('', 'boolean',
editNanotubeEditCommand_showCursorTextCheckBox_prefs_key, True),
# DNA minor groove error indicator prefs.
('', 'boolean', dnaDisplayMinorGrooveErrorIndicators_prefs_key, True),
('', 'int', dnaMinMinorGrooveAngle_prefs_key, 60), # revised per Eric D [bruce 080326]
('', 'int', dnaMaxMinorGrooveAngle_prefs_key, 150), # ditto
('', 'color', dnaMinorGrooveErrorIndicatorColor_prefs_key, orange),
# Only used in "Break Strands" PM.
('', 'boolean', assignColorToBrokenDnaStrands_prefs_key, True),
# DNA style preferences 080310 piotr
# updated on 080408
('', 'int', dnaRendition_prefs_key, 0),
('', 'int', dnaStyleStrandsShape_prefs_key, 2),
('', 'int', dnaStyleStrandsColor_prefs_key, 0),
('', 'float', dnaStyleStrandsScale_prefs_key, 1.0),
('', 'int', dnaStyleStrandsArrows_prefs_key, 2),
('', 'int', dnaStyleAxisShape_prefs_key, 1),
('', 'int', dnaStyleAxisColor_prefs_key, 0),
('', 'float', dnaStyleAxisScale_prefs_key, 1.1),
('', 'int', dnaStyleAxisEndingStyle_prefs_key, 0),
('', 'int', dnaStyleStrutsShape_prefs_key, 1),
('', 'int', dnaStyleStrutsColor_prefs_key, 0),
('', 'float', dnaStyleStrutsScale_prefs_key, 1.0),
('', 'int', dnaStyleBasesShape_prefs_key, 0),
('', 'int', dnaStyleBasesColor_prefs_key, 3),
('', 'float', dnaStyleBasesScale_prefs_key, 1.7),
('', 'boolean', dnaStyleBasesDisplayLetters_prefs_key, False),
# protein style preferences
# piotr 080625
('', 'int', proteinStyle_prefs_key, 2), # CA Trace (ball and stick) # Mark 2008-12-20
('', 'boolean', proteinStyleSmooth_prefs_key, True),
('', 'int', proteinStyleQuality_prefs_key, 10),
('', 'int', proteinStyleScaling_prefs_key, 0),
('', 'float', proteinStyleScaleFactor_prefs_key, 1.0),
('', 'int', proteinStyleColors_prefs_key, 0),
('', 'int', proteinStyleAuxColors_prefs_key, 0),
('', 'color', proteinStyleCustomColor_prefs_key, gray),
('', 'color', proteinStyleAuxCustomColor_prefs_key, gray),
('', 'boolean', proteinStyleColorsDiscrete_prefs_key, True),
('', 'color', proteinStyleHelixColor_prefs_key, red),
('', 'color', proteinStyleStrandColor_prefs_key, blue),
('', 'color', proteinStyleCoilColor_prefs_key, gray),
# piotr 080718
('', 'string', proteinCustomDescriptors_prefs_key, "Nonpolar:PGAVILMFWYC:"),
# DNA angle and base indicators 080325 piotr
('', 'boolean', dnaStrandLabelsEnabled_prefs_key, False),
('', 'color', dnaStrandLabelsColor_prefs_key, _default_strandLabelsColor),
('', 'int', dnaStrandLabelsColorMode_prefs_key, 0),
('', 'boolean', dnaBaseIndicatorsEnabled_prefs_key, False),
('', 'boolean', dnaBaseInvIndicatorsEnabled_prefs_key, False),
('', 'color', dnaBaseIndicatorsColor_prefs_key, _default_baseIndicatorsColor),
('', 'color', dnaBaseInvIndicatorsColor_prefs_key, _default_baseInvIndicatorsColor),
('', 'float', dnaBaseIndicatorsAngle_prefs_key, 30.0),
('', 'int', dnaBaseIndicatorsDistance_prefs_key, 0),
('', 'int', dnaBaseIndicatorsPlaneNormal_prefs_key, 0),
# Modes preferences [added to this table by mark 050910]
#bruce 080709 commented these out, not used since A9:
## ('startup_mode', 'string', startupMode_prefs_key, '$DEFAULT_MODE' ),
## ('default_mode', 'string', defaultMode_prefs_key, 'DEPOSIT' ), # as suggested by Eric. Mark 051028.
## #ninad070430: made select chunks mode the only startup and default option
## #for A9 based on discussion
## ('default_mode', 'string', defaultMode_prefs_key, 'SELECTMOLS' ),
('buildmode_autobond', 'boolean', buildModeAutobondEnabled_prefs_key, True ), # mark 060203.
('buildmode_water', 'boolean', buildModeWaterEnabled_prefs_key, False ), # mark 060218.
('buildmode_highlighting', 'boolean', buildModeHighlightingEnabled_prefs_key, True ), # mark 060203.
('buildmode_selectatomsdepositobj', 'boolean', buildModeSelectAtomsOfDepositedObjEnabled_prefs_key, False ), # mark 060310.
# Lighting preferences [added to this table by mark 051124]
# If any default light colors are changed here, you must also change the color of
# the light in '_lights' in GLPane to keep them synchronized. Mark 051204.
('light1_color', 'color', light1Color_prefs_key, white ),
('light2_color', 'color', light2Color_prefs_key, white ),
('light3_color', 'color', light3Color_prefs_key, white ),
# Material specular properties.
('ms_highlights', 'boolean', material_specular_highlights_prefs_key, True),
('ms_finish', 'float', material_specular_finish_prefs_key, 0.5),
('ms_shininess', 'float', material_specular_shininess_prefs_key, 35.0),
('ms_brightness', 'float', material_specular_brightness_prefs_key, 1.0), #bruce 051203 bugfix: default value should be 1.0
# File management / filename / URL preferences [added by bruce 051130; category is a guess, doesn't have prefs UI page yet]
('', 'string', wiki_help_prefix_prefs_key, "http://www.nanoengineer-1.net/mediawiki/index.php?title=" ),
# Plug-ins preferences [added to this table by mark 050919]
('qutemol_exe_path', 'string', qutemol_path_prefs_key, "" ),
('qutemol_enabled', 'boolean', qutemol_enabled_prefs_key, False ),
('nanohive_exe_path', 'string', nanohive_path_prefs_key, "" ),
('nanohive_enabled', 'boolean', nanohive_enabled_prefs_key, False ),
('povray_exe_path', 'string', povray_path_prefs_key, "" ),
('povray_enabled', 'boolean', povray_enabled_prefs_key, False ),
('megapov_exe_path', 'string', megapov_path_prefs_key, "" ),
('megapov_enabled', 'boolean', megapov_enabled_prefs_key, False ),
('povdir_path', 'string', povdir_path_prefs_key, "" ), #bruce 060710
('povdir_enabled', 'boolean', povdir_enabled_prefs_key, False ), #bruce 060710
('gamess_exe_path', 'string', gmspath_prefs_key, "" ),
('gamess_enabled', 'boolean', gamess_enabled_prefs_key, False ),
('gromacs_exe_path', 'string', gromacs_path_prefs_key, "" ),
('gromacs_enabled', 'boolean', gromacs_enabled_prefs_key, True ),
#Urmi 20080709: since this is not in the pref dialog as yet, we'll hard code
# for testing purposes
('rosetta_exe_path', 'string', rosetta_path_prefs_key, "" ),
('rosetta_enabled', 'boolean', rosetta_enabled_prefs_key, False ),
('rosetta_database_dir', 'string', rosetta_dbdir_prefs_key, ""),
('rosetta_database_enabled', 'boolean', rosetta_database_enabled_prefs_key, False ),
('rosetta_backrub_enabled', 'boolean', rosetta_backrub_enabled_prefs_key, False ),
('cpp_exe_path', 'string', cpp_path_prefs_key, "" ),
('cpp_enabled', 'boolean', cpp_enabled_prefs_key, True ),
('nv1_exe_path', 'string', nv1_path_prefs_key, "" ),
('nv1_enabled', 'boolean', nv1_enabled_prefs_key, False ),
# Undo and History preferences [added to this table by bruce 050810]
('', 'boolean', undoRestoreView_prefs_key, False), # mark 060314
('', 'boolean', undoAutomaticCheckpoints_prefs_key, True), # mark 060314
('', 'int', undoStackMemoryLimit_prefs_key, 100), # mark 060327
('', 'boolean', historyMsgSerialNumber_prefs_key, True),
('', 'boolean', historyMsgTimestamp_prefs_key, True),
('history_height', 'int', historyHeight_prefs_key, 4), # ninad 060904
# Window preferences [added to this table by bruce 050810]
('', 'boolean', rememberWinPosSize_prefs_key, False), # mark 060315. NIY.
('', 'string', captionPrefix_prefs_key, "" ),
('', 'string', captionSuffix_prefs_key, "*" ),
('', 'boolean', captionFullPath_prefs_key, False ),
('', 'boolean', useSelectedFont_prefs_key, False ),
('', 'string', displayFont_prefs_key, "defaultFont"),
('', 'int', displayFontPointSize_prefs_key, -1), # will be reset by the actual default font size.
("", 'color', mtColor_prefs_key, white ), # Model Tree bg color. Mark 2007-06-04
#('', 'string', colorTheme_prefs_key, "defaultColorTheme"), # Gray for A9. Mark 2007-05-27.
#Following saves the toolbar and dockwidget positions between NE1 sessions
('toolbar_state', 'string' , toolbar_state_prefs_key, 'defaultToolbarState'),
('', 'boolean', displayReportsWidget_prefs_key, True),
# ...
('', 'boolean', sponsor_download_permission_prefs_key, False ),
('', 'boolean', sponsor_permanent_permission_prefs_key, False ),
('', 'boolean', sponsor_md5_mismatch_flag_key, True ),
# Dynamic Tooltip preferences [added to this table by ninad 060818]
('wake_up_delay', 'float', dynamicToolTipWakeUpDelay_prefs_key, 1.0), # 1 second. Mark 060817.
('atom_distance_precision', 'int', dynamicToolTipAtomDistancePrecision_prefs_key, 3), # number of decimal places ninad 060821
('bend_angle_precision', 'int', dynamicToolTipBendAnglePrecision_prefs_key, 3), # number of decimal places
('atom_chunk_info', 'boolean', dynamicToolTipAtomChunkInfo_prefs_key, False), # checkbox for displaying chunk name an atom belongs to
('bond_chunk_info', 'boolean', dynamicToolTipBondChunkInfo_prefs_key, False), # checkbox -chunk name(s) of the two atoms in the bond
('atom_position', 'boolean', dynamicToolTipAtomPosition_prefs_key, False), #checkbox for displaying xyz pos
('atom_distance_deltas', 'boolean', dynamicToolTipAtomDistanceDeltas_prefs_key, False), # check box to display xyz deltas
('bond_length', 'boolean', dynamicToolTipBondLength_prefs_key, False), # displays the bond length (precision determined by atom distance precision) @@@ ninad060823: It only returns the nuclear distance between the bonded atoms doesn't return covalent bond length.
('atom_mass', 'boolean', dynamicToolTipAtomMass_prefs_key, False), #displays mass of the atom ninad060829
#This preference adds VDW radii of the two atoms to the distance
#in the 'distance between atoms' information given by the dynamic tooltip.
('vdw_radii_in_atom_distance', 'boolean',
dynamicToolTipVdwRadiiInAtomDistance_prefs_key,
False),
#=== Start of NIYs ninad060822===#
('torsion_angle_precision', 'int', dynamicToolTipTorsionAnglePrecision_prefs_key, 3), # number of decimal places
#===End of NIYs ninad060822 ===#
)
# end
| NanoCAD-master | cad/src/utilities/prefs_constants.py |
# Copyright 2005-2009 Nanorex, Inc. See LICENSE file for details.
"""
utilities/Comparison.py - provides same_vals, for correct equality comparison.
See also state_utils.py, which contains the closely related copy_val.

@author: Bruce
@version: $Id$
@copyright: 2005-2009 Nanorex, Inc. See LICENSE file for details.

History:

same_vals was written as part of state_utils.py [bruce]

moved same_vals into utilities/Comparison.py to break an import cycle
[ericm 071005]

moved SAMEVALS_SPEEDUP and "import samevals" along with it
(but left the associated files in cad/src, namely samevals.c [by wware],
setup2.py [now in outtakes], and part of Makefile) [bruce 071005]
"""

from types import InstanceType

# Optional-dependency probes: each flag records whether the corresponding
# array package imported successfully, so the comparison helpers below can
# skip array-specific code paths when it is absent.
_haveNumeric = True # might be modified below
try:
    from Numeric import array, PyObject
except:
    # NOTE(review): bare except is broad (catches more than ImportError);
    # presumably deliberate so any Numeric failure just disables the feature.
    # this gets warned about in state_utils
    _haveNumeric = False

_haveNumpy = True # might be modified below
try:
    import numpy
    numpy.ndarray # make sure this exists
except:
    # NOTE(review): bare except also covers numpy importing but lacking
    # ndarray (AttributeError) -- confirm before narrowing this.
    print "fyi: python same_vals can't import numpy.ndarray, won't handle it" ###
    _haveNumpy = False

import foundation.env as env

# Debug flag: when True, same_vals cross-checks its answer against '!='.
_debug_same_vals = False #bruce 060419; relates to bug 1869

SAMEVALS_SPEEDUP = True
    # If true, try to use the C extension version in samevals.c
    # [which is not yet fully correct, IIRC -- bruce 071005 comment];
    # will be set to False if "import samevals" fails below.
    # Note: samevals.c [by wware] is still built and resides in cad/src,
    # not cad/src/utilities, as of 071005, but that should not
    # prevent import samevals from working here. If samevals.c is moved
    # here into utilities/, then setup2.py [now in outtakes, nevermind]
    # and part of Makefile need to be moved along with it. [bruce 071005 comment]
    #bruce 080403 update: samevals.c has been replaced by samevals.pyx
    # and samevalshelp.c, built by Makefile, all still at toplevel.

if SAMEVALS_SPEEDUP:
    try:
        # If we're using the samevals extension, we need to tell the
        # extension what a Numeric array looks like, since the symbol
        # PyArray_Type was not available at link time when we built
        # the extension. [wware]
        from samevals import setArrayType
        import Numeric
        setArrayType(type(Numeric.array((1,2,3))))
        print "SAMEVALS_SPEEDUP is True, and import samevals succeeded"
    except ImportError:
        # Note: this error could be from importing samevals
        # (an optional dll built from samevals.c) or Numeric.
        # If the latter, it was avoidable using _haveNumeric,
        # but I don't know whether samevals.c permits use when
        # setArrayType was never called, so I'll let this code
        # continue to disable SAMEVALS_SPEEDUP in either case.
        # [bruce 071005]
        print "samevals.so/dll or Numeric not available, not using SAMEVALS_SPEEDUP"
        SAMEVALS_SPEEDUP = False

# ==
def same_vals(v1, v2): #060303
"""
(Note: there is a C version of this method which is normally used by NE1.
It has the same name as this method and overwrites this one
due to an assignment near the end of this method's source file.
This method is the reference version, coded in Python.
This version is used by some developers who don't build the C version
for some reason.)
Efficiently scan v1 and v2 in parallel to determine whether they're the
same, for purposes of undoable state or saved state.
Note: the only reason we really need this method (as opposed to just
using Python '==' or '!=' and our own __eq__ methods)
is because Numeric.array.__eq__ is erroneously defined, and if we were
using '==' or '!=' on a Python tuple containing a Numeric array,
we'd have no way of preventing this issue from making '==' or '!='
come out wrong on the tuple.
(For details, see bruce email to 'all' of 060302, partially included below.)
It turns out that you can safely naively use != on Numeric arrays,
but not ==, since they both act elementwise, and this only
does what you usually want with != . I knew this in the past
(and fixed some weird bugs caused by it) but forgot it recently,
so Undo was thinking that atom position arrays had not changed
provided at least one coordinate of one atom had not changed.
[But note that you can't use either '==' or '!=' on tuples that might
contain Numeric arrays, since either way, Python uses '==' on the
tuple elements.]
In particular:
a = Numeric.array((1, 2, 3))
b = Numeric.array((1, 2, 3))
assert a == b # result: [1 1 1], interpreted as True
assert not a != b # result: [0 0 0], interpreted as False
b = Numeric.array((1, 4, 5))
assert a != b # result: [1 0 0], interpreted as True
assert not a == b # result: [0 1 1], interpreted as True
# the last assertion fails!
Do the maintainers of Numeric consider this to be correct
behavior?!?!?!? Probably.
What they should have done was define a new ufunc for equality
testing, and made the semantics of __eq__ and __ne__ work as
expected. Probably too late to expect them to change this now.
As long as we have it, we might as well make it a bit more stringent
than Python '==' in other ways too, like not imitating the behaviors
(which are good for '==') of 1.0 == 1, array([1]) == [1], etc.
The only reason we'll count non-identical objects as equal is that
we're not interested in their addresses or in whether someone
will change one of them and not the other (for whole objects or for
their parts).
###doc for InstanceType... note that we get what we want by using
__eq__ for the most part...
"""
if v1 is v2:
# Optimization:
# this will happen in practice when whole undoable attrvals are
# immutable (so that we're comparing originals, not different copies),
# therefore it's probably common enough to optimize for.
# It's just as well we're not doing it in the recursive helper,
# since it would probably slow us down when done at every level.
# [060303 11pm]
return True
try:
_same_vals_helper(v1, v2)
except _NotTheSame:
if _debug_same_vals and not (v1 != v2):
print "debug_same_vals: " \
"same_vals says False but 'not !=' says True, for", v1, v2
# happens for bug 1869 (even though it's fixed;
# cause is understood)
return False
if _debug_same_vals and (v1 != v2):
print "debug_same_vals: " \
"same_vals says True but '!=' also says True, for", v1, v2
##@@ remove when pattern seen
return True
class _NotTheSame(Exception):
pass
def _same_list_helper(v1, v2):
    """
    Raise _NotTheSame unless the two sequences have equal length and
    pairwise-same elements (checked recursively via _same_vals_helper).
    """
    if len(v1) != len(v2):
        raise _NotTheSame
    for elt1, elt2 in zip(v1, v2):
        _same_vals_helper(elt1, elt2)
    return

# tuples are compared exactly like lists
_same_tuple_helper = _same_list_helper
def _same_dict_helper(v1, v2):
    """
    Raise _NotTheSame unless the two dicts have the same set of keys
    and same values for each key (values checked recursively via
    _same_vals_helper). Keys are compared with ordinary dict lookup
    (hash/==), not with same_vals.
    """
    if len(v1) != len(v2):
        raise _NotTheSame
    for key, val1 in v1.iteritems():
        # use the 'in' operator rather than the deprecated dict.has_key
        if key not in v2:
            raise _NotTheSame
        _same_vals_helper(val1, v2[key])
    # if we get this far, no need to check for extra keys in v2,
    # since lengths were the same
    return
# implem/design discussion
# [very old; slightly clarified, bruce 090205; see new summary below]:
#
# Choice 1:
# no need for _same_InstanceType_helper; we set up all (old-style) classes
# so that their __eq__ method is good enough; this only works if we assume
# that any container-like instances (which compare their parts) are ones we
# wrote, so they don't use == on Numeric arrays, and don't use == or != on
# general values.
#
# Choice 2:
# on naive objects, we just require id(v1) == id(v2).
# Downside: legitimate data-like classes by others, with proper __eq__
# methods, will compare different when they should be same.
# Upside: if those classes have Numeric parts and compare them with ==,
# that's a bug, which we'll avoid.
# Note that if it's only our own classes which run, and if they have no bugs,
# then it makes no difference which choice we use.
#
### UNDECIDED. For now, doing nothing is equivalent to Choice 1.
# but note that choice 2 is probably safer.
# in fact, if I do that, i'd no longer need _eq_id_mixin just due to StateMixin.
# (only when __getattr__ and someone calls '==') [060303]
#
# Update 060306: some objects will need _s_same_as(self, other) different from
# __eq__, since __eq__ *might* want to compare some components with !=
# (like int and float) rather than be as strict as same_vals.
# Even __eq__ needs to try to avoid the "Numeric array in list" bug,
# which in some cases will force it to also call same_vals,
# but when types are known it's plausible that it won't have to,
# so the distinct methods might be needed.
# When we first need _s_same_as, that will force use of a new
# _same_InstanceType_helper func. Do we need it before then? Not sure.
# Maybe not; need to define __eq__ better in GAMESS Jig (bug 1616) but
# _s_same_as can probably be the same method. OTOH should we let DataMixin
# be the thing that makes _s_same_as default to __eq__?? ###
######@@@@@@
#
# update, bruce 060419, after thinking about bug 1869
# (complaint about different bonds with same key):
# - The Bond object needs to use id for sameness, in Undo diffs at least
# (only caller of same_vals?) (but can't use id for __eq__ yet).
# - Q: What is it about Bond that decides that -- Bond? StateMixin? not DataMixin?
# A: The fact that scan_children treats it as a "child object",
# not as a data object (see obj_is_data method).
# That's what makes Undo change attrs in it, which only makes sense if
# Undo treats refs to it (in values of other attrs, which it's diffing)
# as the same iff their id is same.
# Conclusion: we need to use the same criterion in same_vals, via a new
# _same_InstanceType_helper -- *not* (only) a new method
# [later: I guess I meant the '_s_same_as' method -- nim, discussed only here]
# as suggested above and in a comment I added to bug 1869 report.
# For now, we don't need the new method at all.
#
# ==
#
# Update, bruce 090205: reviewing the above, there are only the following cases
# where same_vals needs to disagree with '==', given that we are free to define
# proper __eq__/__ne__ methods in our own code:
#
# * Numeric arrays (due to the design flaw in their __eq__ method semantics)
#
# * Python data objects which might contain Numeric arrays
# (i.e. list, dict, tuple -- also 'set', if we start using that in model
# state -- we don't do so yet, since it's not supported in our minimum
# supported Python version)
#
# * supporting same_vals(1, 1.0) == False, for conservatism in Undo
# (but note that most __eq__ methods don't bother to worry about that
# when comparing components in data-like instances; often this is justified,
# either since they treat those values equivalently or only store them with
# one type, so it's reasonable to permit it even though it could in theory
# lead to bugs)
#
# * any future similar cases of same_vals being more conservative than __eq__,
# especially if they apply within instances, motivating us to define a new
# "data API" method called '_s_same_as' (not needed for now); OTOH, any new
# cases of that should be deprecated, as far as I know
#
# * for Bond, as long as it has an __eq__ method more liberal than id comparison
# (used by same_vals), since as a State holder, id comparison is correct in
# principle. It only needs the looser __eq__ due to old code in a bad style,
# but it's hard to know whether that code is entirely gone (and I think it's
# not and is hard to finally remove it).
#
# How does this affect the issue of changing Node to a new-style class?
# If we do this with no code changes, Nodes lose the services of
# _same_InstanceType_helper, but the above suggests they might never have
# needed it anyway -- of the above issues, only the ones concerned with
# '_s_same_as' and 'Bond' apply to instances of old or new classes.
#
# Conclusion: we can ignore extending _same_InstanceType_helper to new-style
# Nodes -- in fact, we could dispense with it entirely in current code except
# for Bond. (See also the 090205 comments in the docstring of state_utils.py.)
#
# (FYI: If we were to write a completely new framework, I think we'd use our
# own classes rather than Numeric, with proper semantics for ==/!=,
# and then dispense with same_vals entirely, relying on '==' even for Undo.)
def _same_InstanceType_helper(obj1, obj2):
    """
    [private] same_vals helper for instances of old-style classes.

    Raise _NotTheSame unless obj1 should be considered "the same" as obj2:
    identical objects are always the same; distinct objects are the same
    only when their class marks them as pure data (via an _s_isPureData
    attribute) and they compare equal.
    """
    #bruce 060419, relates to bug 1869; see detailed comment above
    if obj1 is obj2:
        # not just an optim -- the remaining code assumes obj1 is not obj2
        return
    # We might like to ask classify_instance(obj1).obj_is_data, but there is
    # no canonical object-classifier to ask, so (for A7, no time to clean
    # this up) duplicate its code here, optimized.
    klass = obj1.__class__
    ###k don't check copiers_for_InstanceType_class_names.has_key(klass.__name__),
    # since that's always False for now.
    if not hasattr(klass, '_s_isPureData'):
        # a stateful (non-data) object: only identity (checked above)
        # could have made these the same
        raise _NotTheSame
    # data-like instance: rely on the class's own __eq__
    if obj1 != obj2:
        raise _NotTheSame
    return
def _same_Numeric_array_helper(obj1, obj2):
if obj1.typecode() != obj2.typecode():
raise _NotTheSame
if obj1.shape != obj2.shape:
raise _NotTheSame
if obj1.typecode() == PyObject:
if env.debug():
print "atom_debug: ran _same_Numeric_array_helper, PyObject case"
# remove when works once ###@@@
# assume not multi-dimensional (if we are, this should work [untested]
# but it will be inefficient)
for i in xrange(len(obj1)):
# two PyObjects (if obj1 is 1-dim) or two lower-dim Numeric arrays
_same_vals_helper(obj1[i], obj2[i])
else:
if obj1 != obj2:
# take pointwise !=, then boolean value of that (correct, but is
# there a more efficient Numeric function?)
# note: using '==' here (and negating boolean value of result)
# would NOT be correct
raise _NotTheSame
return
def _same_numpy_ndarray_helper(obj1, obj2): #bruce 081202
    """
    Given two objects of type numpy.ndarray,
    raise _NotTheSame if they are not equal.
    """
    # For documentation, see http://www.scipy.org/Tentative_NumPy_Tutorial .
    # This function exists only because:
    # - for some developers, some PyOpenGL functions return this type
    #   (e.g. glGetDoublev( GL_MODELVIEW_MATRIX));
    # - numpy shares Numeric's design flaw in ==/!=.
    # CAVEATS: comparing raw buffers might be wrong if obj1.data (a python
    # buffer) can contain padding, or if element types can be python object
    # pointers, or if guesses made from incomplete ndarray/buffer docs are
    # wrong.
    ### TODO:
    # - support this in the C version of same_vals
    # - support it in copy_val
    # - not sure if it needs support elsewhere in state_utils.py
    if obj1.shape != obj2.shape:
        raise _NotTheSame
    if obj1.dtype != obj2.dtype:
        raise _NotTheSame
    # Compare the underlying data. type(obj1.data) is <type 'buffer'>;
    # the docs only hint that buffers support ==/!=, but tests confirm it,
    # e.g. buffer("abc") != buffer("def") => True, and this check was
    # verified to detect both same and different contents. [bruce 081202]
    if obj1.data != obj2.data:
        raise _NotTheSame
    return
# Registry mapping a value's type to the same_vals helper that scans it;
# types without an entry are compared directly with != in _same_vals_helper.
_known_type_same_helpers = {}

_known_type_same_helpers[type([])] = _same_list_helper
_known_type_same_helpers[type({})] = _same_dict_helper
_known_type_same_helpers[type(())] = _same_tuple_helper
_known_type_same_helpers[ InstanceType ] = _same_InstanceType_helper
    # note: see long comment below, which concludes "we can ignore
    # extending _same_InstanceType_helper to new-style Nodes"
    # (re changing class Node to be a new-style class), since we can
    # rely on our overrides of __eq__/__ne__. [bruce 090205 comment]

if _haveNumeric:
    # note: related code exists in state_utils.py.
    numeric_array_type = type(array(range(2)))
        # __name__ is 'array', but Numeric.array itself is a built-in
        # function, not a type
    assert numeric_array_type != InstanceType
    _known_type_same_helpers[ numeric_array_type ] = _same_Numeric_array_helper
    del numeric_array_type

if _haveNumpy:
    numpy_ndarray_type = numpy.ndarray
    assert numpy_ndarray_type != InstanceType
    _known_type_same_helpers[ numpy_ndarray_type ] = _same_numpy_ndarray_helper
    del numpy_ndarray_type
def _same_vals_helper(v1, v2): #060303
    """
    [private recursive helper for same_vals]
    raise _NotTheSame if v1 is not the same as v2
    (i.e. if their type or structure differs,
    or if any corresponding parts are not the same)
    """
    typ = type(v1)
        # note: for an instance of an old-style class this is InstanceType;
        # for an instance of a new-style class it is the class itself.
        # Either way, v1.__class__ is the class itself.
    if typ is not type(v2):
        raise _NotTheSame
    same_helper = _known_type_same_helpers.get(typ) # a fixed public dictionary
        # note: this has an entry for InstanceType (necessary only for Bond)
        # but not for new-style classes. This is ok (as long as Bond remains
        # an old-style class); see comments dated 090205.
    if same_helper is not None:
        same_helper(v1, v2) # might raise _NotTheSame
    else:
        # General case -- atomic types and a few others get no scanner as an
        # optimization, so depend on __eq__/__ne__. The things this is wrong
        # for are listed in comments dated 090205, and are all covered by
        # entries in _known_type_same_helpers.
        if v1 != v2:
            raise _NotTheSame
    return
    # old comment, still true but might be redundant:
    # (If not for Numeric arrays of type PyObject, we could safely use !=
    # in the above code on a pair of Numeric arrays -- just not on things
    # that might contain them, in case their type's != method used == on
    # the Numeric arrays, whose boolean value doesn't correctly say whether
    # they're equal (instead it says whether one or more corresponding
    # elements are equal).
    # Another difference is that 1 == 1.0, but we'll say those are not the
    # same; but that aspect of our specification doesn't matter much.)
# ==

if SAMEVALS_SPEEDUP:
    # Replace the python definition above with the C extension's version.
    # (This is done for same_vals here in utilities/Comparison.py, and for
    # copy_val in state_utils.py, which sets COPYVAL_SPEEDUP if it works,
    # since that might fail even when SAMEVALS_SPEEDUP works.)
    from samevals import same_vals
        # this overwrites the public global which other modules import.
        # note: there is no point in saving the python version before this
        # assignment (e.g. for testing), since it uses this global for its
        # recursion, so after this import it would be recursing into the
        # C version instead of into itself. Fixing this would require
        # modifying the global before each test -- not presently worth
        # the trouble. [bruce 080922 comment]
    pass

# end
| NanoCAD-master | cad/src/utilities/Comparison.py |
# Copyright 2006-2007 Nanorex, Inc. See LICENSE file for details.
"""
qt4transition.py - Useful markers and messages for Qt 4 transition work.
@author: Will
@version: $Id$
@copyright: 2006-2007 Nanorex, Inc. See LICENSE file for details.
"""
import sys
import traceback
import types
import utilities.debug_prefs as debug_prefs
import utilities.debug as debug
from utilities.objectBrowse import objectBrowse
__already = { }
def __linenum(always = False):
try:
raise Exception
except:
tb = sys.exc_info()[2]
f = tb.tb_frame
f = f.f_back
f = f.f_back
key = (f.f_code.co_filename, f.f_lineno)
got_key = True
if not __already.has_key(key):
__already[key] = 1
got_key = False
if got_key and not always:
return False
else:
print f.f_code.co_filename, f.f_code.co_name, f.f_lineno
return True
def qt4here(msg = None, show_traceback = False):
if show_traceback:
traceback.print_stack(None, None, sys.stdout)
if msg is not None:
print 'Qt 4 HERE: ' + msg
print
else:
__linenum(always = True)
if msg is not None:
print 'Qt 4 HERE: ' + msg
def qt4overhaul(msg):
if __linenum():
print 'Qt 4 MAJOR CONCEPTUAL OVERHAUL: ' + msg
def qt4message(msg, always = False):
if debug_prefs.debug_pref("Enable QT4 TODO messages",
debug_prefs.Choice_boolean_False,
prefs_key = True):
if __linenum(always):
print 'Qt 4 MESSAGE: ' + msg
def qt4todo(msg):
if debug_prefs.debug_pref("Enable QT4 TODO messages",
debug_prefs.Choice_boolean_False,
prefs_key = True):
if __linenum():
print 'Qt 4 TODO: ' + msg
else:
return
def multipane_todo(msg):
if __linenum():
print 'Multipane TODO: ' + msg
def qt4warning(msg):
if debug_prefs.debug_pref("Enable QT4 WARNING messages",
debug_prefs.Choice_boolean_False,
prefs_key = True):
if __linenum():
print 'Qt 4 WARNING: ' + msg
def qt4skipit(msg):
"""
Indicates something I don't think we need for Qt 4
"""
if __linenum():
print 'Qt 4 SKIP IT: ' + msg
__nomsg = '128931789ksadjfqwhrhlv128947890127408'
def qt4die(msg = __nomsg, browse = False):
traceback.print_stack(file = sys.stdout)
if msg == __nomsg:
print 'Qt 4 DIE'
elif browse:
print 'Qt 4 DIE:', msg
objectBrowse(msg, maxdepth = 1)
else:
if type(msg) is not types.StringType:
msg = repr(msg)
print 'Qt 4 DIE: ' + msg
sys.exit(0)
def qt4exception(msg):
    """
    Raise an Exception flagging something that should not exist in the
    Qt 4 version. (The exception message is 'Qt 4: ' + msg.)
    """
    text = 'Qt 4: ' + msg
    raise Exception(text)
def qt4info(msg, name = None, maxdepth = 1, browse = False):
__linenum(always = True)
if type(msg) is type('abc'):
print 'Qt 4 INFO:', repr(msg)
else:
print 'Qt 4 INFO:',
if name is not None: print name,
print repr(msg)
if browse:
objectBrowse(msg, maxdepth = maxdepth, outf = sys.stdout)
def qt4warnDestruction(obj, name = ''):
message = '* * * * '
try:
raise Exception
except:
f = sys.exc_info()[2].tb_frame
f = f.f_back
message += f.f_code.co_filename + (':%d' % f.f_lineno)
if name:
message += ' ' + name
if debug_prefs.debug_pref("Enable QT4 WARNING messages",
debug_prefs.Choice_boolean_False,
prefs_key = True):
print 'Setting up destruction warning', message
def destruction(ignore, message = message):
print 'OBJECT DESTROYED (exiting)', message #bruce 070521 revised message
sys.exit(1)
from PyQt4.Qt import QObject, SIGNAL
QObject.connect(obj, SIGNAL("destroyed(QObject *)"), destruction)
def findDefiningClass(cls_or_method, method_name = None):
    """
    Find which base class defines this method

    (Fix: the assert message below used to read "instance mthod";
    typo corrected to "instance method".)

    >>> print findDefiningClass(DeepClass.foo)
    __main__.BaseClass
    >>> print findDefiningClass(ShallowClass.foo)
    __main__.Base2Class
    >>> print findDefiningClass(ShallowClass, 'foo')
    __main__.Base2Class
    >>> x = DeepClass()
    >>> print findDefiningClass(x.foo)
    __main__.BaseClass
    """
    # Normalize the arguments into an unbound method object.
    if method_name is not None:
        if type(cls_or_method) is not types.ClassType:
            cls_or_method = cls_or_method.__class__
        method = getattr(cls_or_method, method_name)
    elif type(cls_or_method) is types.MethodType:
        # may be bound; re-fetch from its class to get the unbound form
        method = getattr(cls_or_method.im_class, cls_or_method.im_func.func_name)
    else:
        method = cls_or_method
    assert method.im_self is None, "Method must be a class method, not an instance method"
    # Collect every base class whose attribute of this name shares the
    # method's underlying function, tagged with its depth in the MRO walk.
    def hunt(klass, lst, depth, name = method.im_func.func_name, method = method):
        if hasattr(klass, name) and method.im_func is getattr(klass, name).im_func:
            lst.append((depth, klass))
        for base in klass.__bases__:
            hunt(base, lst, depth + 1)
    lst = [ ]
    hunt(method.im_class, lst, 0)
    lst.sort()
    # the deepest (most basic) class defining the method wins
    return lst[-1][1]
def lineage(widget, die = True, depth = 0):
    """
    Trace the parental lineage of a Qt 4 widget: parent,
    grandparent... This is helpful in diagnosing "RuntimeError:
    underlying C/C++ object has been deleted" errors. It is frequently
    wise to kill the program at the first such deletion, so that is the
    default behavior (switchable with die = False).
    """
    if widget is not None:
        from PyQt4.Qt import QObject, SIGNAL
        print (depth * ' ') + repr(widget)
        def destruction(ignore, die = die, message = repr(widget) + " was just destroyed"):
            qt4here(message, show_traceback = True)
            if die:
                sys.exit(1)
        QObject.connect(widget, SIGNAL("destroyed(QObject *)"), destruction)
        # recurse upwards through the parent chain
        lineage(widget.parent(), die, depth + 1)
# ==
if __name__ == '__main__':
# classes used to test findDefiningClass
class BaseClass:
def foo(self):
print 'bar'
class Base2Class:
def foo(self):
print 'BAR'
class MiddleClass(BaseClass):
pass
class DeepClass(MiddleClass, Base2Class):
pass
class ShallowClass(Base2Class, MiddleClass):
pass
import doctest
doctest.testmod()
# end
| NanoCAD-master | cad/src/utilities/qt4transition.py |
# Copyright 2006-2007 Nanorex, Inc. See LICENSE file for details.
"""
objectBrowse.py
@author: Will
@version: $Id$
@copyright: 2006-2007 Nanorex, Inc. See LICENSE file for details.
History:
Will wrote this and used it for debugging.
Bruce 071107 split it out of debug.py.
(It has an undefined reference, but presumably worked,
and is referenced by qt4transition.qt4die.
Therefore I didn't move it into outtakes or scratch.)
"""
import sys, types
### BUG: undefined variable Finder
# ==
def standardExclude(attr, obj):
    """
    Default exclusion predicate for objectBrowse: exclude nothing.
    """
    # EricM commented out two imports here (MWsemantics, GLPane) in Will's
    # code, since they caused trouble for his import analysis and their
    # purpose was unclear (possibly to finish those imports before enabling
    # some sort of debug output during subsequent imports).
    # [bruce 071107 comment]
    ##from MWsemantics import MWsemantics
    ##from GLPane import GLPane
    # Historically: "I am rarely interested in peeking inside these, and
    # they create tons of output."
    return False
class ObjectDescender:
    """
    Walk an object graph down to maxdepth levels, writing one line per
    visited value via handleLeaf. Subclasses/instances may override
    exclude (skip entirely), showThis (suppress printing only), and
    prefix (line formatting).
    """
    def __init__(self, maxdepth, outf = sys.stdout):
        self.already = [ ]   # values already visited, to break cycles
        self.maxdepth = maxdepth
        self.outf = outf

    def exclude(self, attr, obj):
        # return True to skip this attribute/element and its subtree
        return False

    def showThis(self, attr, obj):
        # return True to print this value (it is descended into regardless)
        return True

    def prefix(self, depth, pn):
        # tab indentation proportional to depth, plus the dotted path
        return ((depth * "\t") + ".".join(pn) + ": ")

    def handleLeaf(self, v, depth, pn):
        # write one line describing v at the given path
        def trepr(v):
            # terse type-level repr: "<class:bases> at 0x..." for instances,
            # repr(type) otherwise
            if v == None:
                return "None"
            elif type(v) == types.InstanceType:
                def classWithBases(cls):
                    r = cls.__name__
                    for b in cls.__bases__:
                        r += ":" + classWithBases(b)
                    return r
                # r = v.__class__.__name__
                r = "<" + classWithBases(v.__class__) + ">"
            else:
                r = repr(type(v))
            return "%s at %x" % (r, id(v))
        if type(v) in (types.ListType, types.TupleType):
            self.outf.write(self.prefix(depth, pn) + trepr(v))
            if len(v) == 0:
                self.outf.write(" (empty)")
            self.outf.write("\n")
        elif type(v) in (types.StringType, types.IntType,
                         types.FloatType, types.ComplexType):
            # atomic values: show the value itself
            self.outf.write(self.prefix(depth, pn) + repr(v) + "\n")
        else:
            self.outf.write(self.prefix(depth, pn) + trepr(v) + "\n")

    def getAttributes(self, obj):
        # all attribute names of obj (dir() plus instance __dict__),
        # minus double-underscore names, sorted
        lst = dir(obj)
        if hasattr(obj, "__dict__"):
            for x in obj.__dict__.keys():
                if x not in lst:
                    lst.append(x)
        def no_double_underscore(x):
            return not x.startswith('__')
        lst = filter(no_double_underscore, lst)
        lst.sort()
        return lst

    def descend(self, obj, depth = 0, pathname=[ ], excludeClassVars = False):
        # breadth-ish walk: print all children of obj, then recurse into them
        if obj in self.already:
            return
        self.already.append(obj)
        if depth == 0:
            self.handleLeaf(obj, depth, pathname)
        if depth >= self.maxdepth:
            return
        if type(obj) in (types.ListType, types.TupleType):
            lst = [ ]
            if len(pathname) > 0:
                lastitem = pathname[-1]
                pathname = pathname[:-1]
            else:
                lastitem = ""
            for i in range(len(obj)):
                x = obj[i]
                if not self.exclude(i, x):
                    y = pathname + [ lastitem + ("[%d]" % i) ]
                    lst.append((i, x, y))
            for i, v, pn in lst:
                if self.showThis(i, v):
                    self.handleLeaf(v, depth+1, pn)
            for i, v, pn in lst:
                self.descend(v, depth+1, pn)
        elif type(obj) in (types.DictType,):
            keys = obj.keys()
            lst = [ ]
            if len(pathname) > 0:
                lastitem = pathname[-1]
                pathname = pathname[:-1]
            else:
                lastitem = ""
            for k in keys:
                x = obj[k]
                if not self.exclude(k, x):
                    y = pathname + [ lastitem + ("[%s]" % repr(k)) ]
                    lst.append((k, x, y))
            for k, v, pn in lst:
                if self.showThis(k, v):
                    self.handleLeaf(v, depth+1, pn)
            for k, v, pn in lst:
                self.descend(v, depth+1, pn)
        elif (hasattr(obj, "__class__") or
              type(obj) in (types.InstanceType, types.ClassType,
                            types.ModuleType, types.FunctionType)):
            ckeys = [ ]
            if True:
                # Look at instance variables, ignore class variables and methods
                if hasattr(obj, "__class__"):
                    ckeys = self.getAttributes(obj.__class__)
            else:
                # Look at all variables and methods
                ckeys = ( )
            keys = self.getAttributes(obj)
            if excludeClassVars:
                keys = filter(lambda x: x not in ckeys, keys)
            lst = [ ]
            for k in keys:
                x = getattr(obj, k)
                if not self.exclude(k, x):
                    lst.append((k, x, pathname + [ k ]))
            for k, v, pn in lst:
                if self.showThis(k, v):
                    self.handleLeaf(v, depth+1, pn)
            for k, v, pn in lst:
                self.descend(v, depth+1, pn)
def objectBrowse(obj, maxdepth = 1, exclude = standardExclude, showThis = None, outf = sys.stdout):
    """
    Print a browsable dump of obj's attribute graph, maxdepth levels deep.
    exclude/showThis are optional predicates (see ObjectDescender).
    """
    od = ObjectDescender(maxdepth = maxdepth, outf = outf)
    if showThis is not None:
        od.showThis = showThis
    od.exclude = exclude
    od.descend(obj, pathname=['arg'])
def findChild(obj, showThis, maxdepth = 8):
    """
    Search obj's attribute graph (deeper than objectBrowse's default,
    since we're being more selective) and print the path to each value
    for which showThis(name, value) is true.

    Bug fix: this used the undefined name 'Finder' (flagged by the
    "### BUG" comment near the top of this file); ObjectDescender already
    provides the descend/showThis/prefix machinery, so use it directly.
    """
    def prefix(depth, pn):
        # no indentation
        return (".".join(pn) + ": ")
    f = ObjectDescender(maxdepth = maxdepth)
    f.showThis = showThis
    f.prefix = prefix
    f.descend(obj, pathname=['arg'])
# python -c "import debug; debug.testDescend()"
def testDescend():
    # exercise objectBrowse and findChild on a small cyclic object graph
    class Foo:
        pass
    x = Foo()
    y = Foo()
    z = Foo()
    x.a = 3.14159
    x.b = "When in the course of human events"
    x.c = y
    x.d = [3,1,4,1,6]
    y.a = 2.71828
    y.b = "Apres moi, le deluge"
    y.c = z
    z.a = [x, y, z]   # cycle back to x and y
    z.b = range(12)
    x.e = {'y': y, 'z': z}
    objectBrowse(x)
    def test(name, val):
        return name == "a"
    findChild(x, test)
# end
| NanoCAD-master | cad/src/utilities/objectBrowse.py |
# Copyright 2004-2009 Nanorex, Inc. See LICENSE file for details.
"""
debug.py -- various debugging utilities and debug-related UI code
TODO: split into several modules in a debug package.
(Some of the functions here should logically be moved into ops_debug.py.)
@version: $Id$
@copyright: 2004-2009 Nanorex, Inc. See LICENSE file for details.
Names and behavior of some functions here (print_compact_traceback, etc)
are partly modelled after code by Sam Rushing in asyncore.py
in the Python library, but this implementation is newly written from scratch;
see PythonDocumentation/ref/types.html for traceback, frame, and code objects,
and sys module documentation about exc_info() and _getframe().
#e Desirable new features:
- print source lines too;
- in compact_stack, include info about how many times each frame has been
previously printed, and/or when it was first seen (by storing something in the
frame when it's first seen, and perhaps incrementing it each time it's seen).
History:
Created by Bruce. Added to by various developers, especially Will.
Bruce 071107 split out two modules by Will:
- objectBrowse.py
- scratch/api_enforcement.py
"""
import sys, os, time, traceback
from utilities.constants import noop
import foundation.env as env
from utilities import debug_flags
# note: some debug features run user-supplied code in this module's
# global namespace (on platforms where this is permitted by our licenses).
# ==
_default_x = object()
def print_verbose_traceback(x = _default_x): # by wware
# note: doesn't print the exception itself.
traceback.print_stack(file = sys.stdout)
if x is not _default_x:
print x
print
# ==
# Generally useful line number function, wware 051205
def linenum(depth = 0):
try:
raise Exception
except:
tb = sys.exc_info()[2]
f = tb.tb_frame
for i in range(depth + 1):
f = f.f_back
print f.f_code.co_filename, f.f_code.co_name, f.f_lineno
# ==
# Enter/leave functions which give performance information
# (by Will; bruce 071107 renamed them to be easier to search for.)
_timing_stack = [ ]
def debug_enter():
if debug_flags.atom_debug:
try:
raise Exception
except:
tb = sys.exc_info()[2]
f = tb.tb_frame.f_back
fname = f.f_code.co_name
_timing_stack.append((fname, time.time()))
print 'ENTER', fname
def debug_leave():
if debug_flags.atom_debug:
try:
raise Exception
except:
tb = sys.exc_info()[2]
f = tb.tb_frame.f_back
fname = f.f_code.co_name
fname1, start = _timing_stack.pop()
assert fname == fname1, 'enter/leave mismatch, got ' + fname1 + ', expected ' + fname
print 'LEAVE', fname, time.time() - start
def debug_middle():
if debug_flags.atom_debug:
try:
raise Exception
except:
tb = sys.exc_info()[2]
f = tb.tb_frame.f_back
fname, line = f.f_code.co_name, f.f_lineno
fname1, start = _timing_stack[-1]
assert fname == fname1, 'enter/middle mismatch, got ' + fname1 + ', expected ' + fname
print 'MIDDLE', fname, line, time.time() - start
# ==
# Stopwatch for measuring run time of algorithms or code snippets.
# wware 051104
class Stopwatch:
    """
    Stopwatch for measuring run time of algorithms or code snippets.
    [wware 051104]
    """
    def __init__(self):
        self.__marks = [ ]   # lap times recorded by mark(), in seconds

    def start(self):
        """Begin (or restart) timing."""
        self.__start = time.time()

    def mark(self):
        """Record the time elapsed since start() as a lap mark."""
        self.__marks.append(self.now())

    def getMarks(self):
        """Return the list of recorded lap times, in seconds."""
        return self.__marks

    def now(self):
        """Return seconds elapsed since start()."""
        return time.time() - self.__start
# usage:
# start = begin_timing("description of stuff")
# ...stuff to be timed...
# end_timing(start, "description of stuff")
def begin_timing(msg = ""):
print "begin_timing: %s" % msg
return time.time()
def end_timing(start, msg = ""):
print "end_timing: %s %s" % (msg, time.time() - start)
def time_taken(func):
    """
    call func and measure how long this takes.
    @return: a triple (real-time-taken, cpu-time-taken, result-of-func),
             but see warning for a caveat about the cpu time measurement.
    @warning: we measure cpu time using time.clock(), but time.clock() is
              documented as returning "the CPU time or real time since the
              start of the process or since the first call to clock()".
              Tests show that on Mac it probably returns CPU time. We have
              not tested this on other platforms.
    """
    cpu_before = time.clock()
    real_before = time.time()
    res = func()
    cpu_after = time.clock()
    real_after = time.time()
    return (real_after - real_before, cpu_after - cpu_before, res)
def call_func_with_timing_histmsg( func):
    """
    Run func, report its real/cpu timing to the history, and return
    func's result.
    """
    realtime, cputime, res = time_taken(func)
    env.history.message( "done; took %0.4f real secs, %0.4f cpu secs" % (realtime, cputime) )
    return res
# ==
# the following are needed to comply with our Qt/PyQt license agreements.
# [in Qt4, all-GPL, they work on all platforms, as of 070425]
def legally_execfile_in_globals(filename, globals, error_exception = True):
"""
if/as permitted by our Qt/PyQt license agreements,
execute the python commands in the given file, in this process.
"""
try:
import platform_dependent.gpl_only as gpl_only
except ImportError:
msg = "execfile(%r): not allowed in this non-GPL version" % (filename,)
print msg #e should be in a dialog too, maybe depending on an optional arg
if error_exception:
raise ValueError, msg
else:
print "ignoring this error, doing nothing (as if the file was empty)"
else:
gpl_only._execfile_in_globals(filename, globals) # this indirection might not be needed...
return
def legally_exec_command_in_globals( command, globals, error_exception = True ):
"""
if/as permitted by our Qt/PyQt license agreements,
execute the given python command (using exec) in the given globals,
in this process.
"""
try:
import platform_dependent.gpl_only as gpl_only
except ImportError:
msg = "exec is not allowed in this non-GPL version"
print msg #e should be in a dialog too, maybe depending on an optional arg
print " fyi: the command we hoped to exec was: %r" % (command,)
if error_exception:
raise ValueError, msg
else:
print "ignoring this error, doing nothing (as if the command was a noop)"
else:
gpl_only._exec_command_in_globals( command, globals) # this indirection might not be needed...
return
def exec_allowed():
    """
    are exec and/or execfile allowed in this version of NE1?
    (True exactly when the GPL-only helper module is importable.)
    """
    try:
        import platform_dependent.gpl_only
    except ImportError:
        return False
    return True
# ==
def safe_repr(obj, maxlen = 1000):
    """
    Return repr(obj), guarding against exceptions from repr itself and
    truncating the result to at most maxlen characters (the truncated
    form ends with "...>" and is exactly maxlen chars long).

    @param maxlen: maximum length of the returned string; coerced to int
                   and clamped to a minimum of 5.

    Bug fix: truncation used to keep the *tail* of the repr
    (rr[(maxlen - 4):] + "...>"), producing an arbitrarily long string
    of the wrong part; it now keeps the head (rr[:maxlen - 4]).
    """
    # fyi: this import doesn't work: from asyncore import safe_repr
    try:
        maxlen = int(maxlen)
        assert maxlen >= 5
    except:
        #e should print once-per-session error message & compact_stack
        # (using a helper function just for that purpose)
        maxlen = 5
    try:
        rr = "%r" % (obj,)
    except:
        rr = "<repr failed for id(obj) = %#x, improve safe_repr to print its class at least>" % id(obj)
    if len(rr) > maxlen:
        return rr[:(maxlen - 4)] + "...>" #e this should also be in a try/except; even len should be
    return rr
# ==
# traceback / stack utilities (see also print_verbose_traceback)
def print_compact_traceback(msg = "exception ignored: "):
    """
    Print msg followed by a compact one-line-per-frame traceback of the
    exception currently being handled, to the real stderr.
    """
    print >> sys.__stderr__, msg.encode("utf_8") + compact_traceback() # bruce 061227 changed this back to old form
    return
def compact_traceback():
    """
    Return a compact string describing the exception currently being
    handled -- "type: value" plus one "[file:line]" entry per traceback
    frame -- or an error string if no exception is being handled.
    """
    typ, value, traceback1 = sys.exc_info()
        # local named traceback1, not traceback, to avoid confusing pylint
        # about the module of the same name [bruce 071108]
    if (typ, value) == (None, None):
        del traceback1 # even though it should be None
        return "<incorrect call of compact_traceback(): no exception is being handled>"
    try:
        frames = []
        while traceback1 is not None:
            # cf. PythonDocumentation/ref/types.html;
            # starting from current stack level (of exception handler),
            # going deeper (towards innermost frame, where exception occurred):
            filename = traceback1.tb_frame.f_code.co_filename
            lineno = traceback1.tb_lineno
            frames.append("[%s:%r]" % ( os.path.basename(filename), lineno ))
            traceback1 = traceback1.tb_next
        del traceback1
        return "%s: %s\n %s" % (typ, value, ' '.join(frames))
    except:
        del traceback1
        return "<bug in compact_traceback(); exception from that not shown>"
# stack
def print_compact_stack( msg = "current stack:\n", skip_innermost_n = 0, **kws ):
    """
    Print a compact version of the current call stack to the real stderr.
    skip_innermost_n == 0 means "print only frames outside this function".
    """
    #bruce 061118 added **kws; 080314 pass our msg arg through to
    # compact_stack; 080917 revised semantics of skip_innermost_n in all
    # related functions
    print >> sys.__stderr__, \
          compact_stack( msg, skip_innermost_n = skip_innermost_n + 1, **kws )
STACKFRAME_IDS = False
    # don't commit with True, but set to True in a debugger to see more
    # info in compact_stack printouts [bruce 060330]

def compact_stack( msg = "", skip_innermost_n = 0, linesep = ' ', frame_repr = None ):
    """
    Return a compact string describing the current call stack, outermost
    frame first, prefixed by msg. skip_innermost_n == 0 means "include
    only frames outside this function"; linesep separates the frame
    entries; frame_repr, if supplied, maps a frame to extra per-frame text.
    """
    #bruce 061118 added linesep, frame_repr; 080314 added msg arg;
    # 080917 revised semantics of skip_innermost_n in related functions
    entries = []
    frame = sys._getframe( skip_innermost_n + 1)
    while frame is not None: # innermost first
        filename = frame.f_code.co_filename
        lineno = frame.f_lineno
        extra = more = ""
        if STACKFRAME_IDS:
            #bruce 060330: count how many times each frame was seen.
            # Frames don't allow setting arbitrary attributes (earlier
            # attempts failed), so store a fake "local var" through
            # f_locals, assuming it's an ordinary dict.
            count = frame.f_locals.get('_CS_seencount', 0)
            count += 1
            frame.f_locals['_CS_seencount'] = count
            if count > 1:
                extra = "|%d" % count
        if frame_repr:
            more = frame_repr(frame)
        entries.append("[%s:%r%s]%s" % ( os.path.basename(filename), lineno, extra, more ))
        frame = frame.f_back
    entries.reverse() # make it outermost first, like compact_traceback
    return msg + linesep.join(entries)
# test code for those -- but more non-test code follows, below this!
if __name__ == '__main__':
print "see sys.__stderr__ (perhaps a separate console) for test output"
def f0():
return f1()
def f1():
return f2()
def f2():
print_compact_stack("in f2(); this is the stack:\n")
try:
f3()
except:
print_compact_traceback("exception in f3(): ")
print >> sys.__stderr__, "done with f2()"
def f3():
f4()
def f4():
assert 0, "assert 0"
f0()
print >> sys.__stderr__, "returned from f0()"
print "test done"
pass
# ===
# run python commands from various sorts of integrated debugging UIs
# (for users who are developers); used in GLPane.py [or in code farther below
# which used to live in GLPane.py].
# (moved here from GLPane.py by bruce 040928; docstring and messages maybe not yet fixed)
def debug_run_command(command, source = "user debug input"): #bruce 040913-16 in GLPane.py; modified 040928
"""
Execute a python command, supplied by the user via some sort of debugging interface (named by source),
in debug.py's globals. Return 1 for ok (incl empty command), 0 for any error.
Caller should not print exception diagnostics -- this function does that
(and does not reraise the exception).
"""
#e someday we might record time, history, etc
command = "" + command # i.e. assert it's a string
#k what's a better way to do the following?
while command and command[0] == '\n':
command = command[1:]
while command and command[-1] == '\n':
command = command[:-1]
if not command:
print "empty command (from %s), nothing executed" % (source,)
return 1
if '\n' not in command:
msg = "will execute (from %s): %s" % (source, command)
else:
nlines = command.count('\n')+1
msg = "will execute (from %s; %d lines):\n%s" % (source, nlines, command)
print msg
try:
# include in history file, so one can search old history files for useful things to execute [bruce 060409]
from utilities.Log import _graymsg, quote_html
env.history.message( _graymsg( quote_html( msg)))
except:
print_compact_traceback("exception in printing that to history: ")
command = command + '\n' #k probably not needed
try:
## exec command in globals()
legally_exec_command_in_globals( command, globals() )
except:
print_compact_traceback("exception from that: ")
return 0
else:
print "did it!"
return 1
pass
# ==
def debug_timing_test_pycode_from_a_dialog( ): #bruce 051117
# TODO: rewrite this to call grab_text_using_dialog (should be easy)
title = "debug: time python code"
label = "one line of python to compile and exec REPEATEDLY in debug.py's globals()\n(or use @@@ to fake \\n for more lines)"
from PyQt4.Qt import QInputDialog
parent = None
text, ok = QInputDialog.getText(parent, title, label) # parent argument needed only in Qt4 [bruce 070329, more info above]
if not ok:
print "time python code code: cancelled"
return
# fyi: type(text) == <class '__main__.qt.QString'>
command = str(text)
command = command.replace("@@@",'\n')
print "trying to time the exec or eval of command:",command
from code import compile_command
try:
try:
mycode = compile( command + '\n', '<string>', 'exec') #k might need '\n\n' or '' or to be adaptive in length?
# 'single' means print value if it's an expression and value is not None; for timing we don't want that so use 'eval'
# or 'exec' -- but we don't know which one is correct! So try exec, if that fails try eval.
print "exec" # this happens even for an expr like 2+2 -- why?
except SyntaxError:
print "try eval" # i didn't yet see this happen
mycode = compile_command( command + '\n', '<string>', 'eval')
except:
print_compact_traceback("exception in compile_command: ")
return
if mycode is None:
print "incomplete command:",command
return
# mycode is now a code object
print_exec_timing_explored(mycode)
def print_exec_timing_explored(mycode, ntimes = 1, trymore = True): #bruce 051117
"""
After making sure exec of user code is legally permitted, and exec of mycode works,
execute mycode ntimes and print how long that takes in realtime (in all, and per command).
If it took <1 sec and trymore is True, repeat with ntimes *= 4, repeatedly until it took >= 1 sec.
"""
glob = globals()
legally_exec_command_in_globals( mycode, glob )
# if we get to here, exec of user code is legally permitted, and mycode threw no exception,
# so from now on we can say "exec mycode in glob" directly.
toomany = 10**8
while 1:
timetook = print_exec_timing(mycode, ntimes, glob) # print results, return time it took in seconds
if trymore and timetook < 1.0:
if ntimes > toomany:
print "%d is too many to do more than, even though it's still fast. (bug?)" % ntimes
break
ntimes *= 4
continue
else:
break
print "done"
return
def print_exec_timing(mycode, ntimes, glob): #bruce 051117
"""
Execute mycode in glob ntimes and print how long that takes in realtime (in all, and per command).
Return total time taken in seconds (as a float). DON'T CALL THIS ON USER CODE UNTIL ENSURING OUR LICENSE
PERMITS EXECUTING USER CODE in the caller; see print_exec_timing_explored for one way to do that.
"""
start = time.time()
for i in xrange(ntimes):
exec mycode in glob
end = time.time()
took = float(end - start)
tookper = took / ntimes
print "%d times: total time %f, time per call %f" % (ntimes, took, tookper)
return took
# ==
#bruce 050823 preliminary system for letting other modules register commands for debug menu (used by Undo experimental code)
# revised/generalized 050923 [committed 051006]
_commands = {} # maps command name -> (sort-order key, menu_cmd object); populated by register_debug_menu_command
class menu_cmd: #bruce 050923 [committed 051006]. #e maybe the maker option should be turned into a subclass-choice... we'll see.
    """
    Record describing one registered debug-menu command.
    @note: public attributes: name, order
    """
    def __init__(self, name, func, order = None, maker = False, text = None):
        """
        For documentation of the arguments, see register_debug_menu_command.
        """
        # public attrs:
        self.name = name
            # used for replacement of previous same-named commands in
            # client-maintained sets (also the default for text, below)
        self.func = func
        self.maker = not not maker # coerce to bool; when true, text is unused
        # sort key: explicitly ordered commands precede unordered ones,
        # and unordered ones sort among themselves by name
        self.order = (1, name) if order is None else (0, order)
        self.text = text or name # text of None or "" falls back to name
        return
    def menu_spec(self, widget):
        if not self.maker:
            text, func = self.text, self.func
            # bind func and widget as lambda defaults -- otherwise every
            # menu entry would end up calling the last func processed here
            # [that observation is from before the revision of 050923 but probably still applies]
            return [ (text, lambda func = func, widget = widget: func(widget)) ]
        # maker case: func builds a submenu spec itself, given the widget
        try:
            res = self.func(widget)
        except:
            print_compact_traceback("exception in menu_spec: ")
            try:
                errmsg = 'exception in menu_spec for %r' % (self.name,)
            except:
                errmsg = 'exception in menu_spec'
            return [(errmsg, noop, 'disabled')]
            #e should also protect caller from badly formatted value... or maybe menu spec processor should do that?
        return res
    pass
def register_debug_menu_command( *args, **kws ):
    """
    Let other modules register commands which appear in the debug menu.
    When invoked from the menu, a command receives one argument: the widget
    in which the debug menu appeared.
    If order is supplied and not None, it determines the command's position
    in the menu; unordered commands follow, sorted by name.
    Registering a name that is already registered silently replaces the
    earlier command (even when other options here would cause the name
    to be ignored otherwise).
    If text is supplied, it (rather than name) is the menu text; name is
    still used for replacement and maybe sorting.
    If maker is true [experimental feature], func is not the command itself
    but a sub-menu-spec maker, which runs (with widget as arg) when the
    menu is put up and returns a menu-spec list; in that case name is
    ignored except perhaps for sorting purposes.
    @param name: text for menu command
    @param function: function which implements menu command (runs with one arg, the widget)
    """
    cmd = menu_cmd( *args, **kws )
    # key by .name so a duplicate name replaces the prior entry;
    # pair with .order so the stored values sort correctly
    _commands[cmd.name] = ( cmd.order, cmd )
    return
def register_debug_menu_command_maker( *args, **kws): # guess: maker interface is better as a separate function.
    """
    Variant of register_debug_menu_command which registers func as a
    sub-menu-spec maker. Callers must not pass maker (we supply it),
    nor text (which is not useful when maker is true).
    """
    assert 'maker' not in kws
    kws['maker'] = True
    assert 'text' not in kws # text is not useful for maker = True
    register_debug_menu_command( *args, **kws)
    return
def registered_commands_menuspec( widget):
    """
    Return a menu-spec list for all registered debug-menu commands,
    ordered by each command's sort key and wrapped in a single 'other'
    submenu; return [] when no commands are registered.
    """
    menuspec = []
    for _orderjunk, cmd in sorted(_commands.values()):
        menuspec.extend( cmd.menu_spec(widget) )
    return [ ('other', menuspec) ] if menuspec else menuspec
# ===
def overridden_attrs( class1, instance1 ): #bruce 050108
    """
    Return a list of the attrs of class1 whose values on instance1
    differ from their values on class1 itself. Bound methods which
    merely re-wrap the class's own methods are not counted as overrides.
    """
    # probably works for class1, subclass1 too [untested]
    overridden = []
    for attrname in dir(class1):
        class_val = getattr(class1, attrname)
        inst_val = getattr(instance1, attrname)
        if class_val == inst_val:
            continue
        try:
            # skip a bound instance method whose underlying function is the
            # class's own unbound method
            # (approximate test; could also verify the types and the bound object in inst_val #e)
            # (note: im_func seems to work for both unbound and bound methods; #k py docs)
            if inst_val.im_func == class_val.im_func:
                continue
        except AttributeError:
            pass
        overridden.append(attrname)
    return overridden
# ==
debug_reload_once_per_event = False # verbose tracing flag for reload_once_per_event -- do not commit with true
def reload_once_per_event(module, always_print = False, never_again = True, counter = None, check_modtime = False):
    """
    Reload module (given as object or as name),
    but at most once per user-event or redraw, and only if debug_flags.atom_debug.
    Assumes w/o checking that this is a module it's ok to reload, unless the module defines _reload_ok as False,
    in which case, all other reload tests are done, but a warning is printed rather than actually reloading it.
    If always_print is True, print a console message on every reload, not just the first one per module.
    If never_again is False, refrain from preventing all further reload attempts for a module, after one reload fails for it.
    If counter is supplied, use changes to its value (rather than to env.redraw_counter) to decide when to reload.
    If check_modtime is True, the conditions for deciding when to reload are used, instead, to decide when to check
    the module's source file's modtime, and the actual reload only occurs if that has changed. (If the source file can't
    be found, a warning is printed, and reload is only attempted if never_again is False.)
    WARNING about proper use of check_modtime:
    if module A imports module B, and A and B are only imported after this function is called on them
    with check_modtime = True, and the counter increases and B's source file has been modified,
    then the developer probably wishes both A and B would be reloaded -- but nothing will be reloaded,
    since A has not been modified and that's what this function checks. When A is later modified, both will be reloaded.
    To fix this, the caller would need to make sure that, before any import of A, this function is called on both A and B
    (either order is ok, I think). We might someday extend this function to make that task easier, by having it record
    sub-imports it handles. We might use facilities in the exprs module (not currently finished) as part of that. #e
    Usage note: this function is intended for use by developers who might modify
    a module's source code and want to test the new code in the same session.
    But the default values of options are designed more for safety in production code,
    than for highest developer convenience. OTOH, it never reloads at all unless
    ATOM_DEBUG is set, so it might be better to revise the defaults to make them more convenient for developers.
    See cad/src/exprs/basic.py for an example of a call optimized for developers.
    """
    # bail out entirely unless developer debugging is enabled
    if not debug_flags.atom_debug:
        return
    if type(module) == type(""):
        # also support module names
        module = sys.modules[module]
    # "now" is the tick we compare against the module's last-reload tick
    if counter is None:
        now = env.redraw_counter
    else:
        now = counter
    try:
        old = module.__redraw_counter_when_reloaded__
            # per-module marker attribute; -1 means "never reloaded yet"
    except AttributeError:
        old = -1
    if old == now:
        # already handled during this event/redraw
        return
    # after this, if debug_reload_once_per_event, print something every time
    if debug_reload_once_per_event:
        print "reload_once_per_event(%r)" % (module,)
    if old == 'never again': #bruce 060304
        # a prior reload failure marked this module as permanently skipped
        return
    # now we will try to reload it (unless prevented by modtime check or _reload_ok == False)
    assert sys.modules[module.__name__] is module
        # sanity check: module must be the registered module object for its name
    module.__redraw_counter_when_reloaded__ = now # do first in case of exceptions in this or below, and to avoid checking modtime too often
    if check_modtime:
        ok = False # we'll set this to whether it's ok to check the modtime,
            # and if we set it True, put the modtime in our_mtime,
            # or if we set it False, print something
        try:
            ff = module.__file__
            if ff.endswith('.pyc'):
                # look at the .py source, not the compiled file
                ff = ff[:-1]
            ok = ff.endswith('.py') and os.path.isfile(ff)
            if ok:
                # check modtime
                our_mtime = os.stat(ff).st_mtime
            else:
                print "not ok to check modtime of source file of %r: " % (module,)
        except:
            print_compact_traceback("problem checking modtime of source file of %r: " % (module,) )
            ok = False
        if ok:
            old_mtime = getattr(module, '__modtime_when_reloaded__', -1) # use -1 rather than None for ease of printing in debug msg
                # only reload if modtime has changed since last reload
            want_reload = (old_mtime != our_mtime)
            setattr(module, '__modtime_when_reloaded__', our_mtime)
            if debug_reload_once_per_event:
                print "old_mtime %s, our_mtime %s, want_reload = %s" % \
                      (time.asctime(time.localtime(old_mtime)), time.asctime(time.localtime(our_mtime)), want_reload)
            pass
        else:
            # couldn't check modtime: only reload if caller permits retries
            want_reload = not never_again
            if debug_reload_once_per_event:
                print "want_reload = %s" % want_reload
        if not want_reload:
            return
        pass
    # check for _reload_ok = False, but what it affects is lower down, by setting flags about what we do and what we print.
    # the point is to isolate the effect conditions here, ensuring what we do and print matches,
    # but to not print anything unless we would have without this flag being False.
    _reload_ok = getattr(module, '_reload_ok', True)
    if _reload_ok:
        def doit(module):
            reload(module)
        reloading = "reloading"
    else:
        def doit(module):
            pass
        reloading = "NOT reloading (since module._reload_ok = False)"
    del _reload_ok
    # now we will definitely try to reload it (or not, due to _reload_ok), and in some cases print what we're doing
    if old == -1:
        # first reload of this module this session -- always announce it
        print reloading, module.__name__
        if not always_print:
            print " (and will do so up to once per redraw w/o saying so again)"
    elif always_print:
        print reloading, module.__name__
    try:
        doit(module)
    except:
        #bruce 060304 added try/except in case someone sets ATOM_DEBUG in an end-user version
        # in which reload is not supported. We could check for "enabling developer features",
        # but maybe some end-user versions do support reload, and for them we might as well do it here.
        print_compact_traceback("reload failed (not supported in this version?); continuing: ")
        if never_again:
            module.__redraw_counter_when_reloaded__ = 'never again'
    return
# ==
_profile_single_call_ENABLED = False #bruce 090305 renamed this
    # whether profile_single_call_if_enabled should actually profile
def set_enabled_for_profile_single_call(t): #bruce 090305 renamed this
    """
    Enable or disable profiling by profile_single_call_if_enabled.
    @param t: new value for the enabled flag (true means profile)
    """
    global _profile_single_call_ENABLED
    _profile_single_call_ENABLED = t
    return
# state used by profile_single_call_if_enabled
_profile_function = None # function to be called by _call_profile_function
_profile_args = None # positional args for that call
_profile_keywordArgs = None # keyword args for that call
_profile_output_file = 'profile.output' # filename for raw profiler output
def _call_profile_function():
    """
    Internal helper for profile_single_call_if_enabled: perform the call
    that was stashed in the private globals (_profile_function etc.),
    whether or not profiling is currently active.
    @return: None (any value returned by _profile_function is discarded)
    """
    func, args, kws = _profile_function, _profile_args, _profile_keywordArgs
    func(*args, **kws)
    return
def profile_single_call_if_enabled(func, *args, **keywordArgs):
"""
helper function:
Profile a function call, if enabled (see set_enabled_for_profile_single_call).
This creates a profile.output file on each use
(replacing a prior one if any, even if it was created
during the same session).
@return: None (return value from func is discarded).
Usage: change a normal function call f(a, b, c = 3) into:
profile_single_call_if_enabled(f, a, b, c = 3)
and f will be profiled if enabled (or will run normally if not).
A method call can also be profiled: o.f(a) becomes
profile_single_call_if_enabled(o.f, a).
Profiling by this helper function is enabled by calling
utilities.debug.set_enabled_for_profile_single_call(True)
and disabled by calling
utilities.debug.set_enabled_for_profile_single_call(False)
@see: print_profile_output()
@see: atom_debug_profile_filename in main_startup.py, for profiling
an entire session
"""
#bruce 090305 renamed this from 'profile'
# note: Fancier schemes, like profiling the Nth call of a function, could
# be implemented here, if desired.
global _profile_function
global _profile_args
global _profile_keywordArgs
global _profile_single_call_ENABLED
_profile_function = func
_profile_args = args
_profile_keywordArgs = keywordArgs
if _profile_single_call_ENABLED:
try:
import cProfile as py_Profile
except ImportError:
print "Unable to import cProfile. Using profile module instead."
import profile as py_Profile
pass
filePath = os.path.dirname(os.path.abspath(sys.argv[0])) + "/" + _profile_output_file
filePath = os.path.normpath(filePath)
print "Capturing profile..."
print "Profile output file: %s" % (filePath,)
py_Profile.run('from utilities.debug import _call_profile_function; _call_profile_function()', _profile_output_file)
print "...end of profile capture"
print "(to analyze, see utilities.debug.print_profile_output)"
# Uncomment this to print the profile output in a human readable form:
## print_profile_output(_profile_output_file)
else:
_call_profile_function()
_profile_function = None
_profile_args = None
_profile_keywordArgs = None
return
def print_profile_output(raw_profile_result_file = _profile_output_file):
    """
    Dump the profile data collected by profile_single_call_if_enabled
    to the console in a human readable form.
    @param raw_profile_result_file: name of the file generated by running cProfile
                                    or profile
    @type raw_profile_result_file: string (pathname)
    @see: profile_single_call_if_enabled()
    @note: it's often more convenient to run the following commands in an
           interactive python interpreter, and provide additional arguments.
           See pstats documentation for details.
    """
    import pstats
    stats = pstats.Stats(raw_profile_result_file)
    # note: stripping directories could be made an argument to this function
    stats.strip_dirs().sort_stats('cumulative').print_stats(100)
    return
# end
| NanoCAD-master | cad/src/utilities/debug.py |
# Copyright 2004-2007 Nanorex, Inc. See LICENSE file for details.
"""
icon_utilities.py - helper functions for finding icon and pixmap files
in standard places, and caching them, and handling errors when they're
not found.
$Id$
History: developed by several authors; moved out of Utility.py
by bruce 070831.
WARNING: most code still imports these functions from Utility.py.
This should be cleaned up when practical.
TODO:
Most of the code in these functions could probably be merged
into fewer functions.
Module classification: these are used for both widgets and 3d graphics,
so they need to be classified lower than either. The file might be
considered a utility, but it does a lot of io, so we can classify it
as io for now. (Another possibility would be platform.) [bruce 071214]
"""
import os, sys
from utilities import debug_flags
from PyQt4 import QtGui
import utilities.Initialize as Initialize
import utilities.EndUser as EndUser
from utilities.debug import print_compact_stack
# This is the subdirectory component "ui" at the end of "cad/src/ui",
# in which we store most icons and similar image files.
#
# (WARNING: it is also hardcoded into longer string literals in
# many places throughout the source code. In most of them it occurs
# as "ui/", but probably not in all.)
UI_SUBDIRECTORY_COMPONENT = "ui"
# these private global dictionaries are used to cache
# pixmaps and icons returned by some of the functions herein
_pixmaps = {} # maps image basename -> QPixmap (see imagename_to_pixmap)
_icons = {} # maps image basename -> QIcon (see imagename_to_icon)
_INITIAL_ICONPREFIX = "." # sentinel meaning initialize_icon_utilities hasn't run yet
_iconprefix = _INITIAL_ICONPREFIX
# This will be set by initialize() to the pathname of the directory that
# contains ui/... icon files, for private use. Note that if the
# ALTERNATE_CAD_SRC_PATH feature is being used, this will be set to
# a different value than otherwise (new feature, bruce 070831).
def initialize_icon_utilities():
"""
[must be called during startup, before image_directory() is ever called]
"""
if (Initialize.startInitialization(__name__)):
return
# initialization code
global _iconprefix
_iconprefix = os.path.dirname(os.path.abspath(sys.argv[0]))
_iconprefix = os.sep.join(_iconprefix.split(os.sep)[:-1] + ["src"])
# Note: for developers, this is .../cad/src and also contains the
# toplevel python modules or packages (as of 080111 anyway);
# within built releases, it may not be the same directory as that
# even though it ends with "src". [bruce comment 080111]
if EndUser.getAlternateSourcePath() != None:
new_iconprefix = EndUser.getAlternateSourcePath()
print "ALTERNATE_CAD_SRC_PATH: setting _iconprefix to %r rather than %r" % \
( new_iconprefix, _iconprefix )
_iconprefix = new_iconprefix
Initialize.endInitialization(__name__)
return
def image_directory(): #bruce 070604
    """
    Return the full pathname of the directory in which the image files
    (mostly icons) with names like ui/<subdir>/<file> exist.
    @note: As of 070604, for developers this path ends with cad/src
    and is also the main source directory, but in built releases it
    might be something different and might be platform-dependent or even
    build-system-dependent.
    """
    # (no 'global' statement needed: we only read the module global here)
    assert _iconprefix != _INITIAL_ICONPREFIX, \
           "too early to call image_directory()" #bruce 080805
    return _iconprefix
def get_image_path(name, print_errors = True):
"""
Return the full path given an image/icon path name.
@param name: The image path name provided by the user. The path should start
with 'ui/' directory inside the src directory.
@type name: str
@param print_errors: whether to report errors for missing icon files
when atom_debug is set. True by default.
@type print_errors: boolean
@return: full path of the image.
@rtype: str
"""
root, ext = os.path.splitext(name)
if not ext:
if name: # 'name' can be an empty string. See docstring for details.
msg = "Warning: No '.png' extension passed to get_image_path for [%s]. " \
"\nPlease add the .png suffix in the source code to remove this warning.\n" % name
print_compact_stack(msg)
name = name + '.png'
iconPath = os.path.join(image_directory(), name)
iconPath = os.path.normpath(iconPath)
if not os.path.exists(iconPath):
if debug_flags.atom_debug and print_errors:
print "icon path %s doesn't exist." % (iconPath,)
return iconPath
def geticon(name, print_errors = True):
    """
    Return the icon given an image path name.
    @param name: The image path name provided by the user. The path should start
           with 'ui/' directory inside the src directory. If name is an
           empty string, a null icon is returned.
    @type name: str
    @param print_errors: whether to report errors for missing icon files
                         when atom_debug is set. True by default.
    @type print_errors: boolean
    @return: QIcon object for the given image path.
    @rtype: QIcon object.
    """
    iconPath = get_image_path(name, print_errors)
    # Deliberately build the QIcon from iconPath even when the file is
    # missing, rather than from an empty string as getPixmap does.
    # Reason: an apparent Qt bug in text alignment for a push button with a
    # style sheet -- see L{PM_GroupBox._getTitleButton}, which sets a
    # non-existant 'Ghost Icon' via geticon to make the button text
    # left-align; an icon made from iconPath = '' would center-align it.
    # So lets just always use the 'iconPath' even when the path doesn't
    # exist. -- ninad 2007-08-22
    return QtGui.QIcon(iconPath)
def getCursorPixmap(png_filename):
    """
    Return the QPixmap for the given cursor PNG image file name.
    @param png_filename: The cursor image (PNG) file name provided by the user.
                         The cursor file must live in the 'ui/cursors' directory
                         inside the src directory.
    @type png_filename: str
    @return: QPixmap object for the given cursor image file name.
             (could return a Null icon)
    @rtype: QPixmap object.
    """
    cursorPath = os.path.join("ui/cursors/", png_filename)
    return getpixmap(cursorPath)
def getpixmap(name, print_errors = True):
"""
Return the QPixmap for the given image path name.
@param name: The image path name provided by the user. The path should start
with 'ui/' directory inside the src directory.
@type name: str
@param print_errors: whether to report errors for missing pixmap files
when atom_debug is set. True by default.
@type print_errors: boolean
@return: QPixmap object for the given image path. (could return a Null icon)
@rtype: QPixmap object.
"""
root, ext = os.path.splitext(name)
if not ext:
name = name + '.png'
pixmapPath = os.path.join(image_directory(), name)
pixmapPath = os.path.normpath(pixmapPath)
if os.path.exists(pixmapPath):
pixmap = QtGui.QPixmap(pixmapPath)
else:
# return a null pixmap. Client code should do the necessary check
# before setting the icon.
# @see: L{PM_GroupBox.addPmWidget} for an example on how this is done
pixmap = QtGui.QPixmap('')
if debug_flags.atom_debug and print_errors:
# This could be a common case. As the client uses getpixmap function
# to see if a pixmap exists. So if its obscuring other debug messages,
# the following print statement can be removed
print "pixmap path %s doesn't exist." % pixmapPath
pass
return pixmap
def imagename_to_pixmap(imagename): #bruce 050108
"""
Given the basename of a file in our cad/src/ui directory,
return a QPixmap created from that file. Cache these
(in our own Python directory, not Qt's QPixmapCache)
so that at most one QPixmap is made from each file.
If the imagename does not exist, a Null pixmap is returned.
"""
try:
return _pixmaps[imagename]
except KeyError:
if imagename[:3] == "ui/":
#If the imagename includes "ui/" at the beginning,
#remove it because we will prepend imagename with
#UI_SUBDIRECTORY_COMPONENT
imagename = imagename[3:]
pixmappath = os.path.join( image_directory(),
UI_SUBDIRECTORY_COMPONENT,
imagename)
if not os.path.exists(pixmappath):
print 'pixmap does not exist; using null pixmap: ' + pixmappath
import traceback
traceback.print_stack(file = sys.stdout)
pixmap = QtGui.QPixmap(pixmappath)
# missing file prints a warning but doesn't cause an exception,
# just makes a null pixmap [confirmed by mark 060202]
_pixmaps[imagename] = pixmap
return pixmap
pass
def imagename_to_icon(imagename):
"""
Given the basename of a file in our cad/src/ui directory,
return a QIcon created from that file. Cache these
(in our own Python directory)
so that at most one QIcon is made from each file.
If the imagename does not exist, a Null QIcon is returned.
"""
try:
return _icons[imagename]
except KeyError:
iconpath = os.path.join( image_directory(), UI_SUBDIRECTORY_COMPONENT,
imagename)
if not os.path.exists(iconpath):
print 'icon does not exist: ' + iconpath
icon = QtGui.QIcon(iconpath)
_icons[imagename] = icon
return icon
pass
# end
| NanoCAD-master | cad/src/utilities/icon_utilities.py |
# Copyright 2008 Nanorex, Inc. See LICENSE file for details.
"""
utilities/TimeUtilities.py Stuff related to processing time values.
Not called utilities/time.py because of the python time package.
@author: EricM
@version: $Id$
@copyright: 2008 Nanorex, Inc. See LICENSE file for details.
"""
from datetime import datetime
def timeStamp(when = None):
    """
    Return a string suitable for use as a timestamp component of a
    filename, in the form YYYY-MM-DD-hh-mm-ss. You can pass in a
    datetime object to extract the time from, or pass nothing and the
    string will represent now.
    """
    if (when is None):
        when = datetime.now()
    fields = (when.year, when.month, when.day,
              when.hour, when.minute, when.second)
    return "%04d-%02d-%02d-%02d-%02d-%02d" % fields
| NanoCAD-master | cad/src/utilities/TimeUtilities.py |
# Copyright 2005-2008 Nanorex, Inc. See LICENSE file for details.
"""
debug_prefs.py -- user-settable flags to help with debugging;
serves as a prototype of general first-class-preference-variables system.
Also contains some general color/icon/pixmap utilities which should be refiled.
@author: Bruce
@version: $Id$
@copyright: 2005-2008 Nanorex, Inc. See LICENSE file for details.
History:
By Bruce, 050614
"""
from utilities.constants import noop
from utilities.constants import black, white, red, green, blue, gray, orange, yellow, magenta, pink
from utilities.Comparison import same_vals
# note: qt4transition imports debug_pref from this module, which is one reason this module can't import
# print_compact_traceback at toplevel. This should be fixed; it should be ok for this
# module to import from debug. (There may be other reasons it can't in the current code.)
# [bruce 070613 comment]
import foundation.env as env
# see below for "import preferences" at runtime; we can't import it here due to errors caused by recursive import
_NOT_PASSED = [] # private object for use as keyword arg default [bruce 070110, part of fixing bug of None as Choice value]
    # (note, the same global name is used for different objects in preferences.py and debug_prefs.py)
debug_prefs = {} # maps names of debug prefs to "pref objects"; populated lazily by debug_pref()
def debug_pref(name, dtype, **options ): #bruce 070613 added call_with_new_value
    """
    Public function for declaring and registering a debug pref and querying its value.
    On first call for a given name this session, create its pref object
    with datatype dtype (initial value: dtype's default, or the stored
    prefs-db value when the prefs_key option says to persist it), and add
    it to the set shown in the debug menu's submenu of active debug prefs.
    On every call, return the pref's current value (perhaps recording the
    fact that it was used).
    Options (keyword only):
    - prefs_key: if passed, also store value changes in the user prefs db;
      True means derive the key from name (with a hardcoded prefix),
      or a string can be passed as the actual key. dtype's default value
      then acts as the prefs db default value.
    - non_debug: if true, the pref is available even to users who don't
      set ATOM-DEBUG.
    - call_with_new_value: if passed and not None, this function will be
      called with the new value whenever the value is changed from the
      debug menu (NOT immediately with the current value, since some debug
      prefs are queried extremely often and the caller can easily do that
      itself). A function passed in an earlier call for the same pref is
      discarded and never called again.
      WARNING: it is only called for changes made via the debug menu; it
      probably should also be called for prefs-db changes done by other
      means, but that's not yet implemented.
    Example call: chem.py rev 1.151 (might not be present in later versions).
    """
    call_with_new_value = options.pop('call_with_new_value', None)
    if name not in debug_prefs:
        debug_prefs[name] = DebugPref(name, dtype, **options)
    dp = debug_prefs[name]
    dp.set_call_with_new_value_function(call_with_new_value)
    return dp.current_value()
def debug_pref_object(name): #bruce 060213 experiment
    """
    Return the pref object registered for the given name
    (e.g. so callers can add subscribers to its changes).
    @raise KeyError: if debug_pref was not yet called with this name
                     in this session.
    """
    # might be useful for adding subscribers, but, this implem has error
    # if no one called debug_pref on name yet!
    # basic logic of scheme for this needs revision.
    return debug_prefs[name]
class Pref:
# might be merged with the DataType (aka PrefDataType) objects;
# only used in DebugPref as of long before 080215
"""
Pref objects record all you need to know about a currently active
preference lvalue [with optional persistence as of 060124]
"""
# class constants or instance variable initial values (some might be overridden in subclasses)
prefs_key = None
print_changes = False
non_debug = False # should this be True here and False in DebugPref subclass? decide when it matters.
classname_for_repr = 'Pref' #bruce 070430
## starts_out_from_where = "from prefs db"
def __init__(self, name, dtype, prefs_key = False, non_debug = False, subs = ()): #bruce 060124 added prefs_key & non_debug options
#e for now, name is used locally (for UI, etc, and maybe for prefs db);
# whether/how to find this obj using name is up to the caller
self.name = name
assert name and type(name) == type("") #bruce 060124 added assert (don't know if this was already an implicit requirement)
self.dtype = dtype # a DataType object
self.value = self._dfltval = dtype.get_defaultValue() # might be changed below #bruce 070228 added self._dfltval
if prefs_key: #bruce 060124 new feature
if prefs_key is True:
prefs_key = "_debug_pref_key:" + name #e should prefix depend on release-version??
assert type(prefs_key) == type(""), "prefs_key must be True or a string"
assert prefs_key # implied by the other asserts/ifs
self.prefs_key = prefs_key
import foundation.preferences as preferences # make sure env.prefs is initialized [bruce 070110 precaution]
# (evidently ok this early, but not sure if needed -- it didn't fix the bug in a Choice of None I commented on today)
self.value = env.prefs.get( prefs_key, self.value ) ###k guess about this being a fully ok way to store a default value
# Note: until I fixed preferences.py today, this failed to store a default value when self.value was None. [bruce 070110]
# note: in this case, self.value might not matter after this, but in case it does we keep it in sync before using it,
# or use it only via self.current_value() [bruce 060209 bugfix]
if self.print_changes and not same_vals(self.value, self._dfltval): #bruce 070228 new feature for debug_pref
# note: we use same_vals to avoid bugs in case of tuples or lists of Numeric arrays.
# note: this only does printing of initial value being non-default;
# printing of changes by user is done elsewhere, and presently goes
# to both history widget and console print. We'll follow the same policy
# here -- but if it's too early to print to history widget, env.history
# will print to console, and we don't want it to get printed there twice,
# so we check for that before printing it to console ourselves. [bruce 071018]
msg = "Note: %s (default %r) starts out %r" % \
(self, self._dfltval, self.value) ## + " %s" % self.starts_out_from_where
if not getattr(env.history, 'too_early', False):
print msg
env.history.message(msg, quote_html = True, color = 'orange')
pass
self.non_debug = non_debug # show up in debug_prefs submenu even when ATOM-DEBUG is not set?
self.subscribers = [] # note: these are only called for value changes due to debug menu
for sub in subs:
self.subscribe_to_changes(sub)
self.subscribe_to_changes( self._fulfill_call_with_new_value )
# note: creates reference cycle (harmless, since we're never destroyed)
return
__call_with_new_value_function = None
def set_call_with_new_value_function(self, func):
"""
Save func as our new "call with new value" function. If not None,
it will be called whenever our value changes due to use of the debug menu.
(In the future we may extend this to also call it if the value changes by other means.)
It will be discarded if this method is called again (to set a new such function or None).
"""
self.__call_with_new_value_function = func
## don't do this: self._fulfill_call_with_new_value()
return
def _fulfill_call_with_new_value(self):
from utilities.debug import print_compact_traceback # do locally to avoid recursive import problem
func = self.__call_with_new_value_function
if func is not None:
val = self.current_value()
try:
func(val)
except:
print_compact_traceback("exception ignored in %s's call_with_new_value function (%r): " % (self, func) )
# (but don't remove func, even though this print might be verbose)
# Note: if there are bugs where func becomes invalid after a relatively rare change
# (like after a new file is loaded), we might need to make this a history error and make it non-verbose.
pass
return
def subscribe_to_changes(self, func): #bruce 060216, untested, maybe not yet called, but intended to remain as a feature ###@@@
"""
Call func with no arguments after every change to our value from the debug menu,
until func() first returns true or raises an exception (for which we'll print a traceback).
Note: Doesn't detect independent changes to env.prefs[prefs_key] -- for that, suggest using
env.pref's subscription system instead.
"""
self.subscribers.append(func)
def unsubscribe(self, func):
    """
    Remove one occurrence of func from our subscriber list.
    Raises ValueError if func is not currently subscribed.
    """
    self.subscribers.remove(func)
    return
def current_value(self):
    """
    Return this pref's current value, consulting the prefs db (env.prefs)
    when we have a prefs_key, so independent changes made there by other
    code are picked up.
    """
    if self.prefs_key:
        # we have to look it up in env.prefs instead of relying on self.value,
        # in case it was independently changed in prefs db by other code [bruce 060209 bugfix]
        # (might also help with usage tracking)
        self.value = env.prefs[self.prefs_key] #k probably we could just return this and ignore self.value in this case
    return self.value
def current_value_is_not_default(self): #bruce 080312
    """
    Return True if our current value differs from our default value
    (compared via same_vals, so mutable values compare correctly).
    """
    return not same_vals( self.current_value(), self._dfltval)
def changer_menuspec(self):
"""
Return a menu_spec suitable for including in some larger menu (as item or submenu is up to us)
which permits this pref's value to be seen and/or changed;
how to do this depends on datatype (#e and someday on other prefs!)
"""
def newval_receiver_func(newval):
from utilities.debug import print_compact_traceback # do locally to avoid recursive import problem
assert self.dtype.legal_value(newval), "illegal value for %r: %r" % (self, newval)
###e change to ask dtype, since most of them won't have a list of values!!! this method is specific to Choice.
if self.current_value() == newval: #bruce 060126; revised 060209 to use self.current_value() rather than self.value
if self.print_changes:
print "redundant change:",
##return??
self.value = newval
extra = ""
if self.prefs_key:
env.prefs[self.prefs_key] = newval
# note: when value is looked up, this is the assignment that counts (it overrides self.value) [as of 060209 bugfix]
extra = " (also in prefs db)"
if self.print_changes:
## msg = "changed %r to %r%s" % (self, newval, extra)
msg = "changed %s to %r%s" % (self, newval, extra) # shorter version for console too [bruce 080215]
print msg
msg = "changed %s to %r%s" % (self, newval, extra) # shorter version (uses %s) for history [bruce 060126]
env.history.message(msg, quote_html = True, color = 'gray') #bruce 060126 new feature
for sub in self.subscribers[:]:
#bruce 060213 new feature; as of 070613 this also supports the call_with_new_value feature
from utilities.debug import print_compact_traceback # do locally to avoid recursive import problem
try:
unsub = sub()
except:
print_compact_traceback("exception ignored in some subscriber to changes to %s: " % self)
unsub = 1
if unsub:
self.subscribers.remove(sub) # should work properly even if sub is present more than once
continue
return # from newval_receiver_func
return self.dtype.changer_menuspec(self.name, newval_receiver_func, self.current_value())
def __repr__(self):
    """
    Detailed form: display-class-name, pref name and id, plus the
    prefs_key when we have one.
    """
    if self.prefs_key:
        extra = " (prefs_key %r)" % self.prefs_key
    else:
        extra = ""
    return "<%s %r at %#x%s>" % (self.classname_for_repr, self.name, id(self), extra)
def __str__(self):
    """
    Terse form: display-class-name and pref name only
    (unlike __repr__, no id and no prefs_key).
    """
    return "<%s %r>" % (self.classname_for_repr, self.name)
pass # end of enclosing class
# ==
class DebugPref(Pref):
    """
    Pref subclass for developer ("debug") preferences shown in the debug
    menu; unlike plain Pref, value changes are always reported
    (print_changes), and messages identify it as 'debug_pref'.
    """
    classname_for_repr = 'debug_pref' #bruce 070430, for clearer History messages
    print_changes = True
    ## starts_out_from_where = "(from debug prefs submenu)"
    pass
# == datatypes
class DataType:
    """
    abstract class for data types for preferences
    (subclasses are for specific kinds of data types;
    instances are for data types themselves,
    but are independent from a specific preference-setting
    that uses that datatype)
    (a DataType object might connote pref UI aspects, so it's not just
    about the data, but it's largely a datatype; nonetheless,
    consider renaming it to PrefDataType or so ###e)

    Subclasses are expected to supply name_of_value, and usually override
    normalize_value, legal_value, and changer_menuspec.
    """
    #e some default method implems; more could be added
    ###e what did i want to put here??
    def changer_menu_text(self, instance_name, curval = None):
        """
        Return some default text for a menu meant to display and permit changes to a pref setting
        of this type and the given instance-name (of the pref variable) and current value.
        (API kluge: curval = None means curval not known, unless None's a legal value.
        Better to separate these into two args, perhaps optionally if that can be clean. #e)
        """
        if curval is None and not self.legal_value(curval): #####@@@@ use it in the similar-code place
            cvtext = ": ?" # I think this should never happen in the present calling code
        else:
            cvtext = ": %s" % self.short_name_of_value(curval)
        return "%s" % instance_name + cvtext
    def short_name_of_value(self, value):
        # default: same as the full name; subclasses may override with an
        # abbreviated form (e.g. ColorType's "r,g,b" integer triple)
        return self.name_of_value(value)
    def normalize_value(self, value):
        """
        most subclasses should override this; see comments in subclass methods;
        not yet decided whether it should be required to detect illegal values, and if so, what to do then;
        guess: most convenient to require nothing about calling it with illegal values; but not sure;
        ##e maybe split into public and raw forms, public has to detect illegals and raise ValueError (only).
        """
        return value # wrong for many subclasses, but not all (assuming value is legal)
    def legal_value(self, value):
        """
        Is value legal for this type? [Not sure whether this should include "after self.normalize_value" or not]
        """
        try:
            self.normalize_value(value) # might raise recursion error if that routine checks for value being legal! #e clean up
            return True # not always correct!
        except:
            # kluge, might hide bugs, but at least in this case (and no bugs)
            # we know we'd better not consider this value legal!
            return False
    pass
def autoname(thing):
    """
    Return a default display name for thing, used when a Choice value
    was not given an explicit name.

    @param thing: any value
    @return: repr(thing)
    @rtype: string
    """
    # was `thing` (backquotes); repr() is equivalent and the backquote
    # syntax is deprecated (removed in Python 3)
    return repr(thing) # for now
class Choice(DataType):
    #e might be renamed ChoicePrefType, or renamed ChoicePref and merged
    # with Pref class to include a prefname etc
    """
    DataType for a choice between one of a few specific values,
    perhaps with names and comments and order and default.
    """
    # WARNING: before 070110, there was a bug if None was used as one of the choices, but it should be ok now,
    # except that the "API kluge: curval = None means curval not known, unless None's a legal value"
    # in docstring of self.changer_menu_text has not been reviewed regarding this. ###e [bruce 070110 comment]
    def __init__(self, values = None, names = None, names_to_values = None, defaultValue = _NOT_PASSED):
        """
        @param values: sequence of legal values (copied into a list).
        @param names: optional display names, parallel to values; unnamed
                      values get autoname(value).
        @param names_to_values: optional dict of extra name -> value pairs,
                      unioned (in sorted-name order) with the above.
        @param defaultValue: which value is the default; if not passed, the
                      first value is used (the _NOT_PASSED sentinel lets
                      None itself be a legal value and a legal default).
        """
        #e names_to_values should be a dict from names to values; do we union these inits or require no redundant ones? Let's union.
        if values is not None:
            values = list(values) #e need more ways to use the init options
        else:
            values = []
        if names is not None:
            assert len(names) <= len(values)
            names = list(names)
        else:
            names = []
        while len(names) < len(values):
            i = len(names) # first index whose value needs a name
            names.append( autoname(values[i]) )
        if names_to_values:
            items = names_to_values.items()
            items.sort()
            for name, value in items:
                names.append(name)
                values.append(value)
        self.names = names
        self.values = values
        #e nim: comments
        self._defaultValue = self.values[0] # might be changed below
        self.values_to_comments = {}
        self.values_to_names = {}
        for name, value in zip(self.names, self.values):
            self.values_to_names[value] = name
            if defaultValue is not _NOT_PASSED and defaultValue == value: # even if defaultValue is None!
                # There used to be a bug when None was a legal value but no defaultValue was passed,
                # in which this code would change self._defaultValue to None. I fixed that bug using _NOT_PASSED.
                # This is one of two changes which makes None acceptable as a Choice value.
                # The other is in preferences.py dated today. [bruce 070110]
                self._defaultValue = value
    def name_of_value(self, value):
        # reverse lookup of the display name for a value
        # (KeyError if value is not one of self.values)
        return self.values_to_names[value]
    def get_defaultValue(self):
        # WARNING [can be removed soon if nothing goes wrong]:
        # When I renamed this to make it consistent in capitalization
        # with similar attributes, I also renamed it to be clearly a get method,
        # since in most code this name is used for a public attribute instead.
        # AFAIK it has only two defs and two calls, all in this file. [bruce 070831]
        return self._defaultValue
    def legal_value(self, value):
        """
        Is value legal for this type?
        [Not sure whether this should include "after self.normalize_value" or not]
        """
        return value in self.values
    def changer_menuspec( self, instance_name, newval_receiver_func, curval = None): # for Choice (aka ChoicePrefType)
        #e could have special case for self.values == [True, False] or the reverse
        text = self.changer_menu_text( instance_name, curval) # e.g. "instance_name: curval"
        submenu = submenu_from_name_value_pairs( zip(self.names, self.values),
                                                 newval_receiver_func,
                                                 curval = curval,
                                                 indicate_defaultValue = True, #bruce 070518 new feature
                                                 defaultValue = self._defaultValue
                                                )
        #e could add some "dimmed info" and/or menu commands to the end of submenu
        #e could use checkmark to indicate non_default = not same_vals(self._defaultValue, curval),
        # but too confusing if _defaultValue is True and curval is False...
        # so nevermind unless I figure out a nonfusing indication of that
        # that is visible outside the list of values submenu
        # and doesn't take too much room.
        # Maybe appending a very short string to the text, in changer_menu_text?
        # [bruce 080312 comment]
        return ( text, submenu )
    pass
# convenience singletons: boolean Choice datatypes differing only in default
Choice_boolean_False = Choice([False, True])
Choice_boolean_True = Choice([False, True], defaultValue = True)
def submenu_from_name_value_pairs( nameval_pairs,
                                   newval_receiver_func,
                                   curval = None,
                                   mitem_value_func = None,
                                   indicate_defaultValue = False,
                                   defaultValue = None
                                  ):
    """
    Build and return a menu_spec list with one item per (name, value) pair;
    choosing an item calls newval_receiver_func(value), and the item whose
    value matches curval (via same_vals) is 'checked'. If
    indicate_defaultValue is true, the item matching defaultValue gets
    " (default)" appended to its text. mitem_value_func, if provided, may
    post-process each menu item tuple (returning None to skip the item, or
    a replacement tuple).
    """
    #bruce 080312 revised to use same_vals (2 places)
    from utilities.debug import print_compact_traceback # do locally to avoid recursive import problem
    submenu = []
    for name, value in nameval_pairs:
        text = name
        if indicate_defaultValue and same_vals(value, defaultValue):
            #bruce 070518 new feature
            text += " (default)"
        # note: value is bound as a lambda default so each item captures
        # the value from its own loop iteration (avoids late binding)
        command = ( lambda ## event = None,
                    func = newval_receiver_func,
                    val = value :
                    func(val) )
        mitem = ( text,
                  command,
                  same_vals(curval, value) and 'checked' or None
                 )
        if mitem_value_func is not None:
            try:
                res = "<no retval yet>" # for error messages
                res = mitem_value_func(mitem, value)
                if res is None:
                    continue # let func tell us to skip this item ###k untested
                assert type(res) == type((1, 2))
                mitem = res
            except:
                print_compact_traceback("exception in mitem_value_func, or error in retval (%r): " % (res,))
                #e improve, and atom_debug only
                pass
        submenu.append(mitem)
    return submenu
class ColorType(DataType): #e might be renamed ColorPrefType or ColorPref
    """
    Pref Data Type for a color. We store all colors internally as a 3-tuple of floats
    (but assume ints in [0,255] are also enough -- perhaps that would be a better internal format #e).
    """
    #e should these classes all be named XxxPrefType or so? Subclasses might specialize in prefs-UI but not datatype per se...
    def __init__(self, defaultValue = None):
        # default default is mid-gray; always stored normalized
        if defaultValue is None:
            defaultValue = (0.5, 0.5, 0.5) # gray
        self._defaultValue = self.normalize_value( defaultValue)
    def normalize_value(self, value):
        """
        Turn any standard kind of color value into the kind we use internally -- a 3-tuple of floats from 0.0 to 1.0.
        Return the normalized value.
        If value is not legal, we might just return it or we might raise an exception.
        (Preferably ValueError, but for now this is NIM, it might be any exception, or none.)
        """
        #e support other python types for value? Let's support 3-seq of ints or floats, for now.
        # In future might want to support color name strings, QColor objects, ....
        r,g,b = value
        value = r,g,b # for error messages in assert
        assert type(r) == type(g) == type(b), \
               "color r,g,b components must all have same type (float or int), not like %r" % (value,)
        assert type(r) in (type(1), type(1.0)), "color r,g,b components must be float or int, not %r" % (value,)
        if type(r) == type(1):
            # int components are assumed to be in [0, 255] and scaled down
            r = r/255.0 #e should check int range
            g = g/255.0
            b = b/255.0
        #e should check float range
        value = r,g,b # not redundant with above
        return value
    def name_of_value(self, value):
        # bugfix: this used to return the uninterpolated format string
        # (the "% ..." was missing), so every color printed as the
        # literal template "Color(%0.3f, %0.3f, %0.3f)".
        return "Color(%0.3f, %0.3f, %0.3f)" % tuple(value) # Color() is only used in this printform, nothing parses it (for now) #e could say RGB
    def short_name_of_value(self, value):
        # abbreviated form: the 0-255 integer components, comma-separated
        return "%d,%d,%d" % self.value_as_int_tuple(value)
    def value_as_int_tuple(self, value):
        # convert normalized floats back to rounded 0-255 ints
        r, g, b = value # assume floats
        return tuple(map( lambda component: int(component * 255 + 0.5), (r, g, b) ))
    def value_as_QColor(self, value = None): ###k untested??
        #e API is getting a bit klugy... we're using a random instance as knowing about the superset of colors,
        # and using its default value as the value here...
        if value is None:
            value = self.get_defaultValue()
        rgb = self.value_as_int_tuple(value)
        from PyQt4.Qt import QColor
        return QColor(rgb[0], rgb[1], rgb[2]) #k guess
    def get_defaultValue(self):
        return self._defaultValue
    def changer_menuspec( self, instance_name, newval_receiver_func, curval = None):
        # in the menu, we'd like to put up a few recent colors, and offer to choose a new one.
        # but in present architecture we have no access to any recent values! Probably this should be passed in.
        # For now, just use the curval and some common vals.
        text = self.changer_menu_text( instance_name, curval) # e.g. "instance_name: curval"
        values = [black, white, red, green, blue, gray, orange, yellow, magenta, pink] #e need better order, maybe submenus
        ##e self.recent_values()
        #e should be able to put color names in menu - maybe even translate numbers to those?
        values = map( self.normalize_value, values) # needed for comparison
        if curval not in values:
            values.insert(0, curval)
        names = map( self.short_name_of_value, values)
        # include the actual color in the menu item (in place of the checkmark-position; looks depressed when "checked")
        def mitem_value_func( mitem, value):
            """
            add options to mitem based on value and return new mitem
            """
            ###e should probably cache these things? Not sure... but it might be needed
            # (especially on Windows, based on Qt doc warnings)
            iconset = iconset_from_color( value)
            #e need to improve look of "active" icon in these iconsets (checkmark inside? black border?)
            return mitem + (('iconset',iconset),)
        submenu = submenu_from_name_value_pairs( zip(names, values),
                                                 newval_receiver_func,
                                                 curval = curval,
                                                 mitem_value_func = mitem_value_func )
        submenu.append(( "Choose...", pass_chosen_color_lambda( newval_receiver_func, curval ) ))
        #e need to record recent values somewhere, include some of them in the menu next time
        #k does that let you choose by name? If not, QColor has a method we could use to look up X windows color names.
        return ( text, submenu )
    pass
def pass_chosen_color_lambda( newval_receiver_func, curval, dialog_parent = None): #k I hope None is ok as parent
    """
    Return a no-argument callable which pops up a QColorDialog (initialized
    to curval) and, if the user picks a valid color, passes it to
    newval_receiver_func as a 3-tuple of floats in [0.0, 1.0].
    """
    def func():
        from PyQt4.Qt import QColorDialog
        color = QColorDialog.getColor( qcolor_from_anything(curval), dialog_parent)
        if color.isValid():
            newval = color.red()/255.0, color.green()/255.0, color.blue()/255.0
            newval_receiver_func(newval)
        return
    return func
def qcolor_from_anything(color):
    """
    Coerce color (a QColor, any python color format ColorType understands,
    or None meaning gray) into a QColor.
    """
    from PyQt4.Qt import QColor
    if isinstance(color, QColor):
        return color
    if color is None:
        color = (0.5, 0.5, 0.5) # gray
    return ColorType(color).value_as_QColor() ###k untested call
def contrasting_color(qcolor, notwhite = False ):
    """
    return a QColor which contrasts with qcolor;
    if notwhite is true, it should also contrast with white.
    """
    # note: only the blue component is halved below, to count blue as
    # closer to black when judging brightness
    rgb = qcolor.red(), qcolor.green(), qcolor.blue() / 2 # blue is too dark, have to count it as closer to black
    from PyQt4.Qt import Qt
    if max(rgb) > 90: # threshhold is a guess, mostly untested; even blue=153 seemed a bit too low so this is dubiously low.
        # it's far enough from black (I hope)
        return Qt.black
    if notwhite:
        return Qt.cyan
    return Qt.white
def pixmap_from_color_and_size(color, size):
    """
    Return a QPixmap of the given size, filled uniformly with the given
    color (any format qcolor_from_anything accepts).
    size may be a single int (meaning a square) or a (width, height) pair.
    """
    from PyQt4.Qt import QPixmap
    if type(size) == type(1):
        width = height = size
    else:
        width, height = size
    pixmap = QPixmap(width, height)
    pixmap.fill( qcolor_from_anything(color) )
    return pixmap
def iconset_from_color(color):
    """
    Return a QIcon suitable for showing the given color in a menu item or (###k untested, unreviewed) some other widget.
    The color can be a QColor or any python type we use for colors (out of the few our helper funcs understand).
    """
    # figure out desired size of a small icon
    from PyQt4.Qt import QIcon
    #size = QIcon.iconSize(QIcon.Small) # a QSize object
    #w, h = size.width(), size.height()
    w, h = 16, 16 # hardwire it and be done
    # get pixmap of that color
    pixmap = pixmap_from_color_and_size( color, (w,h) )
    # for now, let Qt figure out the Active appearance, etc. Later we can draw our own black outline, or so. ##e
    iconset = QIcon(pixmap)
    # whether the checked state shows a checkmark or a box outline is
    # itself controlled by a debug_pref
    checkmark = ("checkmark" == debug_pref("color checkmark", Choice(["checkmark","box"])))
    modify_iconset_On_states( iconset, color = color, checkmark = checkmark )
    return iconset
def modify_iconset_On_states( iconset, color = white, checkmark = False, use_color = None):
    #bruce 050729 split this out of iconset_from_color
    """
    Modify the On states of the pixmaps in iconset, so they can be distinguished from the (presumably equal) Off states.
    (Warning: for now, only the Normal On states are modified, not the Active or Disabled On states.)
    By default, the modification is to add a surrounding square outline whose color contrasts with white,
    *and* also with the specified color if one is provided. If checkmark is true, the modification is to add a central
    checkmark whose color contrasts with white, *or* with the specified color if one is provided.
    Exception to all that: if use_color is provided, it's used directly rather than any computed color.

    NOTE: the early return below is a Qt 4 porting stub -- everything after
    it is currently unreachable, retained Qt 3 code.
    """
    from PyQt4.Qt import QIcon, QPixmap
    if True:
        ## print 'in debug_prefs.modify_iconset_On_states : implement modify_iconset_On_states for Qt 4'
        return
    for size in [QIcon.Small, QIcon.Large]: # Small, Large = 1,2
        for mode in [QIcon.Normal]: # Normal = 0; might also need Active for when mouse over item; don't yet need Disabled
            # make the On state have a checkmark; first cause it to generate both of them, and copy them both
            # (just a precaution so it doesn't make Off from the modified On,
            # even though in my test it treats the one we passed it as Off --
            # but I only tried asking for Off first)
            ## for state in [QIcon.Off, QIcon.On]: # Off = 1, On = 0, apparently!
            ## # some debug code that might be useful later:
            ## ## pixmap = iconset.pixmap(size, mode, state) # it reuses the same pixmap for both small and large!!! how?
            ## ## generated = iconset.isGenerated(size, mode, state) # only the size 1, state 1 (Small Off) says it's not generated
            ## ## print "iconset pixmap for size %r, mode %r, state %r (generated? %r) is %r" % \
            ## ## (size, mode, state, generated, pixmap)
            ## pixmap = iconset.pixmap(size, mode, state)
            ## pixmap = QPixmap(pixmap) # copy it ###k this might not be working; and besides they might be copy-on-write
            ## ## print pixmap # verify unique address
            ## iconset.setPixmap(pixmap, size, mode, state)
            # now modify the On pixmap; assume we own it
            state = QIcon.On
            pixmap = iconset.pixmap(size, mode, state)
            #e should use QPainter.drawPixmap or some other way to get a real checkmark and add it,
            # but for initial test this is easiest and will work: copy some of this color into middle of black.
            # Warning: "size" localvar is in use as a loop iterator!
            psize = pixmap.width(), pixmap.height() #k guess
            w,h = psize
            ## from utilities import debug_flags
            ## if debug_flags.atom_debug:
            ## print "atom_debug: pixmap(%s,%s,%s) size == %d,%d" % (size, mode, state, w,h)
            from PyQt4.Qt import copyBlt
            if checkmark:
                if use_color is None:
                    use_color = contrasting_color( qcolor_from_anything(color))
                pixmap2 = pixmap_from_color_and_size( use_color, psize)
                for x,y in [(-2,0),(-1,1),(0,2),(1,1),(2,0),(3,-1),(4,-2)]:
                    # this imitates Qt's checkmark on Mac; is there an official source?
                    # (it might be more portable to grab pixels from a widget or draw a QCheckListItem into a pixmap #e)
                    x1,y1 = x + w/2 - 1, y + h/2 - 1
                    copyBlt( pixmap, x1,y1, pixmap2, x1,y1, 1,3 )
                iconset.setPixmap(pixmap, size, mode, state) # test shows re-storing it is required (guess: copy on write)
            else:
                if use_color is None:
                    use_color = contrasting_color( qcolor_from_anything(color), notwhite = True)
                pixmap2 = pixmap_from_color_and_size( use_color, psize)
                ###e needs to choose white if actual color is too dark (for a checkmark)
                # or something diff than both black and white (for an outline, like we have now)
                copyBlt( pixmap2, 2,2, pixmap, 2,2, w-4, h-4 )
                # args: dest, dx, dy, source, sx, sy, w,h. Doc hints that overwriting edges might crash.
                iconset.setPixmap(pixmap2, size, mode, state)
                # note: when I had a bug which made pixmap2 too small (size 1x1), copyBlt onto it didn't crash,
                # and setPixmap placed it into the middle of a white background.
                pass
            pass
    return # from modify_iconset_On_states
# ==
#bruce 060124 changes: always called, but gets passed debug_flags.atom_debug as an arg to filter the prefs,
# and has new API to return a list of menu items (perhaps empty) rather than exactly one.
from utilities import debug_flags
def debug_prefs_menuspec( atom_debug): #bruce 080312 split this up
    """
    Return the debug_prefs section for the debug menu, as a list of zero or more
    menu items or submenus (as menu_spec tuples or lists).

    @param atom_debug: whether developer (ATOM-DEBUG) mode is on; when false,
                       only prefs flagged non_debug are offered.
    """
    # first exercise all our own debug_prefs, then get the sorted list
    # of all known ones.
    if debug_flags.atom_debug: #bruce 050808 (it's ok that this is not the atom_debug argument)
        testcolor = debug_pref("atom_debug test color", ColorType(green))
    max_menu_length = debug_pref(
        "(max debug prefs submenu length?)", #bruce 080215
        # initial paren or space puts it at start of menu
        Choice([10, 20, 30, 40, 50, 60, 70], defaultValue = 20),
        #bruce 080317 changed default from 40 to 20, and revised order
        non_debug = True, #bruce 080317
        prefs_key = "A10/max debug prefs submenu length" # revised, bruce 080317
     )
    non_default_submenu = True # could be enabled by its own debug_pref if desired
    items = [(name.lower(), name, pref) for name, pref in debug_prefs.items()]
    # use name.lower() as sortkey, in this function and some of the subfunctions;
    # name is included to disambiguate cases where sortkey is identical
    items.sort()
    # then let each subsection process this in its own way.
    if non_default_submenu:
        part1 = _non_default_debug_prefs_menuspec( items )
    else:
        part1 = []
    part2 = _main_debug_prefs_menuspec( items, max_menu_length, atom_debug)
    return part1 + part2
def _non_default_debug_prefs_menuspec( items): #bruce 080312 added this
    """
    [private helper for debug_prefs_menuspec]
    Return a submenu for the debug_prefs currently set to a non-default value,
    if there are any. Items have a private format coming from our caller.
    """
    submenu = []
    for sortkey_junk, name_junk, pref in items:
        if pref.current_value_is_not_default():
            submenu.append( pref.changer_menuspec() )
    if not submenu:
        return []
    text = "debug prefs currently set (%d)" % len(submenu)
    return [ (text, submenu) ]
def _main_debug_prefs_menuspec( items, max_menu_length, atom_debug):
    """
    [private helper for debug_prefs_menuspec]
    Return a list of zero or more menu items or submenus (as menu_spec tuples or lists)
    usable to see and edit settings of all active debug prefs (for atom_debug true)
    or all the ones that have their non_debug flag set (for atom_debug false).
    Items have a private format coming from our caller.
    If there are more prefs than max_menu_length, they are split into
    several alphabetical-range submenus.
    """
    items_wanted = []
    # will become a list of (sortkey, menuspec) pairs
    for sortkey, name_junk, pref in items:
        if pref.non_debug or atom_debug:
            items_wanted.append( (sortkey, pref.changer_menuspec()) )
            # note: sortkey is not used below for sorting (that
            # happened in caller), but is used for determining ranges.
            # print name_junk, to see the list for determining good ranges below
    if not items_wanted:
        if atom_debug:
            return [ ( "debug prefs submenu", noop, "disabled" ) ]
        else:
            return []
    elif len(items_wanted) <= max_menu_length:
        # everything fits into a single submenu
        submenu = [menuspec for sortkey, menuspec in items_wanted]
        return [ ( "debug prefs submenu", submenu) ]
    else:
        # split the menu into smaller sections;
        # use the first splitting scheme of the following which works
        # (or the last one even if it doesn't work)
        schemes = [
            # these ranges were made by evenly splitting 62 debug prefs
            # (there are a lot each of d, e, g, u, and a bunch each of c, m, p, t)
            ("a-e", "f-z"),
            ("a-d", "e-j", "k-z"),
            ("a-c", "d-e", "f-m", "n-z")
         ]
        best_so_far = None
        for scheme in schemes:
            # split according to scheme, then stop if it's good enough
            pointer = 0
            res = []
            good_enough = True # set to False if any submenu is still too long
            total = 0 # count items, for an assert
            for range in scheme:
                # grab the remaining ones up to the end of this range
                # (the last range of a scheme takes everything left over)
                one_submenu = []
                while (pointer < len(items_wanted) and
                       (range == scheme[-1] or
                        items_wanted[pointer][0][0] <= range[-1])):
                    menuspec = items_wanted[pointer][1]
                    pointer += 1
                    one_submenu.append( menuspec )
                # and put them into a submenu labelled with the range
                good_enough = good_enough and len(one_submenu) <= max_menu_length
                total += len(one_submenu)
                res.append( ("debug prefs (%s)" % range, one_submenu) )
                # (even if one_submenu is empty)
                continue # next range
            assert total == len(items_wanted)
            assert len(res) == len(scheme) # revise if we drop empty ones
            best_so_far = res
            if good_enough:
                break
            continue
        # good enough, or as good as we can get
        assert best_so_far
        return best_so_far
    pass
# ==
# specific debug_pref exerciser/access functions can go here,
# if they need to be imported early during startup or by several source files
# [this location is deprecated -- use GlobalPreferences.py instead. --bruce 080215]
def _permit_property_pane():
    """
    should we set up this session to look a bit worse,
    but permit experimental property pane code to be used?
    (a persistent debug_pref; takes effect next session)
    """
    return debug_pref("property pane debug pref offered? (next session)",
                      Choice_boolean_False,
                      non_debug = True,
                      prefs_key = "A8 devel/permit property pane")
# cached answer for this_session_permit_property_pane(); 'unknown' means
# "not yet evaluated"
_this_session_permit_property_pane = 'unknown' # too early to evaluate _permit_property_pane() during this import
def this_session_permit_property_pane():
    """
    this has to give the same answer throughout one session,
    so the first answer is cached and reused
    """
    global _this_session_permit_property_pane
    if _this_session_permit_property_pane == 'unknown':
        _this_session_permit_property_pane = _permit_property_pane()
        if _this_session_permit_property_pane:
            _use_property_pane() # get it into the menu right away, otherwise we can't change it until it's too late
    return _this_session_permit_property_pane
def _use_property_pane():
    """
    the underlying debug_pref controlling whether a property pane is used
    (see use_property_pane for the public query)
    """
    return debug_pref("property pane (for HJ dialog)?", Choice_boolean_False, non_debug = True,
                      prefs_key = "A8 devel/use property pane")
def use_property_pane():
    """
    should we actually use a property pane?
    (only works if set before the first time a participating dialog is used)
    (only offered in debug menu if a property pane is permitted this session)
    """
    return this_session_permit_property_pane() and _use_property_pane()
def debug_pref_History_print_every_selected_object(): #bruce 070504
    """
    Return the (persistent) debug_pref value controlling whether History
    prints every selected object.
    """
    res = debug_pref("History: print every selected object?",
                     Choice_boolean_False,
                     non_debug = True,
                     prefs_key = "A9/History/print every selected object?"
                    )
    return res
# == test code
if __name__ == '__main__':
    # exercise a couple of debug_prefs, then dump the resulting menu spec
    spinsign = debug_pref("spinsign",Choice([1,-1]))
    testcolor = debug_pref("test color", ColorType(green))
    print debug_prefs_menuspec(True)
# end
| NanoCAD-master | cad/src/utilities/debug_prefs.py |
# Copyright 2004-2007 Nanorex, Inc. See LICENSE file for details.
"""
Utilities for formatting various data types as strings.
@author: Eric Messick
@version: $Id$
@copyright: 2004-2007 Nanorex, Inc. See LICENSE file for details.
@license: GPL
"""
def Vector3ToString(v):
    """
    Print a vector surrounded by angle brackets: '<1,2,3>'
    Used for povray colors.

    @param v: sequence of numeric components (normally length 3, but any
              length is now handled)
    @return: string of the form "<c1,c2,...>"
    """
    # generalized from three hard-coded indexes to any-length sequence;
    # output for 3-vectors is unchanged
    return "<" + ",".join([str(c) for c in v]) + ">"
| NanoCAD-master | cad/src/utilities/Printing.py |
NanoCAD-master | cad/src/cnt/__init__.py |
|
# Copyright 2004-2008 Nanorex, Inc. See LICENSE file for details.
"""
NanotubeParameters.py -- Generates Nanotube from parameters.
@author: Mark Sims
@version: $Id$
@copyright: 2004-2008 Nanorex, Inc. See LICENSE file for details.
History:
Mark 2008-03-09:
- Created (incorporating some code from Will's older file NanotubeGenerator.py).
"""
import foundation.env as env
from math import sin, cos, pi
from math import atan2
from Numeric import dot, argmax, argmin, sqrt
from model.chem import Atom
from model.bonds import bond_atoms
from model.bond_constants import V_GRAPHITE, V_SINGLE
from model.bond_constants import atoms_are_bonded
from utilities.Log import greenmsg
from utilities.debug import Stopwatch
from geometry.VQT import Q, V, angleBetween, cross, vlen, norm
from geometry.geometryUtilities import matrix_putting_axis_at_z
from model.chunk import Chunk
from model.elements import PeriodicTable
from model.bonds import CC_GRAPHITIC_BONDLENGTH, BN_GRAPHITIC_BONDLENGTH
# supported nanotube flavors; ntBondLengths is indexed in parallel with ntTypes
ntTypes = ["Carbon", "Boron Nitride"]
ntEndings = ["Hydrogen", "None"] # "Capped" NIY. "Nitrogen" removed. --mark
ntBondLengths = [CC_GRAPHITIC_BONDLENGTH, BN_GRAPHITIC_BONDLENGTH]
sqrt3 = 3 ** 0.5 # cached constant used by the chirality math below
# no longer used:
##basepath_ok, basepath = find_plugin_dir("Nanotube")
##if not basepath_ok:
## env.history.message(orangemsg("The cad/plugins/Nanotube directory is missing."))
class NanotubeParameters:
"""
Generates a nanotube from parameters. Supports both Carbon Nanotubes (CNTs)
or Boron Nitride Nanotubes (BNNT).
"""
# default parameters for a new nanotube (a 5x5 single-wall carbon tube):
n = 5 # chiral index n
m = 5 # chiral index m
type = "Carbon" # one of ntTypes
endPoint1 = None # nanotube endpoints (not yet set)
endPoint2 = None
endings = "Hydrogen" # "Hydrogen" or "None". "Capped" NIY.
zdist = 0.0 # Angstroms
xydist = 0.0 # Angstroms
twist = 0 # Degrees/Angstrom
bend = 0 # Degrees
numwalls = 1 # Single
spacing = 2.46 # Spacing b/w MWNT in Angstroms
def __init__(self):
    """
    Constructor. Creates an instance of a Nanotube.
    By default, the nanotube is a 5x5 Carbon Nanotube. Use the set methods
    to change the nanotube's chirality and type (i.e. Boron Nitride).
    """
    self.setBondLength() # assigns the type's default bond length and calls _update()
    self._computeRise() # Assigns default rise value.
    self._update()
    return
def _update(self):
    """
    Private method.
    Updates all chirality parameters whenever the following attrs are
    changed via their set methods:
    - n, m,
    - type
    - bond_length

    Recomputes the cached chiral-angle sin/cos and the tube constants
    A, B and radius R used by x1y1()/xyz().
    """
    n, m = self.getChirality()
    type = self.getType() # NOTE(review): not used below -- looks vestigial; kept for safety
    bond_length = self.getBondLength()
    self.maxlen = maxlen = 1.2 * bond_length # max length of a plausible bond, used by callers as a cutoff
    self.maxlensq = maxlen**2
    # chiral vector components in graphene lattice units
    x = (n + 0.5 * m) * sqrt3
    y = 1.5 * m
    angle = atan2(y, x) # the chiral angle
    twoPiRoverA = (x**2 + y**2) ** .5
    AoverR = (2 * pi) / twoPiRoverA
    self.__cos = cos(angle)
    self.__sin = sin(angle)
    # time to get the constants
    s, t = self.x1y1(0,0)
    u, v = self.x1y1(1./3, 1./3)
    w, x = self.x1y1(0,1)
    F = (t - v)**2
    G = 2 * (1 - cos(AoverR * (s - u)))
    H = (v - x)**2
    J = 2 * (1 - cos(AoverR * (u - w)))
    denom = F * J - G * H
    # scale factors chosen so that neighboring atoms end up bond_length apart
    self.R = (bond_length**2 * (F - H) / denom) ** .5
    self.B = (bond_length**2 * (J - G) / denom) ** .5
    self.A = self.R * AoverR
    if 0:
        # debug output, normally disabled
        print "--------------"
        print "angle =", angle
        print "A =", self.A
        print "B =", self.B
        print "R =", self.R
def x1y1(self, n, m):
    """
    Map graphene lattice coordinates (n, m) into the frame rotated by
    the cached chiral angle; returns the rotated point as (x1, y1).
    """
    cos_a = self.__cos
    sin_a = self.__sin
    x = sqrt3 * (n + .5 * m)
    y = 1.5 * m
    return (x * cos_a + y * sin_a, y * cos_a - x * sin_a)
def mlimits(self, z3min, z3max, n):
    """
    For a given lattice column n, return the (inclusive-ish) range of m
    indices whose atoms can fall between heights z3min and z3max,
    padded by 1.5 on each side to be safe.
    """
    if z3max < z3min:
        # accept the bounds in either order
        z3min, z3max = z3max, z3min
    B, c, s = self.B, self.__cos, self.__sin
    P = sqrt3 * B * s
    Q = 1.5 * B * (c - s / sqrt3)
    m1, m2 = (z3min + P * n) / Q, (z3max + P * n) / Q
    return int(m1-1.5), int(m2+1.5) # REVIEW: should this use intRound?
def xyz(self, n, m):
    """
    Return the 3D position (x3, y3, z3) of the lattice point (n, m):
    the rotated 2D point is scaled by A/B, then wrapped around a
    cylinder of radius R (x2 becomes the arc coordinate, y2 the height).
    """
    x1, y1 = self.x1y1(n, m)
    x2, y2 = self.A * x1, self.B * y1
    R = self.R
    x3 = R * sin(x2/R)
    y3 = R * cos(x2/R)
    z3 = y2
    return (x3, y3, z3)
def setChirality(self, n, m):
    """
    Set the n,m chiral integers of self.

    Two restrictions are maintained:
    - n >= 2
    - 0 <= m <= n

    @param n: chiral integer I{n}
    @type n: int

    @param m: chiral integer I{m}
    @type m: int

    @return: The chiral integers n, m.
    @rtype: tuple of two ints (n, m).

    @warning: n and/or m may be changed to maintain the restrictions.
    """
    if n < 2:
        n = 2
    # NOTE: only one of the two adjustment branches below runs; if both
    # n and m changed since the last call, the m-change branch wins.
    if m != self.m:
        # m changed. If m became larger than n, make n bigger.
        if m > n:
            n = m
    elif n != self.n:
        # n changed. If n became smaller than m, make m smaller.
        if m > n:
            m = n
    self.n = n
    self.m = m
    self._update()
    return self.getChirality()
def getChirality(self):
    """
    Return this nanotube's chiral indices as a pair.

    @return: (n, m)
    @rtype: tuple of two ints
    """
    n, m = self.n, self.m
    return (n, m)
def getChiralityN(self):
    """
    Return the chiral index n of this nanotube.

    @rtype: int
    """
    return self.n
def getChiralityM(self):
    """
    Return the chiral index m of this nanotube.

    @rtype: int
    """
    return self.m
def setType(self, type):
    """
    Sets the type of nanotube.

    @param type: the type of nanotube, either "Carbon" or "Boron Nitride"
    @type type: string

    @warning: This resets the bond length based on type.
    """
    assert type in ntTypes
    self.type = type
    self.setBondLength() # Calls _update().
    return
def getType(self):
    """
    Return the nanotube type string.

    @return: the type of nanotube ("Carbon" or "Boron Nitride").
    @rtype: string
    """
    return self.type
def getRadius(self):
    """
    Return the nanotube radius.

    @return: The radius in Angstroms.
    @rtype: float
    """
    return self.R
def getDiameter(self):
    """
    Return the nanotube diameter (twice the radius).

    @return: The diameter in Angstroms.
    @rtype: float
    """
    return 2.0 * self.R
def setBondLength(self, bond_length = None):
    """
    Set the I{bond length} between two neighboring atoms in self.

    @param bond_length: The bond length in Angstroms. If None (or any
                        falsy value, matching the historical behavior),
                        a default based on the current nanotube type is
                        used instead.
    @type bond_length: float
    """
    # Short-circuit keeps the original truthiness semantics: a falsy
    # bond_length falls back to the per-type default.
    self.bond_length = bond_length or ntBondLengths[ntTypes.index(self.type)]
    self._update()
    return
def getBondLength(self):
    """
    Return the bond length between neighboring atoms in the nanotube.

    @return: The bond length in Angstroms.
    @rtype: float
    """
    return self.bond_length
def setEndings(self, endings):
    """
    Set the I{endings} style of this nanotube.

    @param endings: Either "Hydrogen" or "None".
    @type  endings: string

    @note: "Capped" endings are not implemented yet.
    """
    assert endings in ntEndings
    self.endings = endings
def getEndings(self):
    """
    Return the type of I{endings} of this nanotube.

    @return: Either "Hydrogen" or "None".
    @rtype : string

    @note: "Capped" endings are not implemented yet.
    """
    return self.endings
def setEndPoints(self, endPoint1, endPoint2, trimEndPoint2 = False):
    """
    Set the nanotube's endpoints to I{endPoint1} and I{endPoint2}.

    @param endPoint1: point
    @type  endPoint1: V

    @param endPoint2: point
    @type  endPoint2: V

    @param trimEndPoint2: If true, endPoint2 will be trimmed to a point in
                          which the length of the nanotube is an integral
                          of the nanotube rise. This is not implemented yet.
    @type  trimEndPoint2: boolean

    @attention: trimEndPoint2 argument is ignored (NIY).
    """
    # See drawNanotubeLadder() for math needed to implement trimEndPoint2.
    # Unary "+" copies a Numeric array, so self doesn't alias the
    # caller's points.
    self.endPoint1, self.endPoint2 = + endPoint1, + endPoint2
    return
def getEndPoints(self):
    """
    Return both endpoints of the nanotube as a 2-tuple.
    """
    return (self.endPoint1, self.endPoint2)
def getParameters(self):
    """
    Return all the parameters needed to (re)build the nanotube using
    build().

    @return: The parameters of the nanotube segment.
             These parameters are retrieved via
             L{NanotubeSegment.getProps()}, called from
             L{EditNanotube_EditCommand.editStructure()}.

             Parameters:
             - n, m (chirality)
             - type (i.e. carbon or boron nitride)
             - endings (none, hydrogen, nitrogen)
             - endpoints (endPoint1, endPoint2)
    @rtype: list (n, m), type, endings, (endPoint1, endPoint2)
    """
    chirality = self.getChirality()
    kind = self.getType()
    endings = self.getEndings()
    endpoints = self.getEndPoints()
    return (chirality, kind, endings, endpoints)
def computeEndPointsFromChunk(self, chunk, update = True):
    """
    Derive and return the endpoints and radius of a nanotube chunk.

    @param chunk: a nanotube chunk
    @type chunk: Chunk

    @return: endPoint1, endPoint2 and radius
    @rtype: Point, Point and float

    @note: computing the endpoints works fine when n=m or m=0. Otherwise,
           the endpoints can be slightly off the central axis, especially
           if the nanotube is short.
    @attention: endPoint1 and endPoint2 may not be the original endpoints,
                and they may be flipped (opposites of) the original
                endpoints.
    """
    # Since chunk.axis is not always one of the vectors chunk.evecs
    # (actually chunk.poly_evals_evecs_axis[2]), it's best to just use
    # the axis and center, then recompute a bounding cylinder.
    if not chunk.atoms:
        return None
    tubeAxis = norm(chunk.axis) # normalization needed
    tubeCenter = chunk._get_center()
    # Positions relative to the center (not sure if basepos points are
    # already centered).
    relPoints = chunk.atpos - tubeCenter
    # Rotate so the tube axis lies along +Z; compare following Numeric
    # Python code to findAtomUnderMouse and its caller.
    rotated = dot(relPoints, matrix_putting_axis_at_z(tubeAxis))
    # Squared xy distances between the axis line and each atom center.
    distSq = rotated[:, 0] ** 2 + rotated[:, 1] ** 2
    # Radius is the largest such distance; argmax keeps this in Numeric
    # code for sure (not certain plain max() would).
    radius = sqrt(distSq[argmax(distSq)])
    # Axial limits: don't assume the center is midway between the ends.
    axialZ = rotated[:, 2]
    zLo = axialZ[argmin(axialZ)]
    zHi = axialZ[argmax(axialZ)]
    # Adjust the endpoints such that the ladder rungs (rings) will fall
    # on the ring segments.
    # TO DO: Fix drawNanotubeLadder() to offset the first ring, then this
    # adjustment can be removed. --Mark 2008-04-12
    zAdjust = self.getEndPointZOffset()
    zLo += zAdjust
    zHi -= zAdjust
    endpoint1 = tubeCenter + zLo * tubeAxis
    endpoint2 = tubeCenter + zHi * tubeAxis
    if update:
        #print "Original endpoints:", self.getEndPoints()
        self.setEndPoints(endpoint1, endpoint2)
        #print "New endpoints:", self.getEndPoints()
    return (endpoint1, endpoint2, radius)
def getEndPointZOffset(self):
    """
    Return the z offset, determined by the endings.

    @note: Offset distances are not exact, but approximated, which is good
    in this case. Providing exact offset values will result in the last
    ladder ring from being drawn by drawNanotubeLadder().
    """
    offsets = {
        "Hydrogen": 0.8,
        # Nitrogen endings option removed from PM. 2008-05-02 --Mark
        "Nitrogen": 1.1,
        }
    return offsets.get(self.getEndings(), 0.5)
def _computeRise(self): #@ See Python get/set attr builtin methods.
    """
    Private method.

    Set self.rise. This needs to be called anytime a parameter of self
    changes.

    This is primarily used for determining the distance between ladder
    rungs when drawing the nanotube ladder, during interactive drawing.

    @attention: The computed rise is a hack. Feel free to fix.
    """
    # Need a real formula to compute rise from (n, m); needs more
    # research. --Mark 2008-03-12
    knownRises = {0: 2.146, 5: 2.457}
    self.rise = knownRises.get(self.m, 2.5)
def getRise(self):
    """
    Return the nanotube U{rise}, used for the spacing between ladder
    rungs when drawing the nanotube ladder during interactive drawing.

    @return: The rise in Angstroms.
    @rtype: float
    """
    return self.rise
def getLengthFromNumberOfCells(self, numberOfCells):
    """
    Return the nanotube length (in Angstroms) given the number of cells.

    @param numberOfCells: The number of cells in the nanotube.
    @type numberOfCells: int

    @return: The length of the nanotube in Angstroms.
    @rtype: float
    """
    assert numberOfCells >= 0
    # N cells span N - 1 rise intervals.
    return (numberOfCells - 1) * self.rise
def getLength(self):
    """
    Return the length of the nanotube, i.e. the distance between its
    two endpoints.
    """
    first, second = self.getEndPoints()
    return vlen(first - second)
def populate(self, mol, length):
    """
    Populates a chunk (mol) with the atoms of this nanotube.

    @param mol: the chunk to fill with atoms
    @param length: axial length (in Angstroms) of tube to generate;
                   atoms are generated for z in [-length/2, length/2]
    """
    def add(element, x, y, z, atomtype='sp2'):
        # Create one atom in mol; carbons get an explicit atomtype.
        atm = Atom(element, V(x, y, z), mol)
        if element == "C":
            atm.set_atomtype_but_dont_revise_singlets(atomtype)
        return atm

    # evenAtomDict / oddAtomDict hold the two atoms of each 2-atom cell,
    # keyed by (n, m). bondDict maps each atom to the (n, m) keys of the
    # cells it should bond to; bonds are resolved at the end.
    evenAtomDict = { }
    oddAtomDict = { }
    bondDict = { }
    # Per-column (n) first and last m indices actually generated.
    mfirst = [ ]
    mlast = [ ]

    for n in range(self.n):
        mmin, mmax = self.mlimits(-.5 * length, .5 * length, n)
        mfirst.append(mmin)
        mlast.append(mmax)
        for m in range(mmin, mmax+1):
            x, y, z = self.xyz(n, m)
            if self.type == "Carbon":
                atm = add("C", x, y, z) # CNT
            else:
                atm = add("B", x, y, z) # BNNT
            evenAtomDict[(n,m)] = atm
            bondDict[atm] = [(n,m)]
            # Second atom of the cell sits a third of a cell away in
            # both lattice directions.
            x, y, z = self.xyz(n + 1.0 / 3, m + 1.0 / 3 )
            if self.type == "Carbon":
                atm = add("C", x, y, z) # CNT
            else:
                atm = add("N", x, y, z, 'sp3') # BNNT
            oddAtomDict[(n,m)] = atm
            bondDict[atm] = [(n + 1, m), (n, m + 1)]

    # m goes axially along the nanotube, n spirals around the tube
    # like a barber pole, with slope depending on chirality. If we
    # stopped making bonds now, there'd be a spiral strip of
    # missing bonds between the n=self.n-1 row and the n=0 row.
    # So we need to connect those. We don't know how the m values
    # will line up, so the first time, we need to just hunt for the
    # m offset. But then we can apply that constant m offset to the
    # remaining atoms along the strip.
    n = self.n - 1
    mmid = (mfirst[n] + mlast[n]) / 2
    atm = oddAtomDict[(n, mmid)]
    # FoundMOffset is used as a structured "goto": raised to break out
    # of the search loop once the offset has been found and applied.
    class FoundMOffset(Exception): pass
    try:
        for m2 in range(mfirst[0], mlast[0] + 1):
            atm2 = evenAtomDict[(0, m2)]
            diff = atm.posn() - atm2.posn()
            # A candidate atom closer than maxlensq is a bond partner.
            if dot(diff, diff) < self.maxlensq:
                moffset = m2 - mmid
                # Given the offset, zipping up the rows is easy.
                for m in range(mfirst[n], mlast[n]+1):
                    atm = oddAtomDict[(n, m)]
                    bondDict[atm].append((0, m + moffset))
                raise FoundMOffset()
        # If we get to this point, we never found m offset.
        # If this ever happens, it indicates a bug.
        raise Exception, "can't find m offset"
    except FoundMOffset:
        pass

    # Use the bond information to bond the atoms. Each pairing is tried
    # in both directions (even->odd and odd->even); missing neighbors
    # at the tube ends simply raise KeyError and are skipped.
    for (dict1, dict2) in [(evenAtomDict, oddAtomDict),
                           (oddAtomDict, evenAtomDict)]:
        for n, m in dict1.keys():
            atm = dict1[(n, m)]
            for n2, m2 in bondDict[atm]:
                try:
                    atm2 = dict2[(n2, m2)]
                    if not atoms_are_bonded(atm, atm2):
                        if self.type == "Carbon":
                            bond_atoms(atm, atm2, V_GRAPHITE) # CNT
                        else:
                            bond_atoms(atm, atm2, V_SINGLE) # BNNT
                except KeyError:
                    pass
def build(self, name, assy, position, mol = None, createPrinted = False):
    """
    Build a nanotube from the parameters in the Property Manger dialog.

    @param name: name for a newly created chunk (unused if mol is given)
    @param assy: the assembly the chunk belongs to
    @param position: offset added to every atom position at the end
    @param mol: optional existing chunk to populate instead of creating one
    @param createPrinted: if True, suppress the "This may take a moment"
                          history message (used on recursive calls)
    @return: the chunk containing the nanotube atoms
    """
    endPoint1, endPoint2 = self.getEndPoints()
    cntAxis = endPoint2 - endPoint1
    length = vlen(cntAxis)

    # This can take a few seconds. Inform the user.
    # 100 is a guess. --Mark 051103.
    if not createPrinted:
        # If it's a multi-wall tube, only print the "Creating" message once.
        if length > 100.0:
            env.history.message("This may take a moment...")
    PROFILE = False
    if PROFILE:
        sw = Stopwatch()
        sw.start()
    xyz = self.xyz
    if mol == None:
        mol = Chunk(assy, name)
    atoms = mol.atoms
    mlimits = self.mlimits
    # populate the tube with some extra carbons on the ends
    # so that we can trim them later
    self.populate(mol, length + 4 * self.maxlen)

    # Apply twist and distortions. Bends probably would come
    # after this point because they change the direction for the
    # length. I'm worried about Z distortion because it will work
    # OK for stretching, but with compression it can fail. BTW,
    # "Z distortion" is a misnomer, we're stretching in the Y
    # direction.
    for atm in atoms.values():
        # twist
        x, y, z = atm.posn()
        twistRadians = self.twist * z
        c, s = cos(twistRadians), sin(twistRadians)
        x, y = x * c + y * s, -x * s + y * c
        atm.setposn(V(x, y, z))
    for atm in atoms.values():
        # z distortion
        x, y, z = atm.posn()
        z *= (self.zdist + length) / length
        atm.setposn(V(x, y, z))
    length += self.zdist
    for atm in atoms.values():
        # xy distortion
        x, y, z = atm.posn()
        radius = self.getRadius()
        x *= (radius + 0.5 * self.xydist) / radius
        y *= (radius - 0.5 * self.xydist) / radius
        atm.setposn(V(x, y, z))

    # Judgement call: because we're discarding carbons with funky
    # valences, we will necessarily get slightly more ragged edges
    # on nanotubes. This is a parameter we can fiddle with to
    # adjust the length. My thought is that users would prefer a
    # little extra length, because it's fairly easy to trim the
    # ends, but much harder to add new atoms on the end.
    LENGTH_TWEAK = self.getBondLength()

    # trim all the carbons that fall outside our desired length
    # by doing this, we are introducing new singlets
    for atm in atoms.values():
        x, y, z = atm.posn()
        if (z > .5 * (length + LENGTH_TWEAK) or
            z < -.5 * (length + LENGTH_TWEAK)):
            atm.kill()

    # Apply bend. Equations are anomalous for zero bend.
    if abs(self.bend) > pi / 360:
        R = length / self.bend
        for atm in atoms.values():
            x, y, z = atm.posn()
            theta = z / R
            x, z = R - (R - x) * cos(theta), (R - x) * sin(theta)
            atm.setposn(V(x, y, z))

    def trimCarbons():
        """
        Trim all the carbons that only have one carbon neighbor.
        """
        # Two passes, since killing an atom can leave its neighbor
        # with only one real neighbor.
        for i in range(2):
            for atm in atoms.values():
                if not atm.is_singlet() and len(atm.realNeighbors()) == 1:
                    atm.kill()

    trimCarbons()

    # If we're not picky about endings, we don't need to trim carbons
    if self.endings == "Capped":
        # buckyball endcaps
        addEndcap(mol, length, self.getRadius())
    if self.endings == "Hydrogen":
        # hydrogen terminations
        for atm in atoms.values():
            atm.Hydrogenate()
    elif self.endings == "Nitrogen":
        # nitrogen terminations.
        # This option has been removed from the "Endings" combo box
        # in the PM. 2008-05-02 --mark
        dstElem = PeriodicTable.getElement('N')
        atomtype = dstElem.find_atomtype('sp2')
        for atm in atoms.values():
            if len(atm.realNeighbors()) == 2:
                atm.Transmute(dstElem, force=True, atomtype=atomtype)

    # Translate structure to desired position
    for atm in atoms.values():
        v = atm.posn()
        atm.setposn(v + position)

    if PROFILE:
        t = sw.now()
        env.history.message(greenmsg("%g seconds to build %d atoms" %
                                     (t, len(atoms.values()))))

    if self.numwalls > 1:
        # NOTE(review): this multi-wall branch looks broken: <n> is not
        # defined in this scope, and the recursive call passes endPoint1
        # and endPoint2, which build() does not accept (its signature is
        # (name, assy, position, mol, createPrinted)). Presumably dead
        # code; confirm before relying on multi-wall builds.
        n += int(self.spacing * 3 + 0.5) # empirical tinkering
        self.build(name, assy,
                   endPoint1, endPoint2,
                   position,
                   mol = mol, createPrinted = True)

    # Orient the nanotube.
    if self.numwalls == 1:
        # This condition ensures that MWCTs get oriented only once.
        self._orient(mol, endPoint1, endPoint2)

    return mol
    pass # End build()
def _postProcess(self, cntCellList):
    # No-op hook: subclasses may post-process the generated cell list;
    # this base implementation has nothing to do.
    pass
def _orient(self, cntChunk, pt1, pt2):
    """
    Orients the CNT I{cntChunk} based on two points. I{pt1} is
    the first endpoint (origin) of the nanotube. The vector I{pt1}, I{pt2}
    defines the direction and central axis of the nanotube.

    @param pt1: The starting endpoint (origin) of the nanotube.
    @type  pt1: L{V}

    @param pt2: The second point of a vector defining the direction
                and central axis of the nanotube.
    @type  pt2: L{V}
    """
    a = V(0.0, 0.0, -1.0)
    # <a> is the unit vector pointing down the center axis of the default
    # DNA structure which is aligned along the Z axis.
    bLine = pt2 - pt1
    bLength = vlen(bLine)
    b = bLine/bLength
    # <b> is the unit vector parallel to the line (i.e. pt1, pt2).
    axis = cross(a, b)
    # <axis> is the axis of rotation.
    theta = angleBetween(a, b)
    # <theta> is the angle (in degrees) to rotate about <axis>.
    scalar = bLength * 0.5
    rawOffset = b * scalar

    # (Removed an "if 0:" debug block that was here: it called
    # self.getCntRise() and self.getNumberOfCells(), neither of which is
    # defined on this class, so it could never be safely enabled.)

    if theta == 0.0 or theta == 180.0:
        # <a> and <b> are (anti)parallel, so cross(a, b) is the zero
        # vector; substitute an arbitrary perpendicular rotation axis.
        axis = V(0, 1, 0)
        # print "Now cross(a,b) =", axis

    rot =  (pi / 180.0) * theta  # Convert to radians
    qrot = Q(axis, rot) # Quat for rotation delta.

    # Move and rotate the nanotube into final orientation.
    cntChunk.move(qrot.rot(cntChunk.center) - cntChunk.center + rawOffset + pt1)
    cntChunk.rot(qrot)

    # Bruce suggested I add this. It works here, but not if its
    # before move() and rot() above. Mark 2008-04-11
    cntChunk.full_inval_and_update()
    return
# override abstract method of DataMixin
def _copyOfObject(self):
    """
    Create and return a copy of this nanotube's parameters (chirality,
    type, endings and endpoints are duplicated onto a fresh instance).
    """
    duplicate = NanotubeParameters()
    duplicate.setChirality(self.n, self.m)
    duplicate.setType(self.type)
    duplicate.setEndings(self.endings)
    duplicate.setEndPoints(self.endPoint1, self.endPoint2)
    return duplicate
# override abstract method of DataMixin
def __eq__(self, other):
    """
    Compare self with other.

    Two nanotubes are equal when their chirality (n, m), type, endings,
    and both endpoints all match.
    """
    if not isinstance(other, self.__class__):
        return False
    if self.n != other.n:
        return False
    elif self.m != other.m:
        return False
    # (A redundant duplicate comparison of self.n was removed here.)
    elif self.type != other.type:
        return False
    elif self.endings != other.endings:
        return False
    # Note: Numeric arrays can be safely compared using !=, but not ==.
    elif self.endPoint1 != other.endPoint1:
        return False
    elif self.endPoint2 != other.endPoint2:
        return False
    else:
        return True
pass
pass
| NanoCAD-master | cad/src/cnt/model/NanotubeParameters.py |
NanoCAD-master | cad/src/cnt/model/__init__.py |
|
# Copyright 2007-2008 Nanorex, Inc. See LICENSE file for details.
"""
NanotubeSegment.py - ...
@author: Mark
@version: $Id$
@copyright: 2007-2008 Nanorex, Inc. See LICENSE file for details.
Note: related to DnaStrandOrSegment, from which it was copied and modified.
"""
import foundation.env as env
from utilities.debug import print_compact_stack, print_compact_traceback
from model.chunk import Chunk
from model.chem import Atom
from model.bonds import Bond
from geometry.VQT import V, norm, vlen
from utilities.icon_utilities import imagename_to_pixmap
from utilities.Comparison import same_vals
def getAllNanotubeSegmentsInPart(assy):
    """
    @return: a list of all NanotubeSegments in the part.
    """
    found = []
    def _collect(obj):
        # Accumulate via closure over <found> (rather than the old
        # default-argument binding trick); keep only NanotubeSegments.
        if isinstance(obj, NanotubeSegment):
            found.append(obj)
        return
    assy.part.topnode.apply2all(_collect)
    return found
from foundation.LeafLikeGroup import LeafLikeGroup
# Alias used when explicitly delegating to the superclass implementation.
_superclass = LeafLikeGroup
class NanotubeSegment(LeafLikeGroup):
    """
    Model object which represents a Nanotube Segment inside a Nanotube Group.

    Internally, this is just a specialized Group containing a single chunk,
    itself containing all the atoms of a nanotube.
    """

    # This should be a tuple of classifications that appear in
    # files_mmp._GROUP_CLASSIFICATIONS, most general first.
    # See comment in class Group for more info. [bruce 080115]
    _mmp_group_classifications = ('NanotubeSegment',)

    # NanotubeParameters object for this segment (set by setProps or when
    # reading an mmp "info opengroup nanotube-parameters" record).
    nanotube = None

    _endPoint1 = None
    _endPoint2 = None
    # TODO: undo or copy code for those attrs,
    # and updating them when the underlying structure changes.
    # But maybe that won't be needed, if they are replaced
    # by computing them from the atom geometry as needed.
    # [bruce 080227 comment]

    autodelete_when_empty = True
    # (but only if current command permits that for this class --
    #  see comment near Group.autodelete_when_empty for more info,
    #  and implems of Command.keep_empty_group)

    iconPath = "ui/modeltree/NanotubeSegment.png"
    hide_iconPath = "ui/modeltree/NanotubeSegment-hide.png"

    # This partially fixes bug 2914. Copying now works, but the following
    # "warning" is printed to stdout:
    # ****************** needs _copyOfObject: <cnt.model.Nanotube.Nanotube instance at 0x164FC030>
    # I'm guessing this means that we need to override abstract method
    # _copyOfObject() of DataMixin, but I'd like to discuss this with Bruce
    # first. I also have confirmed that there is still a bug when editing the
    # copied nanotube (it will automatically move from the clipboard
    # to the part after it is resized).
    # Mark 2008-07-09.
    copyable_attrs = _superclass.copyable_attrs + ('nanotube',)

    def writemmp_other_info_opengroup(self, mapping): #bruce 080507 refactoring
        """
        Write this segment's nanotube parameters as an mmp "info opengroup"
        record, so they can be read back and restored in the next session.
        """
        #bruce 080507 refactoring (split this out of Group.writemmp)
        # (I think the following condition is always true, but I didn't
        #  prove this just now, so I left in the test for now.)
        encoded_classifications = self._encoded_classifications()
        if encoded_classifications == "NanotubeSegment":
            # This is a nanotube segment, so write the parameters into an info
            # record so we can read and restore them in the next session.
            # --Mark 2008-04-12
            assert self.nanotube
            mapping.write("info opengroup nanotube-parameters = %d, %d, %s, %s\n" \
                          % (self.nanotube.getChiralityN(),
                             self.nanotube.getChiralityM(),
                             self.nanotube.getType(),
                             self.nanotube.getEndings()))
            pass
        return

    def readmmp_info_opengroup_setitem( self, key, val, interp ):
        """
        [extends superclass method]
        """
        #bruce 080507 refactoring (split this out of the superclass method)
        if key == ['nanotube-parameters']:
            # val includes all the parameters, separated by commas.
            n, m, type, endings = val.split(",")
            self.n = int(n)
            self.m = int(m)
            self.type = type.lstrip()
            self.endings = endings.lstrip()
            # Create the nanotube.
            from cnt.model.NanotubeParameters import NanotubeParameters
            self.nanotube = NanotubeParameters() # Returns a 5x5 CNT.
            self.nanotube.setChirality(self.n, self.m)
            self.nanotube.setType(self.type)
            self.nanotube.setEndings(self.endings)
            # The endpoints are recomputed every time it is edited.
        else:
            _superclass.readmmp_info_opengroup_setitem( self, key, val, interp)
        return

    def edit(self):
        """
        Edit this NanotubeSegment.
        @see: EditNanotube_EditCommand
        """
        commandSequencer = self.assy.w.commandSequencer
        commandSequencer.userEnterCommand('EDIT_NANOTUBE')
        assert commandSequencer.currentCommand.commandName == 'EDIT_NANOTUBE'
        commandSequencer.currentCommand.editStructure(self)
        return

    def getAxisVector(self, atomAtVectorOrigin = None):
        """
        Returns the unit axis vector of the segment (vector between two axis
        end points)
        """
        # REVIEW: use common code for this method? [bruce 081217 comment]
        endPoint1, endPoint2 = self.nanotube.getEndPoints()
        if endPoint1 is None or endPoint2 is None:
            return V(0, 0, 0)

        #@see: RotateAboutAPoint command. The following code is disabled
        #as it has bugs (not debugged but could be in
        #self.nanotube.getEndPoints). So, rotate about a point won't work for
        #rotating a nanotube. -- Ninad 2008-05-13
        ##if atomAtVectorOrigin is not None:
            ###If atomAtVectorOrigin is specified, we will return a vector that
            ###starts at this atom and ends at endPoint1 or endPoint2 .
            ###Which endPoint to choose will be dicided by the distance between
            ###atomAtVectorOrigin and the respective endPoints. (will choose the
            ###frthest endPoint
            ##origin = atomAtVectorOrigin.posn()
            ##if vlen(endPoint2 - origin ) > vlen(endPoint1 - origin):
                ##return norm(endPoint2 - endPoint1)
            ##else:
                ##return norm(endPoint1 - endPoint2)

        return norm(endPoint2 - endPoint1)

    def setProps(self, props):
        """
        Sets some properties. These will be used while editing the structure.
        (but if the structure is read from an mmp file, this won't work. As a
        fall back, it returns some constant values)
        @see: InsertNanotube_EditCommand.createStructure which calls this
              method.
        @see: self.getProps, EditNanotube_EditCommand.editStructure
        """
        (_n, _m), _type, _endings, (_endPoint1, _endPoint2) = props

        from cnt.model.NanotubeParameters import NanotubeParameters
        self.nanotube = NanotubeParameters()
        self.nanotube.setChirality(_n, _m)
        self.nanotube.setType(_type)
        self.nanotube.setEndings(_endings)
        self.nanotube.setEndPoints(_endPoint1, _endPoint2)

    def getProps(self):
        """
        Returns nanotube parameters necessary for editing.

        @see: EditNanotube_EditCommand.editStructure where it is used.
        @see: EditNanotube_PropertyManager.getParameters
        @see: NanotubeSegmentEditCommand._createStructure
        """
        # Recompute the endpoints in case this nanotube was read from
        # MMP file (which means this nanotube doesn't have endpoint
        # parameters yet).
        self.nanotube.computeEndPointsFromChunk(self.members[0])
        return self.nanotube.getParameters()

    def isAncestorOf(self, obj):
        """
        Checks whether the object <obj> is contained within the NanotubeSegment

        Example: If the object is an Atom, it checks whether the
        atom's chunk is a member of this NanotubeSegment (chunk.dad is self)

        It also considers all the logical contents of the NanotubeSegment to
        determine whether self is an ancestor. (returns True even for logical
        contents)

        @see: self.get_all_content_chunks() (inherited from LeafLikeGroup)
        @see: EditNanotube_GraphicsMode.leftDrag
        """
        # TODO: this needs cleanup (it looks like it's made of two alternative
        # implems, one after the other), generalization (to use some centrally
        # defined notion of logical contents), and optimization. Also, if it
        # is still defined in more than one class, common code should be used.
        # [bruce 080507/081217 comment]
        c = None
        if isinstance(obj, Atom):
            c = obj.molecule
        elif isinstance(obj, Bond):
            chunk1 = obj.atom1.molecule
            chunk2 = obj.atom2.molecule # bugfix: was obj.atom1.molecule
            if chunk1 is chunk2:
                c = chunk1
        elif isinstance(obj, Chunk):
            c = obj
        if c is not None:
            if c in self.get_all_content_chunks():
                # review: could optimize by (c.dad is self), at least in this
                # class [bruce 081217 comment]
                return True

        #NOTE: Need to check if the isinstance checks are acceptable (apparently
        #don't add any import cycle)
        if isinstance(obj, Atom):
            chunk = obj.molecule
            if chunk.dad is self:
                return True
            else:
                return False
        elif isinstance(obj, Bond):
            chunk1 = obj.atom1.molecule
            chunk2 = obj.atom2.molecule # bugfix: was obj.atom1.molecule
            if (chunk1.dad is self) or (chunk2.dad is self):
                return True
        elif isinstance(obj, Chunk):
            if obj.dad is self:
                return True
        return False

    def node_icon(self, display_prefs):
        """
        Return the model tree icon for this node (the hidden variant when
        all content is hidden).
        """
        # REVIEW: use common code for this method? [bruce 081217 comment]
        del display_prefs
        if self.all_content_is_hidden():
            return imagename_to_pixmap( self.hide_iconPath)
        else:
            return imagename_to_pixmap( self.iconPath)

    def permit_as_member(self, node, pre_updaters = True, **opts):
        """
        [friend method for enforce_permitted_members_in_groups and subroutines]

        Does self permit node as a direct member,
        when called from enforce_permitted_members_in_groups with
        the same options as we are passed?

        @rtype: boolean

        [extends superclass method]
        """
        # this method was copied from DnaStrandOrSegment and edited for
        # this class
        if not LeafLikeGroup.permit_as_member(self, node, pre_updaters, **opts):
            # reject if superclass would reject [bruce 081217]
            return False
        del opts
        assy = self.assy
        res = isinstance( node, assy.Chunk) #@ NEEDS SOMETHING MORE.
        return res

    pass # end of class NanotubeSegment
# end
| NanoCAD-master | cad/src/cnt/model/NanotubeSegment.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.