# Source code for schrodinger.job.launcher

"""
A class for launching individual executables under job control.

Launcher is a Python wrapper for using the external 'jlaunch' script.
It simplifies the creation of the "startup script" for a script running
under job control.

NOTE: Launcher is designed for use within startup scripts only.  If there is
a script/executable that needs to be submitted under job control by another
script, please file a case to have a startup script written for it; then use
jobcontrol.launch_job() to launch it.

Copyright Schrodinger, LLC. All rights reserved.

"""

import base64
import logging
import os

import schrodinger.job.jobcontrol as jobcontrol
from schrodinger.utils import mmutil
from schrodinger.utils import subprocess
from schrodinger.infra import mmjob

_version = "$Revision: 1.78 $"

_logger = logging.getLogger("schrodinger.job.launcher")
try:
    import schrodinger.utils.log
    schrodinger.utils.log.default_logging_config()
except:
    pass

# Full path to the $SCHRODINGER/run on the launch host:
LOCAL_RUN = os.path.join(os.environ['SCHRODINGER'], 'run')


class Launcher:
    """
    Run a program (typically a Python or shell script) under job control.

    The parameters describing the job are specified as attributes of the
    launcher object. The launch() method translates the job parameters into
    commandline options for the jlaunch script, and executes that script. If
    the job is launched successfully, a job object is returned. The
    getCommandArgs() method returns the jlaunch command without executing
    the job.

    NOTE: Launcher is designed for use within startup scripts only.
    """
    def __init__(
            self,
            script,
            jobname=None,
            copyscript=False,
            usehostargs=None,
            runtoplevel=False,
            host=None,
            local=False,
            user=None,
            disp=None,
            proj=None,
            save=None,
            prog=None,
            nice=None,
            debugger=None,
            tmpdir=None,
            wait=False,
            no_redirect=False,
            localdriver=False,
            nproc=None,
            interval=None,
            loginterval=None,
            expandvars=None,
            viewname=None,
            oplsdir=None,
            product=None,
            stoppable=False,
    ):
        """
        :param script: The remote path to the script or program to run (the
            only required argument). Used by job control to locate the script
            on the remote host (which will be the same as the launch host if
            host="localhost" or no_redirect=True). Example:
            "$REMOTE_MMSHARE_EXEC/../../python/scripts/prepwizard_driver.py"
            If the script is located in a $SCHRODINGER/run path, then simply
            the name of the script can be passed in (e.g.
            "prepwizard_driver.py"), as $SCHRODINGER/run of the remote host
            will automatically find the script. If copyscript is True, must
            be set to the full local path of the script to be copied.

        :param jobname: The name of the job. If not specified, defaults to
            "<scriptname>_run", based on the script name.

        :param copyscript: Whether or not to copy the script or program to
            the job directory, to make it accessible on the compute host
            (default: False). Useful for allowing non-distributed scripts
            (e.g. from ScriptCenter) to run on remote hosts. *Keep set to
            False when running scripts included in the Schrodinger
            distribution, including toplevel scripts, scripts under mmshare,
            or any other distributed repository. Avoid the hack of setting
            <script> to the local path of the script and enabling
            <copyscript> - instead set <script> to the remote path of the
            script - ask fellow developers for help if needed.*

        :param usehostargs: Deprecated and has no effect.

        :param runtoplevel: Use $SCHRODINGER/run to reset the environment
            before submitting the script under job control. This is useful if
            the script that uses Launcher is not invoked with options (e.g.,
            -HOST) appropriate for running the job, such as a script that
            both starts a GUI and runs the job. (default: False)

        :param host: Run jobs on the specified host. By default, the host
            specified on the original command line will be used (the JOBHOST
            env var is read, which is set by the original top-level script).

        :param local: Run the job in the directory from which it was launched
            (deprecated).

        :param user: Run the job as the specified user (deprecated).

        :param proj: The absolute pathname for the Maestro project to which
            this job should be assigned. If None, jlaunch will use the
            project specified on the command line, or by the
            SCHRODINGER_PROJECT env var. (default: None)

        :param disp: The "disposition" for this job's outputs; supported
            settings are "NONE", "NEW", "APPEND", or None. If None, jlaunch
            will use the disposition specified on the command line or by the
            SCHRODINGER_JOB_DISPOSITION env var. *This parameter is only
            meaningful if the job has been assigned to a project.*
            (default: None)

        :param save: Save the temporary job directory after the job completes
            (default: None). By default, will use what was specified on the
            command line (the toplevel script will parse the -SAVE option and
            set the SCHRODINGER_SAVE_JOBDIR env var). Set to True or False to
            force saving or not saving the job directory.

        :param prog: The name of the program being executed, for example,
            'Glide'. This is recorded in the Program field of the job record.
            (default: None)

        :param nice: Run the program at reduced priority (default: None). By
            default, use nice if specified on the command line (the toplevel
            script parses the -NICE option and sets the SCHRODINGER_NICE env
            var).

        :param debugger: Run the job locally under the specified debugger.
            (default: None)

        :param tmpdir: The absolute pathname for the directory under which
            the temporary job directory should be created. (default: None)
            If set to None, then the usual procedures are followed for
            getting the tmpdir, that is, the 'tmpdir:' setting in the hosts
            file, the SCHRODINGER_TMPDIR env var, and the -TMPDIR commandline
            option.

        :param wait: Do not return from launch() until the job completes.
            (default: False)

        :param no_redirect: Do not run under job control (default: False).
            Set to True if the user specified the -NOJOBID option.

        :param localdriver: Submit the main job (driver) to localhost, and
            use the -HOST command line argument only for any subjobs started
            by it. This overrides any -DRIVERHOST option on the command line.
            This is not compatible with 'runtoplevel' mode. (default: False)

        :param nproc: The number of processors required for a parallel job.
            When a job is submitted to a queueing system, this many
            processors will be requested; it has no effect on simple local
            and remote jobs. For a serial job, this should be set to None.
            (default: None)

        :param interval: The monitoring interval, in seconds. This controls
            how often monitor files are copied back during the job. If no
            monitor files are specified, then nothing is copied back. If set
            to zero or None, monitoring is turned off altogether. *Monitoring
            can be very resource-intensive, and so it should only be turned
            on at user request, through the Maestro monitor panel. This
            parameter should generally always be left at None.*
            (default: None)

        :warning: Monitoring should only be turned on when requested by the
            user, because it typically slows down the backend and consumes
            significant system resources.

        :param loginterval: The log interval, in seconds. This controls how
            frequently logfile changes are copied back during the job. If set
            to None, the standard log interval is used; if set to zero, the
            log file won't be copied back until the backend finishes.
            Generally, you should let this be None. (default: None)

        :param expandvars: Whether to expand environment variable references
            in the command that's executed to launch the job. If set to None,
            defer to jobcontrol.launch_job(), which has its own default
            behavior for this. (default: None)

        :param oplsdir: Path to the custom OPLS parameter directory for the
            -OPLSDIR flag (default: None)

        :type product: str
        :param product: Product to pass in to $SCHRODINGER/run - determines
            what product directory is searched on the remote host. By default
            it's mmshare.

        :type stoppable: bool
        :param stoppable: If True, stopping a job is handled by sending a
            message to the backend.
        """
        if local and mmutil.feature_flag_is_enabled(mmutil.JOB_SERVER):
            raise DeprecationWarning(
                'Option "local" is deprecated, as it is incompatible with JOB_SERVER'
            )

        # In order to prevent copying a precompiled *.pyc file, and instead
        # copy the original script, strip the 'c' at the end:
        # MAY NOT BE NECESSARY ANYMORE
        if script.endswith(".pyc"):
            self._script = script[:-1]
        else:
            self._script = script

        # If no job name is specified, set it according to the script name
        if jobname:
            self._jobname = jobname
        else:
            # no jobname specified, default to "<scriptname>_run"
            basename = os.path.splitext(os.path.basename(self._script))[0]
            self._jobname = basename + "_run"

        self._stdouterr_file = self._jobname + '.log'

        # This variable should point to the log file that should be listed
        # FIRST in the job record. This file will be the default in the
        # Monitor panel. If None, then the stdout/stderr file will be the
        # default.
        self._default_log_file = None

        self._scriptargs = []
        self._input_files = set()
        self._additional_log_files = set()
        self._append_logs = False
        self._temp_input_files = {}
        self._force_input_files = []
        self._output_files = set()
        self._req_output_files = set()
        self._structureoutputfile = None
        self._structuremonitorfile = None
        self._host = host
        self._local = local
        self._user = user
        self._disp = disp
        self._proj = proj
        self._save = save
        self._prog = prog
        self._nice = nice
        self._oplsdir = oplsdir
        self._viewname = viewname
        self._debugger = debugger
        self._tmpdir = tmpdir
        self._wait = wait
        self._no_redirect = no_redirect
        self._runtoplevel = runtoplevel
        self._copyscript = copyscript
        self._usehostargs = usehostargs
        self._localdriver = localdriver
        self._nproc = nproc
        self._interval = interval
        self._loginterval = loginterval
        self._expandvars = expandvars
        self._envs = []
        self._licenses = []
        self._no_launch = False
        self._product = product
        self._stoppable = stoppable
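    # Illustrative sketch (not part of the original source): a typical
    # Launcher construction inside a startup script. The script path is taken
    # from the docstring example above; the jobname and prog values are
    # hypothetical.
    #
    #     launcher = Launcher(
    #         "$REMOTE_MMSHARE_EXEC/../../python/scripts/prepwizard_driver.py",
    #         jobname="prepwizard_job",
    #         prog="PrepWizard")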
    def addScriptArgs(self, args):
        """
        Specify arguments to add to the command line for the backend when it
        is executed on the compute node.

        :param args: A list of arguments.
        """
        if type(args) is str:
            args = [args]
        self._scriptargs.extend(args)
    def addInputFile(self, filename):
        """
        Register an input file with job control. This file will be copied to
        the job directory before the backend is executed, unless the file is
        already accessible at the registered pathname.

        *Don't use this method to register the script or program being
        executed. Use the copyscript option for that.*

        :param filename: The pathname of the input file.
        """
        self._input_files.add(filename)
    def addTempInputFile(self, filename, name=''):
        """
        Register a temporary input file with job control. These temporary
        files are stored in the JobDB with the name '<jobid>.TIN<name>.<ext>',
        where <name> is a tag specified via the 'name' arg. The 'name' can be
        omitted for at most one input file.

        :param filename: The name of the temporary input file.
        :param name: The tag for this temporary input file.

        :raise Exception: if either the 'name' or 'filename' has already been
            added.
        """
        if name in self._temp_input_files:
            raise Exception("Duplicate temp input file name: " + name)
        if filename in self._temp_input_files.values():
            raise Exception("Duplicate temp input file: " + filename)
        self._temp_input_files[name] = filename
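    # Illustrative sketch (not part of the original source): registering two
    # hypothetical temporary input files on an existing 'launcher' instance.
    # Following the '<jobid>.TIN<name>.<ext>' scheme documented above, the
    # first would be stored in the JobDB as '<jobid>.TIN.mae' and the second
    # as '<jobid>.TINgrid.zip'. The file names and the "grid" tag are made-up
    # examples.
    #
    #     launcher.addTempInputFile("input.mae")
    #     launcher.addTempInputFile("gridfile.zip", name="grid")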
    def addForceInputFile(self, source_filename, dest_filename=None):
        """
        Register an input file with job control. Job control will copy this
        file to the job directory regardless of whether it's already
        accessible on the compute node.

        :param source_filename: The pathname for the input file on the launch
            node.
        :param dest_filename: The name given to the copied file. By default,
            this will be the base name of source_filename.
        """
        if not dest_filename:
            dest_filename = os.path.basename(source_filename)
        self._force_input_files.append((source_filename, dest_filename))
    def addOutputFile(self, filename):
        """
        Register an output file with job control so it gets copied back to
        the launch directory. If the file is not found in the job directory
        after the job completes, this will be silently ignored.

        :param filename: The name of the output file.
        """
        self._output_files.add(filename)
    def addRequiredOutputFile(self, filename):
        """
        Register an output file with job control so it gets copied back to
        the launch directory. If the file does not exist at the end of the
        job, then job control will mark the job as "died".

        :param filename: The name of the output file.
        """
        self._req_output_files.add(filename)
    def setStructureOutputFile(self, filename):
        """
        Register a structure output file with job control (i.e., the file
        that gets incorporated into Maestro upon job completion). This method
        also registers the file as an output file.

        :param filename: The name of the structure output file.
        """
        self._structureoutputfile = filename
        self._output_files.add(filename)
    def setStructureMonitorFile(self, filename):
        """
        Register a structure monitor file with job control. If this job is
        monitored from Maestro, then this file will be used to get the
        structure to display.

        :param filename: The name of the structure monitor file.
        """
        self._structuremonitorfile = filename
    def setStdOutErrFile(self, filename):
        """
        Specify the name of the main log file, which captures the stdout and
        stderr of the backend. If no filename is specified, then the main log
        file will be named '<jobname>.log'.

        :param filename: The name of the main log file.
        """
        # This option was added for VSW stages that run in their own
        # directories but whose log files are in the main work directory.
        self._stdouterr_file = filename
    def addLogFile(self, filename, default=False):
        """
        Register a log file with job control. This file will be copied back
        to the launch directory incrementally as the job runs.

        By default, the first log file registered will be displayed when the
        job is being monitored in Maestro. Use the 'default' option to
        specify that some particular file should be displayed.

        :param filename: The name of a log file.
        :param default: Whether to display this log file in the monitor panel
            when the job is being monitored in Maestro. (default: False)
        """
        self._additional_log_files.add(filename)
        if default:
            # Show this file first in Monitor panel
            self._default_log_file = filename
    def setNoLaunch(self, nolaunch):
        """
        Specify whether to exit before actually launching the job. This can
        be useful for debugging issues in job launching.

        :param nolaunch: If true, exit just before the job would be submitted
            to the queue, or started on the compute node.
        :type nolaunch: bool
        """
        self._no_launch = nolaunch
    def setAppendLogs(self, appendlogs):
        """
        Specify whether to append to existing log files. Useful for
        restarting jobs.

        :param appendlogs: If true, append to existing logfiles
        :type appendlogs: bool
        """
        self._append_logs = appendlogs
    def addEnv(self, env):
        """
        Specify an environment variable setting that should be added to the
        backend environment on the compute node.

        :param env: An environment variable setting, in the form
            'MYENV=value'.
        """
        self._envs.append(env)
    def addLicense(self, license):
        """
        Specify a license that's required by the backend. If license checking
        is turned on, then the backend will not be started unless all
        required licenses are available.

        :param license: A license token in the form 'TOKEN' or 'TOKEN:n',
            where TOKEN is the name of the license and n is the number of
            tokens.
        """
        self._licenses.append(license)
    def _getBackendCommand(self):
        """
        :return: the command used to execute the backend.
        """
        if self._copyscript:
            # The script will be copied to the JobDir, which ensures
            # that it is accessible for backend execution.
            remote_script = os.path.basename(self._script)
        else:
            # Did not copy the script, so run the original.
            remote_script = self._script

        args = jobcontrol.list2jmonitorcmdline(self._scriptargs)
        # Build the backend command '-cmd' argument to be passed to jlaunch:
        # Convert script arguments to a string:
        # (list2cmdline may add double quote characters)
        # PROBLEM: Does NOT quote arguments with '<' characters that cause sh
        # errors. If arg is "Br<5", error is: "sh: 5: No such file or directory"
        # NOTE: remote_script may have spaces or quotes in it, and so can each
        # of the script arguments.
        # Avoid list2cmdline for remote_script, as it may have a path
        # containing an environment variable. This might not be quoted by
        # list2cmdline and would fail if the final expansion of the
        # environment variable has a space.

        # Use $SCHRODINGER/run to find the backend script/executable on the
        # remote host:
        cmd = '"$REMOTE_SCHRODINGER/run" '
        if self._product:
            cmd += '-FROM %s ' % self._product

        if '-HOST' in self._scriptargs:
            raise RuntimeError(
                "Use Launcher's host option to specify the host to run "
                "jobs on - instead of adding a -HOST script argument. If "
                "host doesn't propagate properly, then the issue is with "
                "the backend attempting to parse the -HOST option instead "
                "of using jobcontrol.get_backend_host_list()")

        if remote_script == "time":
            # Add the "script" before run. In this case, the script to run
            # will be the first item in the args list.
            cmd = 'time ' + cmd
        else:
            cmd += '"%s" ' % remote_script

        cmd += args

        # It's alright to have spaces and quotes in this string:
        if self._append_logs:
            op = '>>'
        else:
            op = '>'
        cmd = f"( {cmd} ) {op} \"{self._stdouterr_file}\" 2>&1"

        # If nice is None, use the existing SCHRODINGER_NICE value (from
        # toplevel).
        if self._nice is not None:
            # This is the equivalent of what the top-level script does:
            if self._nice:
                os.environ['SCHRODINGER_NICE'] = "yes"
            else:
                os.environ['SCHRODINGER_NICE'] = ""

        # If the script and/or args contain $SCHRODINGER, replace it with
        # $REMOTE_SCHRODINGER for remote jobs so the remote SCHRODINGER is
        # used. Also expand the path (to work under Windows).
        # Do the same for *_EXEC variables.
        if not self._runtoplevel:
            replacements = [
                ('SCHRODINGER', 'REMOTE_SCHRODINGER'),
                ('MMSHARE_EXEC', 'REMOTE_MMSHARE_EXEC'),
                ('GLIDE_EXEC', 'REMOTE_GLIDE_EXEC'),
                ('IMPACT_EXEC', 'REMOTE_IMPACT_EXEC'),
                ('CANVAS_EXEC', 'REMOTE_CANVAS_EXEC'),
                # FIXME: Expand to work with all products by reading the
                # $SCHRODINGER_APP_EXEC variable and expanding that EXEC
                # and SCHRODINGER.
            ]
            for local_env, remote_env in replacements:
                if os.getenv(remote_env):
                    # Ev:128745 Properly handle spaces in the path:
                    remote_env_corrected = os.environ[remote_env].replace(
                        " ", r"\ ")
                    # FIXME: this will not work if the variable already has
                    # spaces escaped. Will this ever happen?
                    cmd = cmd.replace("$%s" % local_env, remote_env_corrected)
        return cmd
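    # Illustrative sketch (not part of the original source): the general
    # shape of the string built above for a hypothetical script
    # "my_driver.py" with arguments ["-n", "10"], stdout/stderr file
    # "my_job.log", append_logs False, and before any environment-variable
    # rewriting for remote hosts:
    #
    #     ( "$REMOTE_SCHRODINGER/run" "my_driver.py" -n 10 ) > "my_job.log" 2>&1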
    def getCommandArgs(self):
        """
        Return the job invocation command. If the job is being run
        'no_redirect', just the backend execution command is returned.
        Otherwise, the jlaunch invocation command is returned.

        The jlaunch command uses the new semantics (e.g., '-cmd' instead of
        '-e').

        Raises a RuntimeError if the SCHRODINGER and MMSHARE_EXEC env vars
        aren't set.
        """
        jlaunch_args = []  # list of arguments to be passed to jlaunch
        file_args = []  # list of input/output/logfile arguments

        jlaunch_args.extend(["-name", self._jobname])

        # Set monitor interval:
        if self._interval is not None:
            jlaunch_args.extend(["-monitorinterval", str(self._interval)])

        # Set the log interval:
        if self._loginterval is not None:
            jlaunch_args.extend(["-loginterval", str(self._loginterval)])

        # Set the temp dir options:
        if self._local:
            jlaunch_args.append('-local')
        else:
            if not mmutil.feature_flag_is_enabled(mmutil.JOB_SERVER):
                jlaunch_args.append('-nolocal')
            if self._tmpdir:
                jlaunch_args.extend(["-tmpdir", self._tmpdir])

        if self._save is not None:  # Don't set anything if None
            if self._save:
                jlaunch_args.append('-save')
            else:
                jlaunch_args.append('-nosave')

        if self._prog:
            jlaunch_args.extend(['-prog', self._prog])

        ############### ADD LOG FILES ##################################
        # This mechanism to add log files may appear strange, but it was
        # written this way for the following reasons:
        # 1. The stdout/stderr file can be given any name by the user
        # 2. Any log file can be the first (default) log file
        all_log_files = []  # List of all log files
        if self._default_log_file:
            all_log_files.append(self._default_log_file)

        # Set the stdout/stderr log file:
        if self._stdouterr_file:
            # Remove the stdout/stderr file from the previous run (if any):
            if os.path.isfile(self._stdouterr_file) and not self._append_logs:
                try:
                    os.remove(self._stdouterr_file)
                except:
                    pass
            # If it was not already added by the user:
            if self._stdouterr_file not in all_log_files:
                all_log_files.append(self._stdouterr_file)

        # Set the additional log files (if any):
        for f in self._additional_log_files:
            if f not in all_log_files:
                all_log_files.append(f)

        for f in all_log_files:
            file_args.append(('-log', f))

        if self._append_logs:
            jlaunch_args.extend(['-appendlogs'])

        # Set the input files (if any):
        for f in self._input_files:
            file_args.append(('-in', f))

        # Set the output files (if any):
        for f in self._output_files:
            file_args.append(('-out', f))

        # Set the required output files (if any):
        for f in self._req_output_files:
            file_args.append(('-reqout', f))

        # Set the structure output file (if any):
        if self._structureoutputfile:
            jlaunch_args.extend(['-structout', self._structureoutputfile])

        # Set the structure monitor file (if any):
        if self._structuremonitorfile:
            jlaunch_args.extend(['-structmon', self._structuremonitorfile])

        # If REMOTE_SCHRODINGER and REMOTE_MMSHARE_EXEC are present, use them
        # (they were set by toplevel). Otherwise check for the presence of
        # SCHRODINGER and MMSHARE_EXEC (set by the startup script and
        # jlaunch).

        # Check for environment variables set by toplevel:
        if not os.getenv('REMOTE_SCHRODINGER'):
            # Tell jlaunch to use SCHRODINGER:
            os.environ['REMOTE_SCHRODINGER'] = os.environ['SCHRODINGER']
        if not os.getenv('REMOTE_MMSHARE_EXEC'):
            # Tell jlaunch to use MMSHARE_EXEC:
            os.environ['REMOTE_MMSHARE_EXEC'] = os.environ['MMSHARE_EXEC']

        # Copy the script to the JobDir (needed for non built-in scripts):
        if self._copyscript:
            file_args.append(
                ('-f', '{} > {}'.format(self._script,
                                        os.path.basename(self._script))))

        for source, dest in self._force_input_files:
            file_args.append(('-f', f'{source} > {dest}'))

        # Tell jlaunch about the temporary input files (including the script):
        for name, filename in self._temp_input_files.items():
            file_args.append(('-tin%s' % name, filename))

        # add environment settings
        for env in self._envs:
            jlaunch_args.extend(['-env', env])

        # add number of processors
        if self._nproc:
            jlaunch_args.extend(['-nproc', str(self._nproc)])

        # add required licenses
        for license in self._licenses:
            jlaunch_args.extend(['-lic', license])

        # Add file arguments.
        cmdline_file_args = jobcontrol.file_arguments_for_launch_command(
            file_args)
        jlaunch_args.extend(cmdline_file_args)

        if self._localdriver and self._runtoplevel:
            # These options are incompatible, because there is no way to
            # separate driver and subjob hosts when invoking jlaunch with a
            # top-level script.
            raise RuntimeError(
                "The Launcher options 'localdriver' and 'runtoplevel' cannot "
                "both be True")

        if self._localdriver:
            # Override the top-level host args for the driver. This requires
            # setting some remote env vars to their local values. Note that
            # using 'usehostargs=False', host, and user initialization options
            # is not sufficient; env var manipulations must be done, also.
            # The -HOST must be set to 'localhost' to avoid jlaunch defaulting
            # to the top-level HOST_ARGS (this happens only with the "new
            # semantics").
            app_exec_var = os.getenv('SCHRODINGER_APP_EXEC')
            if app_exec_var:
                remote_app_exec_var = 'REMOTE_' + app_exec_var
                os.environ[remote_app_exec_var] = os.environ[app_exec_var]
            os.environ['REMOTE_MMSHARE_EXEC'] = os.environ['MMSHARE_EXEC']
            os.environ['REMOTE_SCHRODINGER'] = os.environ['LOCAL_SCHRODINGER']
            jobhost = "localhost"
            jobuser = None
        else:
            if self._host is None:
                # Use the host args set by $SCHRODINGER/run:
                jobhost = os.getenv('JOBHOST')
                # NOTE: If jobhost is None, jlaunch will read the HOST_ARGS
                # env var, and if HOST_ARGS is not set, it will submit the job
                # to localhost. So it should be possible to remove this block
                # of code, and if self._host is None to simply not pass -HOST
                # to jlaunch.
            else:
                # Use the host args from the constructor (if any):
                jobhost = self._host

            # TODO: Remove this block, as we are no longer supporting the
            # -USER option.
            if self._user is None:
                jobuser = os.getenv('JOBUSER')
                # NOTE: If jobuser is None, jlaunch will submit the job as
                # the same user.
            else:
                jobuser = self._user

        if self._usehostargs is not None:
            import warnings
            msg = ("usehostargs is deprecated. To disable this option it "
                   "is now sufficient to specify the host option.")
            warnings.warn(msg, DeprecationWarning, stacklevel=2)

        if self._runtoplevel:
            # The calling code (which is not a startup script - e.g. a panel)
            # requested the top-level script to be run prior to submitting
            # the job. This means that the current REMOTE_SCHRODINGER value
            # can not be used, as it will be changed by the toplevel.
            hostobj = jobcontrol.get_host(jobhost)
            os.environ['REMOTE_SCHRODINGER'] = hostobj.schrodinger

        cmd = self._getBackendCommand()
        # Remove this temporary fix once JOBCON-2877 is addressed
        if self._expandvars is None or self._expandvars:
            cmd = os.path.expandvars(cmd)
        cmd = "BASE64 %s" % base64.b64encode(
            cmd.encode('ascii')).decode('ascii')
        jlaunch_args += ['-cmd', cmd]

        if jobhost:
            jlaunch_args.extend(['-HOST', jobhost])
        if jobuser:
            jlaunch_args.extend(['-USER', jobuser])

        # jlaunch does NOT accept -DISP and -PROJ, so these should be used
        # only when running in toplevel mode (with $SCHRODINGER/run)
        if self._runtoplevel:
            if self._disp:
                jlaunch_args.extend(['-DISP', self._disp])
            if self._proj:
                jlaunch_args.extend(['-PROJ', self._proj])
            if self._viewname:
                jlaunch_args.extend(['-VIEWNAME', self._viewname])

        if self._oplsdir:
            jlaunch_args.extend(['-OPLSDIR', self._oplsdir])

        if self._no_launch:
            jlaunch_args.append('-NOLAUNCH')

        if mmutil.feature_flag_is_enabled(mmutil.JOB_SERVER):
            if self._stoppable:
                jlaunch_args.append('-stoppable')
            # Provide the user temp dir for the Go launch process to locate
            # the localhost jobserver directory. This is slow to obtain
            # within the Go launch process.
            jlaunch_args.extend(
                ['-user-tempdir', mmjob.get_job_server_parent_path()])

        if self._runtoplevel:
            command = [LOCAL_RUN, 'jlaunch.pl']
            command += jlaunch_args
        else:
            command = [
                'perl',
                os.path.join(os.environ['MMSHARE_EXEC'], 'jlaunch.pl')
            ]
            command += jlaunch_args

        _logger.debug("jlaunch %s" % " ".join(command))
        return command
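    # Illustrative sketch (not part of the original source): inspecting the
    # jlaunch invocation without submitting the job, on an existing
    # 'launcher' instance. Combining setNoLaunch(True) with getCommandArgs()
    # this way is an assumed debugging pattern, not a documented recipe.
    #
    #     launcher.setNoLaunch(True)
    #     print(" ".join(launcher.getCommandArgs()))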
    def _getNoRedirectCommandArgs(self):
        """
        Return the command for executing the backend directly, without job
        control.
        """
        # Use $SCHRODINGER/run to execute the script:
        if self._product:
            cmd = [LOCAL_RUN, '-FROM', self._product, self._script]
        else:
            cmd = [LOCAL_RUN, self._script]
        cmd += self._scriptargs
        return cmd
    def launch(self, print_jobid=True, launch_dir=None):
        """
        Launch the script or program. If the program is successfully launched
        under job control, the resulting Job object is returned. With
        'no_redirect', runs the backend directly and returns the subprocess
        exit status. With 'debugger', runs the backend under the specified
        debugger and returns None.

        :param print_jobid: Whether to print the JobId of the newly launched
            job to the terminal. (default: True)

        :type launch_dir: str
        :param launch_dir: Launch the job from the specified directory.

        :raise IOError: if any of the input files are missing.
        :raise OSError: if the executable is missing.
        :raise RuntimeError: if jlaunch failed to submit the job.

        :return: The job object of the launched job. If no_redirect is set,
            the subprocess exit status is returned; if debugger is set, None
            is returned.
        """
        # Check existence of input files
        missing_input_files = \
            [f for f in self._input_files if not os.path.isfile(f)]
        if missing_input_files:
            raise OSError("Input file(s) missing: '%s'" %
                          ', '.join(missing_input_files))

        not_readable_files = \
            [f for f in self._input_files if not os.access(f, os.R_OK)]
        if not_readable_files:
            raise OSError("Input file(s) missing read permission: '%s'" %
                          ', '.join(not_readable_files))

        if self._no_redirect:
            cmd = self._getNoRedirectCommandArgs()
            try:
                return subprocess.call(cmd)
            except OSError as err:
                if "No such file or directory" in str(err):
                    raise OSError("No such file or directory: %s" % cmd[0])
        elif self._debugger:
            import shlex
            dbg = shlex.split(self._debugger)
            cmd = dbg + self._getNoRedirectCommandArgs()
            subprocess.call(cmd)
            return None
        else:
            cmd_args = self.getCommandArgs()
            if self._expandvars is not None:
                jobobj = jobcontrol.launch_job(cmd_args,
                                               print_jobid,
                                               self._expandvars,
                                               launch_dir=launch_dir)
            else:
                jobobj = jobcontrol.launch_job(cmd_args,
                                               print_jobid,
                                               launch_dir=launch_dir)
            if self._wait:
                # If we passed the -wait option to jlaunch, then it would
                # stay open until the job completes and would return with a
                # non-zero status if the job fails. This would cause an
                # exception to be raised by jobcontrol.launch_job().
                jobobj.wait()
            return jobobj
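# Illustrative sketch (not part of the original source): using Launcher from a
# startup script to run a script under job control. The script path, file
# names, program name, and license token below are hypothetical.
#
#     launcher = Launcher(
#         "$REMOTE_MMSHARE_EXEC/../../python/scripts/my_driver.py",
#         jobname="my_job", prog="MyProgram")
#     launcher.addScriptArgs(["-n", "10"])
#     launcher.addInputFile("input.mae")
#     launcher.addRequiredOutputFile("my_job-out.mae")
#     launcher.setStructureOutputFile("my_job-out.mae")
#     launcher.addLicense("MMLIBS:1")
#     job = launcher.launch()
#     job.wait()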
def launch(script,
           args=None,
           input_files=None,
           force_input_files=None,
           output_files=None,
           req_output_files=None,
           structure_output_file=None,
           structure_monitor_file=None,
           log_files=None,
           envs=None,
           licenses=None,
           print_jobid=True,
           appendlogs=False,
           nolaunch=None,
           **kwargs):
    """
    Launch a job directly, without creating an explicit Launcher instance.

    This function supports the same arguments as the Launcher class
    constructor, as well as those listed below. These additional arguments
    are all optional.

    NOTE: Launcher is designed for use within startup scripts only.

    :param args: A list of string arguments to pass to the backend program.
        (default: None)

    :param input_files: A list of input files that are required for the job
        to run. These will be copied to the job directory unless they're
        already accessible with the given pathnames. (default: None)

    :param force_input_files: A list of input files that should be
        transferred to the job directory, regardless of whether they're
        already accessible or not. (default: None)

    :param output_files: A list of output files to register with job control.
        These files will be copied back to the launch directory after the
        backend completes. Missing files are silently ignored.
        (default: None)

    :param req_output_files: A list of output files to register with job
        control. These files will be copied back to the launch directory
        after the backend completes. Missing files mark the job as "died".
        (default: None)

    :param structure_output_file: The name of the structure output file. If
        this job's results are supposed to be incorporated into a Maestro
        project, then this is the file that will be incorporated.
        (default: None)

    :param structure_monitor_file: The name of the structure monitor file. If
        this job is monitored from Maestro, then this file will be used to
        get the structure to display. (default: None)

    :param log_files: A list of log files to register with job control. These
        files will be copied back to the launch directory continually while
        the job is running. (default: None)

    :param envs: A list of environment variable settings to be made on the
        compute node. Each setting should be a string, in the form
        "MYENV=value". (default: None)

    :param licenses: A list of required licenses. Each requirement is
        specified as a string in the form 'TOKEN' or 'TOKEN:n', where TOKEN
        is the name of the license and n is the number of tokens checked out.
        (default: None)

    :param print_jobid: Whether to print the JobId of the newly launched job
        to the terminal. (default: True)

    :param appendlogs: If true, appends to log files rather than creating
        blank ones. (default: False)

    :param nolaunch: If true, exit just before the job would be submitted to
        the queue, or started on the compute node. (default: None)

    :raise IOError: if any of the input files are missing.

    :return: The job object, if launched successfully under job control,
        otherwise None.
    """
    launcher = Launcher(script, **kwargs)
    if args:
        launcher.addScriptArgs(args)
    if nolaunch:
        launcher.setNoLaunch(nolaunch)
    if input_files:
        for filename in input_files:
            launcher.addInputFile(filename)
    if force_input_files:
        for filename in force_input_files:
            launcher.addForceInputFile(filename)
    if output_files:
        for filename in output_files:
            launcher.addOutputFile(filename)
    if req_output_files:
        for filename in req_output_files:
            launcher.addRequiredOutputFile(filename)
    if structure_output_file:
        launcher.setStructureOutputFile(structure_output_file)
    if structure_monitor_file:
        launcher.setStructureMonitorFile(structure_monitor_file)
    if log_files:
        for filename in log_files:
            launcher.addLogFile(filename)
    if appendlogs:
        launcher.setAppendLogs(True)
    if envs:
        for env in envs:
            launcher.addEnv(env)
    if licenses:
        for license in licenses:
            launcher.addLicense(license)
    job = launcher.launch(print_jobid)
    return job
if __name__ == "__main__":
    print("Module documentation:")
    print(__doc__)