#!/usr/bin/python
# Copyright (c) 2004-2014 Bright Computing Holding BV. All Rights Reserved.
#
# This software is the confidential and proprietary information of
# Bright Computing Holding BV ("Confidential Information"). You shall not
# disclose such Confidential Information and shall use it only in
# accordance with the terms of the license agreement you entered into
# with Bright Computing Holding BV or its subsidiaries.

import getopt, shutil, time
import os, sys, signal, urllib
from subprocess import Popen
import threading, logging, socket
import rpm, tempfile, fnmatch
from stat import ST_MODE, S_ISBLK
import unicodedata, json, filecmp

sys.path.append('/cm/local/apps/cluster-tools/other')
from cmcommon import linuxDistro

def main():
    try:
        # Check if super user
        if os.geteuid() != 0:
            printError('This script must be run as root', True)

        # Check if cm release file exists
        if not os.path.isfile(gd.cm_release_file):
            printError('Release file ' + gd.cm_release_file + ' not found.\nUnable to determine Bright version.', True);
        else:
            gd.current_version = getCurrentVersion(gd.cm_release_file)
            if not gd.current_version in gd.upgrade_options.keys():
                printError('Current Bright version ' + gd.current_version + ' is not a valid candidate for upgrade.', True);

        # Check cmdaemon state
        if os.path.isfile(gd.cmd_state_file):
            gd.cmdaemon_state = readFile(gd.cmd_state_file)
        else:
            printError('Cmdaemon state file ' + gd.cmd_state_file + ' not found.\nUnable to determine state of node.', True);

        #if not gd.cmdaemon_state in gd.valid_cmdaemon_states:
        #    printError('Cluster Manager upgrade can be done only in the following states:\n' + '|'.join(gd.valid_cmdaemon_states), True)

        if gd.verbose:
            tee("Current bright version: " + gd.current_version, True)
            tee("Upgrade possibilities : " + ','.join(gd.upgrade_options[gd.current_version]), True)

        show_help = False

        # Parse command line arguments
        options, arguments = getopt.getopt(sys.argv[1:], 'u:i:d:vnhcwfse:a:',
                                           ['version=', 'image=', 'useiso=', 'verbose', 'noninteractive', 'help',
                                            'cudaupgrade', 'force', 'skipnodestatescheck', 'disablerepos', 'imagebasedirs='])
        if len(options) == 0:
            gd.usage()
            sys.exit(1)

        for opt, arg in options:
            if opt in ('-u', '--version'):
                gd.upgrade_version = arg
            if opt in ('-a', '--imagebasedirs'):
                gd.additional_image_base_dirs = arg.split(',')
            if opt in ('-d', '--useiso'):
                gd.use_iso_repo = True
                gd.iso_repo_path = os.path.abspath(arg)
            if opt in ('-v', '--verbose'):
                gd.verbose = True
            if opt in ('-n', '--noninteractive'):
                gd.non_interactive = True
            if opt in ('-h', '--help'):
                show_help = True
            if opt in ('-c', '--cudaupgrade'):
                gd.cuda_upgrade = True
            if opt in ('-f', '--force'):
                gd.force_upgrade = True
            if opt in ('-s', '--skipnodestatescheck'):
                gd.skip_node_states_check = True
            if opt in ('-e', '--disablerepos'):
                gd.disable_repos = arg.split(',')

        for opt, arg in options:
            if opt in ('-i', '--image'):
                gd.update_images = True
                if arg == "all":
                    gd.images_to_update = gd.getAllImageDirs()
                else:
                    gd.images_to_update = gd.getAllImageDirs(arg.split(','))

        if show_help:
            gd.printMan()
            sys.exit(0)

        if gd.upgrade_version == None:
            gd.usage()
            sys.exit(1)

        # Sanity checks
        errors = []
        warnings = []
        possible_upgrades = gd.upgrade_options[gd.current_version]
        if len(possible_upgrades) == 0:
            if os.path.isfile(gd.upgraded_from):
                gd.current_version = readFile(gd.upgraded_from)
                gd.init_complete = True
            else:
                errors.append('No upgrade possibilities for ' + gd.current_version)
        else:
            if not gd.upgrade_version in possible_upgrades:
                errors.append('Invalid upgrade version (' +
gd.upgrade_version + ') for Bright ' + gd.current_version + '\nPossible versions are: ' + ','.join(possible_upgrades)) ldist = linuxDistro() if not ldist.fulldist in gd.valid_dist_path: errors.append('Unsupported base distribution for upgrade: ' + str(ldist.fulldist)) if gd.update_images: bad_imgs = [] for imgdir in gd.images_to_update: if not os.path.isdir(imgdir): warnings.append('Software image directory ' + imgdir + ' does not exist.') bad_imgs.append(imgdir) else: ldist = linuxDistro(imgdir) if not ldist.fulldist in gd.valid_dist_path: if ldist.fulldist == None: warnings.append(imgdir + ' is an invalid software image directory, cannot find distribution.') else: warnings.append(imgdir + ': invalid distribution ' + str(ldist.fulldist)) bad_imgs.append(imgdir) """ Remove bad dirs from actual list of images to be updated""" for d in bad_imgs: gd.images_to_update = [x for x in gd.images_to_update if x != d] if len(warnings) > 0: printWarning('\n'.join(warnings), False); if len(errors) > 0: printError('\n'.join(errors), True); if gd.update_images and len(gd.images_to_update) == 0: tee('\nNothing to do.\n') sys.exit(1) s = signal.signal(signal.SIGINT, signal.SIG_IGN) # Check validity of iso/dvd (if iso is being used for upgrade) # Create iso/dvd repo config file if gd.use_iso_repo: valid = True if gd.update_images: for img in gd.images_to_update: if not validBrightDvd(img): valid = False else: if not validBrightDvd(): valid = False if not valid: printError('Bright DVD/ISO validation failed.', True); gd.preReqTxt() if not gd.non_interactive: continueOrExit() gd.setHeaders() # Pre-upgrade checks print '\n' gd.setCommonVars() if not gd.init_complete: if (gd.cmdaemon_state == 'ACTIVE'): if not gd.skip_node_states_check: if serviceRunning('cmd'): if not Exec(gd.NODES_UP, CheckIfNodesUp): sys.exit(1) if not Exec(gd.STOP_SERV, CmStopServices): sys.exit(1) if not Exec(gd.SS_MNT, CmSharedMountCheck): sys.exit(1) if (gd.cmdaemon_state == 'ACTIVE'): if not Exec(gd.BC_HEADER, BackupCurrentCmdaemonConfiguration): sys.exit(1) signal.signal(signal.SIGINT, s) # Start upgrade if gd.update_images: if (gd.cmdaemon_state == 'ACTIVE'): tee("\nThe following software images are scheduled for upgrade:\n") tee(','.join(gd.images_to_update), Font.BLUE) upgradeSoftwareImages() cleanUp() if not Exec(gd.UPR_HEADER, UpdateProvisioners): logAndAppend('Update provisioners failed.') print '\n' else: printError('Software images must be upgraded only on the active head node.') else: c_v = getCurrentVersion(gd.cm_release_file) if (gd.upgrade_version == c_v) and not gd.force_upgrade: tee('Head node is already at version ' + gd.upgrade_version + ', (use -f option if upgrade is incomplete)') else: logger.Log('UPGRADE FROM ' + gd.current_version + ' TO ' + gd.upgrade_version) upgradeHeadNode() except Exception, e: printException(str(e), True); def isBlockDevice(dev): try: if os.path.exists(dev): mode = os.stat(dev)[ST_MODE] if S_ISBLK(mode): return True else: logger.Log(dev + " is not a block special device file.") return False else: logAndAppend("Cannot find path " + dev) return False except Exception, e: logger.LogE(str(e)) return False def disableRepos(repo_list): try: for f in repo_list: logger.LogV('Disabling repo ' + f) fin = open(f, 'r') fout = open(f + '.new', 'wt') for line in fin: line = line.strip('\n') if 'enabled=1' in line: line = line.replace('enabled=1', 'enabled=0') fout.write(line + '\n') fout.close() fin.close() shutil.move(f + '.new', f) except Exception, e: logger.Log(str(e)); def validBrightDvd(img_root=''): 
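    # Validates the ISO/DVD source passed with -d/--useiso before it is used as a package
    # repository. The path may be a block device (e.g. a DVD drive), an already extracted
    # directory, or an ISO file; devices and ISO files are mounted on a temporary directory
    # first. The medium is then checked for the expected Bright release under /data/cm-rpms
    # and for a dist/ tree matching the distribution of img_root (the head node itself when
    # img_root is empty). Problems are collected in gd.errors.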
validdvd = True try: if not os.path.exists(gd.iso_repo_path): gd.errors.append('Path ' + gd.iso_repo_path + ' does not exist.') else: if isBlockDevice(gd.iso_repo_path): gd.iso_repo_dir = tempfile.mkdtemp() if not canMount(gd.iso_repo_path, gd.iso_repo_dir): validdvd = False else: if not validBrightDvdDir(gd.iso_repo_path): validdvd = False elif os.path.isdir(gd.iso_repo_path): gd.do_not_unmount = True if not validBrightDvdDir(gd.iso_repo_path): validdvd = False else: gd.iso_repo_dir = gd.iso_repo_path elif os.path.isfile(gd.iso_repo_path): gd.iso_repo_dir = tempfile.mkdtemp() if canMount(gd.iso_repo_path, gd.iso_repo_dir): if not validBrightDvdDir(gd.iso_repo_dir): validdvd = False if not unMount(gd.iso_repo_dir): gd.errors.append('Unable to unmount temp directory ' + gd.iso_repo_dir) else: shutil.rmtree(gd.iso_repo_dir) if validdvd: ldist = linuxDistro(img_root) if not ldist.fulldist in gd.valid_dist_path: gd.errors.append('Unsupported base distribution ' + ldist.fulldist) validdvd = False else: dist_path = gd.valid_dist_path[ldist.fulldist] found_dist = False for p in dist_path: dist_dir = gd.iso_repo_dir + gd.iso_rpms_dir + '/dist/' + p logger.LogV('Looking for ' + dist_dir) if os.path.isdir(dist_dir): found_dist = True break if not found_dist: if not unMount(gd.iso_repo_dir): gd.errors.append('Unable to unmount temp directory ' + gd.iso_repo_dir) else: shutil.rmtree(gd.iso_repo_dir) if img_root != '': logAndAppend('Incorrect ISO/DVD for image ' + img_root + '(' + ldist.fulldist + ')') else: logAndAppend('Incorrect ISO/DVD for head node' + '(' + ldist.fulldist + ')') validdvd = False else: logger.Log('Dist on DVD/ISO ' + gd.iso_repo_path + ' is good: ' + ldist.fulldist) except Exception, e: logger.LogE(str(e)) validdvd = False return validdvd def createPrereqDistRepoFile(): try: f = open(gd.prereq_dist_repo_file, 'wt') f.write('[prereq-distrepo-cmupgrade' + ']\n') f.write('name=Prereq repo for cm upgrade\n') if gd.is_sles: f.write('baseurl=http://download.opensuse.org/repositories/Cloud:/OpenStack:/Havana/SLE_11_SP3/\n') else: f.write('baseurl=http://dl.fedoraproject.org/pub/epel/6/x86_64/\n') f.write('enabled=1\n') f.write('gpgcheck=0\n') f.close() except Exception, e: logger.LogE(str(e)) def createDvdRepoFile(): try: logger.LogV('Writing repo ' + gd.dvd_repo_file) url_1 = urllib.quote('file:' + '''//''' + gd.iso_repo_dir + gd.iso_rpms_dir + '/' + gd.stable_release_num, '/:') fa = open(gd.dvd_repo_file , 'w') fa.write('[cm-dvdrepo-' + gd.upgrade_version + ']\n') fa.write('name=CM packages DVD Repository for ' + gd.upgrade_version + '\n') fa.write('baseurl=%s\n' %(url_1)) fa.write('enabled=1\n') fa.write('gpgcheck=1\n') if not gd.is_sles: if not gd.wlm_upgrade: fa.write('exclude=' + gd.upgrade_excludes + '\n') url_2 = urllib.quote('file:' + '''//''' + gd.iso_repo_dir + gd.iso_rpms_dir + '/packagegroups/default', '/:') fa.write('\n\n[cm-dvdrepo-pgdefault-' + gd.upgrade_version + ']\n') fa.write('name=CM default package group DVD Repository for ' + gd.upgrade_version + '\n') fa.write('baseurl=%s\n' %(url_2)) fa.write('enabled=1\n') fa.write('gpgcheck=0\n') fa.close() except Exception, e: logger.LogE(str(e)) def runThread(work): ts = work() ts.start() alive = ts.isAlive() prid = 0 while alive: alive = ts.isAlive() prid = printProgress(prid) return ts.join() def printProgress(pos): sys.stdout.write(gd.progresschars[pos]) time.sleep(.25) sys.stdout.write('\b\b\b\b\b\b\b\b') sys.stdout.flush() pos+=1 pos%=len(gd.progresschars) return pos def Exec (header, thr): result = True 
    printHeader(header)
    result = runThread(thr)
    printResult(result)
    return result

def logAndAppend(msg):
    gd.errors.append(msg)
    logger.Log(msg)

def maxLen():
    maxlen = 80
    try:
        row_col = None
        import fcntl
        import termios
        import struct
        fd = os.open(os.ctermid(), os.O_RDONLY)
        if fd != None:
            row_col = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
        else:
            row_col = struct.unpack('hh', fcntl.ioctl(1, termios.TIOCGWINSZ, '1234'))
        if row_col != None:
            maxlen = row_col[1]
    except Exception, e:
        logger.LogE(str(e))
    return maxlen

def printError(msg, exit=True):
    gd.errors = [x for x in gd.errors if x != '']
    logger.Log(msg)
    print '\n'
    print Font.BOLD + 'ERROR(S):' + Font.CLOSE
    print '-' * gd.max_len
    print """\
%s
%s
%s
%s
""" %(Font.RED, msg, '\n'.join(gd.errors), Font.CLOSE)
    print '-' * gd.max_len
    if exit:
        sys.exit(1)

def printWarning(msg, exit=False):
    logger.Log(msg)
    print '\n'
    print Font.BOLD + 'WARNING(S):' + Font.CLOSE
    print '-' * gd.max_len
    print """\
%s
%s
%s
""" %(Font.RED, msg, Font.CLOSE)
    print '-' * gd.max_len
    if exit:
        sys.exit(1)

def printException(msg, exit=True):
    try:
        exc_type, exc_obj, exc_tb = sys.exc_info()
        fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
        print """
EXCEPTION(%s):
%s
""" %(fname + ':' + str(exc_tb.tb_lineno), msg)
    except Exception, e:
        print """
EXCEPTION:
%s
""" %(msg + '\n' + str(e))
    if exit:
        sys.exit(1)

def emptyString(mystr):
    ret = mystr.strip(' ')
    if len(ret) == 0:
        return True
    return False

def continueOrExit():
    var = raw_input("Continue(c)/Exit(e)? ")
    while var != 'c' and var != 'e':
        var = raw_input("Continue(c)/Exit(e)? ")
    if var == 'e':
        sys.exit(1)
    pass

def getCurrentVersion(mfile):
    try:
        fin = open(mfile, 'r')
        for line in fin:
            line = line.strip('\n');
            if 'Cluster Manager' in line:
                return line.replace('Cluster Manager v', '')
    except Exception, e:
        printException(str(e), True);

def tee(msg, color=None):
    try:
        if color == None:
            print Font.BOLD + msg + Font.CLOSE
        else:
            print color + msg + Font.CLOSE
        logger.Log(msg)
    except Exception, e:
        logger.LogE(str(e), True);

def readFile(mfile):
    data = ''
    try:
        f = open(mfile, 'r')
        data = f.read()
        return data
    except:
        return data

def writeFile(filename, msg):
    try:
        f = open(filename, 'w')
        f.write(msg)
        f.close()
    except Exception, e:
        logger.LogE(str(e))

def validBrightDvdDir(d, err=None):
    good = False
    try:
        subdirs = os.listdir(d + gd.iso_rpms_dir)
        for mydir in subdirs:
            logger.LogV('Looking in ' + mydir)
            if gd.upgrade_version in mydir:
                good = True
                gd.stable_release_num = mydir
                break
        if not good:
            gd.errors.append('Could not find the correct version directory for ' + gd.upgrade_version)
        else:
            l_r = gd.stable_release_num.split('-')
            m_r = gd.minimum_stable_release[gd.upgrade_version].split('-')
            if int(l_r[1]) < int(m_r[1]):
                good = False
                logger.LogV(l_r[1] + ' < ' + m_r[1])
                gd.errors.append('The DVD/ISO being used does not appear to be new enough (' + gd.stable_release_num + ').\nThe DVD/ISO release must be ' + gd.minimum_stable_release[gd.upgrade_version] + ' or newer.')
        if good:
            logger.Log('DVD/ISO ' + gd.iso_repo_path + ' is good: ' + gd.stable_release_num)
    except Exception, e:
        gd.errors.append(str(e))
        good = False
    return good

def printHeader(msg):
    print msg.ljust(45 + 1, '.') + "... ",
    sys.stdout.flush()
    logger.Log(msg)

def printResult(result):
    if result:
        print "[  OK  ]"
    else:
        print "[FAILED]"
        cleanUp()
        if len(gd.errors) > 0:
            printError('', True)

def canMount(p, mnt_dir):
    good = True
    try:
        rc = RunCommand()
        rc.run(['/bin/mount', '-o', 'loop', p, mnt_dir])
        good = rc.good
    except Exception, e:
        # Record mount failures in the global error list
        gd.errors.append(str(e));
    return good

def unMount(p):
    good = True
    try:
        rc = RunCommand()
        rc.run(['/bin/umount', p])
        good = rc.good
    except Exception, e:
        gd.errors.append(str(e));
    return good

def bindMount(src, dest):
    try:
        rc = RunCommand()
        rc.run(['/bin/mount', '-o', 'bind', src, dest])
        good = rc.good
    except Exception, e:
        gd.errors.append(str(e));

def getNextFile(f, err=None):
    try:
        i = 1
        while os.path.exists(f + '.' + str(i)):
            i = i + 1
        return f + '.' + str(i)
    except Exception, e:
        logger.Log(str(e))
        return f

def processBackupFiles(origs, createbackup):
    try:
        if createbackup:
            for orig in origs:
                if os.path.isfile(orig):
                    backup = orig + gd.upgrade_backup_suffix
                    if not os.path.isfile(backup):
                        logger.Log('Backing up file ' + backup)
                        shutil.copy(orig, backup)
                    else:
                        logger.Log('Backup file already exists, ' + backup)
                else:
                    logger.Log("Cannot find file " + orig + ', nothing to backup')
        else:
            for orig in origs:
                if os.path.isfile(orig):
                    shutil.copy(orig, orig + gd.upgrade_new_suffix)
                    backup = orig + gd.upgrade_backup_suffix
                    if os.path.isfile(backup):
                        logger.Log('Restoring backed up file ' + backup)
                        shutil.copy(backup, orig)
                    else:
                        logger.Log("Cannot find file " + backup + ', nothing to restore')
    except Exception, e:
        logger.Log(str(e));

def serviceRunning(service):
    r = False
    try:
        rc = RunCommand()
        rc.run(['/sbin/service', service, 'status'])
        if rc.good:
            r = True
        else:
            r = False
    except Exception, e:
        logger.Log('Unable to get service status for ' + service + ':' + str(e))
        r = False
    return r

def stopService(proglist, service):
    res = True
    try:
        if serviceRunning(service):
            rc = RunCommand()
            rc.run(['/sbin/service', service, 'stop'])
            if rc.good:
                r = waitForServiceStop(proglist)
                if not r:
                    res = False
            else:
                logger.Log('Command failed: ' + 'service ' + service + ' stop')
                res = False
        else:
            logger.Log('Service ' + service + ' is not running')
            res = True
    except Exception, e:
        logger.LogE(str(e))
        res = False
    return res

def restartService(service):
    res = True
    try:
        rc = RunCommand()
        rc.run(['/sbin/service', service, 'restart'])
        res = rc.good
        logger.Log('Waiting for cmdaemon to complete init')
        time.sleep(gd.cmd_start_wait)
    except Exception, e:
        logger.LogE(str(e))
        res = False
    return res

def processRunning(process):
    r = False
    try:
        if os.system("/sbin/pidof " + process + " 1>/dev/null") == 0:
            r = True
    except Exception, e:
        logger.LogE(str(e));
        r = False
    return r

def waitForServiceStop(proglist):
    r = True
    try:
        for prog in proglist:
            if processRunning(prog):
                logger.Log('Checking: ' + prog)
                # Each program gets its own budget of 5 one-second checks
                retries = 0
                alive = 0
                while alive == 0 and retries < 5:
                    alive = os.system('/sbin/pidof ' + prog + " 1>/dev/null")
                    retries+=1
                    time.sleep(1)
                if alive == 0:
                    gd.errors.append('Process still running: ' + prog)
                    r = False
    except Exception, e:
        logger.LogE(str(e));
        r = False
    return r

def skipLineCmdConf(line):
    r = [None, None]
    try:
        if len(line) <= 1 or line.startswith('#') or emptyString(line):
            return r
        elif line.startswith('Service {') or line.startswith('}'):
            return r
        elif '=' in line:
            (var, value) = line.split('=', 1)
            var = var.strip(' ')
            value = value.strip(' ')
            if var == 'Path' or var == 'Public' or var == 'ObjectFile':
                return r
            return [var, value]
    except Exception, e:
        logger.LogE(str(e))
        return r
    return r

def 
skipLineNodeInstallerConf(line): r = [None, None] try: if len(line) <= 1 or line.startswith('#') or emptyString(line): return r elif '=' in line: (var, value) = line.split('=', 1) var = var.strip(' ') value = value.strip(' ') return [var, value] except Exception, e: logger.LogE(str(e)) return r return r def readCmdConf(conf_file): conf = {} try: f = open(conf_file, 'r') for line in f: line = line.strip('\n') (var, value) = skipLineCmdConf(line) if var != None and value != None: conf[var] = value; f.close() return conf except Exception, e: logger.LogE(str(e)); conf = {} return conf def readNodeInstallerConf(conf_file): conf = {} try: f = open(conf_file, 'r') for line in f: line = line.strip('\n') (var, value) = skipLineNodeInstallerConf(line) if var != None and value != None: conf[var] = value; f.close() return conf except Exception, e: logger.LogE(str(e)); conf = {} return conf def packageFoundOnLocalSource(name): p = None try: if gd.use_iso_repo: for root, dirs, files in os.walk(gd.iso_repo_dir): if 'cm-rpms/dist' in root: for filename in fnmatch.filter(files, name + '-*'): p = os.path.join(root, filename) break except Exception, e: logger.LogE(str(e)); return p def packageInstalled(name, version=None): r = None try: tr_set = rpm.TransactionSet() db_match = tr_set.dbMatch() if not db_match or db_match == None: logger.Log('No packages found in rpm database'); db_match.pattern('name', rpm.RPMMIRE_GLOB, name) for headers in db_match: if version != None: if (headers['name'] == name) and (version in headers['release']): r = headers['name'] + '-' + headers['version'] + '-' + headers['release'] break else: if (headers['name'] == name): r = headers['name'] + '-' + headers['version'] + '-' + headers['release'] break except Exception, e: logger.LogE(str(e)); r = None return r def installedPackages(pattern, version): r = [] try: tr_set = rpm.TransactionSet() db_match = tr_set.dbMatch() if not db_match or db_match == None: logger.Log('No packages found in rpm database'); db_match.pattern('name', rpm.RPMMIRE_GLOB, pattern + '*') for headers in db_match: if (pattern in headers['name']) and (version in headers['release']): r.append(headers['name'] + '-' + headers['version'] + '-' + headers['release']) except Exception, e: logger.LogE(str(e)); r = [] return r def cleanUp(d=None): try: if gd.use_iso_repo: if d != None: """ Clean up bind mount in software images """ if not unMount(d + '/' + gd.iso_repo_dir): gd.errors.append('Cleanup: unable to unmount %s/%s' %(d, gd.iso_repo_dir)) else: if not gd.do_not_unmount and d == None: if not unMount(gd.iso_repo_dir): gd.errors.append('Cleanup: unable to unmount temp directory %s' %(gd.iso_repo_dir)) except Exception, e: logger.LogE(str(e)); def removeBrokenWlmSymlinks(): try: """ Remove broken prolog symlinks, if found """ prolog_health_checker = '/var/prologs/10-prolog-healthchecker' broken_links = ['/cm/local/apps/pbspro%s' %(prolog_health_checker), '/cm/local/apps/sge%s' %(prolog_health_checker), '/cm/local/apps/slurm%s' %(prolog_health_checker), '/cm/local/apps/torque%s' %(prolog_health_checker)] for b_r in broken_links: if os.path.exists(b_r): os.remove(b_r) except Exception, e: logger.LogE(str(e)); def copyClientCerts(): try: client_pem_new = '/root/.cm/admin.pem' client_key_new = '/root/.cm/admin.key' client_pem_old = '/root/.cm/cmsh/admin.pem' client_key_old = '/root/.cm/cmsh/admin.key' if not os.path.isfile(client_pem_new): if os.path.isfile(client_pem_old): shutil.copy(client_pem_old, client_pem_new) os.system('chmod 600 ' + client_pem_new) if not 
os.path.isfile(client_key_new): if os.path.isfile(client_key_old): shutil.copy(client_key_old, client_key_new) os.system('chmod 600 ' + client_key_new) except Exception, e: logger.LogE(str(e)); def printRpmNewFiles(): try: if len(gd.rpm_new_files) > 0: print '\n' print gd.bf('The following rpmnew/rpmsave files were found:') print '-' * gd.max_len for f in gd.rpm_new_files: print f print """\ Please compare with the original and process them as needed. Please be aware that the cmdaemon configuration file has been updated to the new version during the upgrade. %s """ %('-' * gd.max_len) print """ New system configuration files can be found in /cm/conf/ It is important to review and deploy them if required. %s """ %('-' * gd.max_len) except Exception, e: logger.LogE(str(e)); def printBootMessage(): try: print """\ After all software images and head nodes have been upgraded: %s * Re-configure shared storage using cmha-setup,(if running in high availability mode) * Power on compute nodes * Power on cloud directors * Power on cloud nodes %s %s """ %(Font.BOLD, Font.CLOSE, '-' * gd.max_len) except Exception, e: logger.LogE(str(e)); def upgradeHeadNode(): try: if not Exec(gd.CHK_REQ, CheckRequiredDistPackages): sys.exit(1) tee('\nUpgrading head node (' + socket.gethostname() + ')\n') if not Exec(gd.UR_HEADER, UpdateCmRepoConfiguration): sys.exit(1) if not Exec(gd.REPOS_REACHABLE, CheckIfReposReachable): sys.exit(1) if len(gd.dep_solve_blockers) > 0: if not Exec(gd.RD_HEADER, RemoveDepSolveBlockerRpms): sys.exit(1) if not Exec(gd.IP_HEADER, InstallPrereqCmPackages): sys.exit(1) if not Exec(gd.UI_HEADER, UpgradeInit): sys.exit(1) if packageInstalled(gd.cm_config_cm, gd.current_version) != None: if not Exec(gd.UCM_HEADER, UpdateCmConfigCmRpm): sys.exit(1) if not Exec(gd.CU_HEADER, CmUpgrade): sys.exit(1) if not Exec(gd.RO_HEADER, RemoveObseletedPackages): sys.exit(1) new_packages = 0 if gd.is_sles: new_packages = len(gd.new_cm_packages_sles[gd.current_version]) else: new_packages = len(gd.new_cm_packages[gd.current_version]) if new_packages > 0: if not Exec(gd.UN_HEADER, UpdateNameChangedCmPackages): sys.exit(1) if len(gd.new_dist_packages[gd.current_version]) > 0: if not Exec(gd.IR_HEADER, InstallRequiredNewDistPackages): sys.exit(1) if gd.cuda_upgrade: if not Exec(gd.CUDA_HEADER, UpdateCudaRpms): sys.exit(1) if not Exec(gd.RCU_HEADER, RemoveCmUpgradeFile): sys.exit(1) if not Exec(gd.PRN_HEADER, ProcessRpmNewFiles): sys.exit(1) if not Exec(gd.RR_HEADER, RestoreRequiredFilesFromBackup): sys.exit(1) if not Exec(gd.MOTD_HEADER, UpdateMotdFile): sys.exit(1) if gd.upgrade_version in gd.update_slurm_params.keys(): if not Exec(gd.UPDATE_WLM_CONF, UpdateWlmConf): sys.exit(1) if gd.cmdaemon_state == 'ACTIVE': if not Exec(gd.MET_UPD, UpdateMetricCollection): sys.exit(1) if not Exec(gd.DOC_HEADER, DeleteOldNodeCerts): sys.exit(1) print '\n' print gd.cf('*** Head node upgrade complete ***', 'GREEN') cleanUp() printRpmNewFiles() printBootMessage() except Exception, e: printException(str(e), True) def upgradeSoftwareImages(): try: for image in gd.images_to_update: c_v = getCurrentVersion(image + gd.cm_release_file) gd.current_version = c_v if (gd.upgrade_version == c_v) and not gd.force_upgrade: tee("\nSoftware image " + image + ' is already at version ' + gd.upgrade_version + '\n') else: tee("\nUpgrading software image " + image + "\n") if os.path.isfile(image + '/' + gd.upgraded_from): gd.current_version = readFile(image + '/' + gd.upgraded_from) logger.Log('UPGRADE FROM ' + gd.current_version + ' TO ' + 
gd.upgrade_version) try: if gd.use_iso_repo: img_repo_dir = image + '/' + gd.iso_repo_dir if not os.path.isdir(img_repo_dir): os.mkdir(img_repo_dir) bindMount(gd.iso_repo_dir, img_repo_dir) gd.setCommonVars(image) # Change root to image directory real_root = os.open("/", os.O_RDONLY) os.chroot(image) if not Exec(gd.CHK_REQ, CheckRequiredDistPackages): sys.exit(1) if not Exec(gd.UR_HEADER, UpdateCmRepoConfiguration): sys.exit(1) if len(gd.dep_solve_blockers) > 0: if not Exec(gd.RD_HEADER, RemoveDepSolveBlockerRpms): sys.exit(1) # This is not needed for the software images right now # if not Exec(gd.IP_HEADER, InstallPrereqCmPackages): # sys.exit(1) if not Exec(gd.UI_HEADER, UpgradeInit): sys.exit(1) if packageInstalled(gd.cm_config_cm, gd.current_version) != None: if not Exec(gd.UCM_HEADER, UpdateCmConfigCmRpm): sys.exit(1) if not Exec(gd.CU_HEADER, CmUpgrade): sys.exit(1) if not Exec(gd.RO_HEADER, RemoveObseletedPackages): sys.exit(1) if len(gd.new_cm_packages_image[gd.current_version]) > 0: if not Exec(gd.UN_HEADER, UpdateNameChangedCmPackages): sys.exit(1) if len(gd.new_dist_packages[gd.current_version]) > 0: if not Exec(gd.IR_HEADER, InstallRequiredNewDistPackages): sys.exit(1) if not Exec(gd.RCU_HEADER, RemoveCmUpgradeFile): sys.exit(1) if not Exec(gd.PRN_HEADER, ProcessRpmNewFiles): sys.exit(1) removeBrokenWlmSymlinks() # Exit chroot os.fchdir(real_root) os.chroot(".") os.close(real_root) cleanUp(image) except Exception, e: printException(str(e), True) except Exception, e: printException(str(e), True) # All class definitions class MyThread(threading.Thread): def join(self): super(MyThread, self).join() return self.result class CmSharedMountCheck(MyThread): def run(self): good = True try: fin = open('/proc/mounts', 'r') for line in fin: line = line.strip('\n') parts = line.split(' ') if '/cm/shared' in parts[1]: good = False gd.errors.append('Mounted: ' + line) break fin.close() except Exception, e: logger.Log(str(e)) good = True self.result = good class CheckIfNodesUp(MyThread): def run(self): r = True try: cmd = Cmdaemon() if cmd.connected: nodelist = cmd.nodesNotDown() if len(nodelist) > 0: for node in nodelist: gd.errors.append('Node ' + node.resolveName() + ' is in state: ' + node.status().name) r = False cmd.disconnect() else: r = False except Exception, e: logger.Log(str(e)) r = False self.result = r class CmStopServices(MyThread): def run(self): r = True try: if stopService(['/cm/local/apps/cmd/sbin/cmd'], 'cmd'): for service in gd.wlm_services: if not stopService(gd.wlm_services[service], service): r = False else: logger.Log("Unable to stop cmdaemon service") r = False except Exception, e: logger.Log(str(e)) r = False self.result = r class CheckIfReposReachable(MyThread): def run(self): r = True try: rc = RunCommand() rc.run(gd.repo_clean) if gd.repo_refresh != None: logger.LogV('Running refresh: ' + str(gd.repo_refresh)) rc.run(gd.repo_refresh) rc.run(gd.list_updates + [gd.cm_config_cm]) r = rc.good if not r: logger.LogV('list updates failed, trying list available') rc.run(gd.list_available + [gd.cm_config_cm]) r = rc.good logger.LogV('list available exit code: ' + str(rc.proc.returncode)) if not r: c_v = getCurrentVersion(gd.cm_release_file) if gd.upgrade_version == c_v: r = True elif packageInstalled('cm-config-cm', gd.upgrade_version): r = True else: logger.Log('Failed to fetch new ' + gd.cm_config_cm + ' rpm from the repositories.') gd.errors.append('Repository validation failed.') except Exception, e: logger.Log(str(e)); r = False self.result = r class 
CheckRequiredDistPackages(MyThread): def run(self): r = True try: to_check = [] for p in gd.required_dist_packages: if packageInstalled(p) == None: to_check.append(p) else: logger.Log('Package %s is installed' %(p)) if len(to_check) > 0: for p in to_check: rc = RunCommand() rc.run(gd.list_available + [p]) if not rc.good: found = packageFoundOnLocalSource(p) if found != None: rc.run(gd.rpm_install + [found]) if not rc.good: r = False gd.errors.append('Failed to install package %s' %(found)) else: logger.Log('Installed packages %s' %(found)) else: r = False gd.errors.append('Required package %s not installed and not found in repos' %(p)) except Exception, e: logger.Log(str(e)) r = False self.result = r class ApplyExistingUpdates(MyThread): def run(self): r = True try: rc = RunCommand() rc.run(gd.repo_clean) if gd.repo_refresh != None: rc.run(gd.repo_refresh) rc.run(gd.upgrade_cmd) r = rc.good except Exception, e: logger.Log(str(e)); r = False self.result = r class BackupCurrentCmdaemonConfiguration(MyThread): def run(self): r = True try: outfile = gd.cmd_backup_file if os.path.exists(gd.cmd_backup_file): outfile = getNextFile(gd.cmd_backup_file) shutil.copy(gd.cmd_backup_file, outfile) os.remove(gd.cmd_backup_file) rc = RunCommand() rc.run([gd.cmd_bin, '-x', gd.cmd_backup_file]) r = rc.good if r: if os.path.isfile(gd.cmd_backup_file): if filecmp.cmp(gd.cmd_backup_file, outfile): os.remove(outfile) # Create backup of important config files processBackupFiles([gd.my_cnf, gd.root_bashrc, gd.cmd_conf, gd.motd_file, gd.slurm_conf, gd.nd_conf_file], True) except Exception, e: logger.Log(str(e)); r = False self.result = r class UpdateCmRepoConfiguration(MyThread): def run(self): r = True try: rc = RunCommand() """ Disable repos if provided on command line """ if gd.is_sles and (len(gd.disable_repos) > 0): logger.Log('Disable repos %s' %(','.join(gd.disable_repos))) rc.run(gd.disable_repo_cmd + gd.disable_repos) if gd.use_iso_repo: createDvdRepoFile() disableRepos(gd.current_cm_repos) else: createPrereqDistRepoFile() for f in gd.current_cm_repos: if os.path.isfile(f): already_excluded = False fin = open(f, 'r') fout = open(f + '.new', 'wt') for line in fin: line = line.strip('\n') if gd.current_version in line: line = line.replace(gd.current_version, gd.upgrade_version) elif gd.upgrade_excludes in line: already_excluded = True fout.write(line + '\n') # Add exclude line only if not already excluded if not already_excluded: if not gd.wlm_upgrade: fout.write('exclude=' + gd.upgrade_excludes + '\n') fout.close() fin.close() shutil.move(f + '.new', f) else: logger.Log('Repo config file ' + f + ' does not exist') if gd.is_sles: if not gd.wlm_upgrade: rc.run(gd.repo_lock + [gd.upgrade_excludes]) r = rc.good if not r: logger.Log('Failed to run ' + gd.repo_lock + ' ' + gd.upgrade_excludes) # Refresh and clear repo cache rc.run(gd.repo_clean) if gd.repo_refresh != None: rc.run(gd.repo_refresh) except Exception, e: logger.LogE(str(e)); r = False self.result = r class RemoveDepSolveBlockerRpms(MyThread): def run(self): r = True try: rc = RunCommand() p_list = [] for package in gd.dep_solve_blockers: p = packageInstalled(package, gd.current_version) if p != None: p_list.append(p) else: logger.Log('Package ' + package + ' not installed, nothing to remove'); for p in p_list: rc.run(gd.rpm_erase_nodeps + [p]) r = rc.good except Exception, e: logger.LogE(str(e)); r = False self.result = r class InstallPrereqCmPackages(MyThread): def run(self): r = True try: rc = RunCommand() rc.run(gd.install_cmd + gd.prerequisites) 
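            # The exit status of the package manager run above determines the result below;
            # gd.prerequisites currently lists python-boto and gd.install_cmd is the
            # yum/zypper install command assembled in GlobalDefs.setCommonVars().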
r = rc.good except Exception, e: logger.Log(str(e)); r = False self.result = r class UpgradeInit(MyThread): def run(self): r = True try: if not os.path.isfile(gd.UPGRADE_file): f = open(gd.UPGRADE_file, 'a') f.close() except Exception, e: logger.LogE(str(e)); r = False self.result = r class UpdateCmConfigCmRpm(MyThread): def run(self): r = True try: rc = RunCommand() rc.run(gd.rpm_erase_nodeps + [gd.cm_config_cm]) r = rc.good if r: if gd.repo_refresh != None: rc.run(gd.repo_refresh) rc.run(gd.install_cmd + [gd.cm_config_cm]) r = rc.good if not gd.use_iso_repo: rc.run(gd.install_cmd + ['cm-config-ceph-release']) r = rc.good except Exception, e: logger.LogE(str(e)); r = False self.result = r class CmUpgrade(MyThread): def run(self): r = True try: exclude_list = [] for k, v in gd.exclude_from_updates.items(): if packageInstalled(k) != None: for p in v[:]: logger.Log('Package %s is installed and must be excluded' %(p)) if gd.is_sles: exclude_list.append(p) else: exclude_list.append('--exclude=%s' %(p)) rc = RunCommand() if len(exclude_list) > 0: if gd.is_sles: rc.run(gd.repo_lock + [exclude_list]) rc.run(gd.upgrade_cmd) else: rc.run(gd.upgrade_cmd + exclude_list) else: rc.run(gd.upgrade_cmd) r = rc.good if r: if not os.path.exists(gd.upgraded_from): writeFile(gd.upgraded_from, gd.current_version) """ If using a DVD to upgrade, disable default cm repo again, because they get enabled by default when the RPM gets updated. Yum commands will fail, if the repos are not reachable. """ if gd.use_iso_repo: disableRepos(gd.current_cm_repos) except Exception, e: logger.LogE(str(e)); r = False self.result = r class RemoveObseletedPackages(MyThread): def run(self): r = True try: rc = RunCommand() p_list = [] for package in gd.obselete_packages[gd.current_version]: p = packageInstalled(package, gd.current_version) if p != None: p_list.append(p) else: logger.Log('Package ' + package + ' not installed, nothing to remove'); for p in p_list: logger.Log('Will remove package: ' + p); rc.run(gd.rpm_erase_nodeps + [p]) r = rc.good except Exception, e: logger.LogE(str(e)); r = False self.result = r class UpdateNameChangedCmPackages(MyThread): def run(self): r = True try: rc = RunCommand() if gd.update_images: rc.run(gd.install_cmd + gd.new_cm_packages_image[gd.current_version]) else: if gd.is_sles: rc.run(gd.install_cmd + gd.new_cm_packages_sles[gd.current_version]) else: rc.run(gd.install_cmd + gd.new_cm_packages[gd.current_version]) r = rc.good except Exception, e: logger.LogE(str(e)); r = False self.result = r class InstallRequiredNewDistPackages(MyThread): def run(self): r = True try: rc = RunCommand() rc.run(gd.install_cmd + [gd.new_dist_packages[gd.current_version]]) r = rc.good except Exception, e: logger.LogE(str(e)); r = False self.result = r class UpdateCudaRpms(MyThread): def run(self): r = True try: rc = RunCommand() # Remove old cuda rpms for p in gd.cuda_rpms[gd.current_version]: cuda_rpms = installedPackages(p, gd.current_version) if len(cuda_rpms) > 0: rc.run(gd.remove_cmd + cuda_rpms) # Install new cuda rpms for p in gd.cuda_rpms[gd.upgrade_version]: rc.run(gd.install_cmd + [p + '*']) except Exception, e: logger.LogE(str(e)); r = False self.result = r class RemoveCmUpgradeFile(MyThread): def run(self): r = True try: if os.path.isfile(gd.UPGRADE_file): os.remove(gd.UPGRADE_file) except Exception, e: logger.LogE(str(e)); r = False self.result = r class ProcessRpmNewFiles(MyThread): def run(self): r = True try: cmd_conf_rpmnew = gd.cmd_conf + '.rpmnew' cmd_conf_rpmnew_new = gd.cmd_conf + '.rpmnew.new' 
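            # cmd.conf merge strategy: walk the .rpmnew file shipped by the new package,
            # carry over values from the existing cmd.conf wherever they differ (except for
            # directives listed in gd.skipOldCmdConfDirectives), append any old directives
            # that the new file does not contain, and replace cmd.conf with the merged
            # result. The filesystem (minus other mount points and /cm/images) is then
            # scanned for remaining .rpmnew/.rpmsave files so they can be reported later.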
if os.path.isfile(cmd_conf_rpmnew): cmd_conf_old = readCmdConf(gd.cmd_conf) cmd_conf_new = readCmdConf(cmd_conf_rpmnew) fin = open(cmd_conf_rpmnew, 'r') fout = open(cmd_conf_rpmnew_new, 'wt') for line in fin: line = line.strip('\n') (var, value) = skipLineCmdConf(line) if var != None and value != None: if (var in cmd_conf_old) and (cmd_conf_old[var] != value): if not var in gd.skipOldCmdConfDirectives: line = line.replace(value, cmd_conf_old[var]) fout.write(line + '\n') # Add additional directives defined in old config for key in cmd_conf_old.keys(): if not key in cmd_conf_new: logger.Log('Writing additional conf ' + key + ' = ' + cmd_conf_old[key]) fout.write(key + ' = ' + cmd_conf_old[key] + '\n') fout.close() fin.close() shutil.move(cmd_conf_rpmnew_new, gd.cmd_conf) os.system('chmod 600 ' + gd.cmd_conf) gd.rpm_new_files = [] for root, dirnames, filenames in os.walk('/'): dirnames[:] = [dir for dir in dirnames if not os.path.ismount(os.path.join(root, dir))] if '/cm/images' not in root: for p in ['*.rpmnew', '*.rpmsave']: for filename in fnmatch.filter(filenames, p): gd.rpm_new_files.append(os.path.join(root, filename)) except Exception, e: logger.LogE(str(e)); r = False self.result = r class RestoreRequiredFilesFromBackup(MyThread): def run(self): r = True try: processBackupFiles([gd.my_cnf, gd.root_bashrc], False) except Exception, e: logger.LogE(str(e)); r = False self.result = r class UpdateWlmConf(MyThread): def run(self): r = True try: if not gd.update_images: if os.path.exists(gd.slurm_conf): """ Update slurm conf file """ fin = open(gd.slurm_conf, 'r') fout = open(gd.slurm_conf + '.new', 'wt') for line in fin: line = line.strip('\n') if '=' in line and not line.startswith('#') and not emptyString(line): (var, value) = line.split('=', 1) var = var.strip(' ') value = value.strip(' ') if var in gd.update_slurm_params[gd.upgrade_version]: if value in gd.update_slurm_params[gd.upgrade_version][var]["old"].split(','): fout.write('%s=%s\n' %(var, gd.update_slurm_params[gd.upgrade_version][var]["new"])) else: fout.write(line + '\n') logger.Log('Will not update %s' %(var)) else: fout.write(line + '\n') else: fout.write(line + '\n') fout.close() fin.close() shutil.move(gd.slurm_conf + '.new', gd.slurm_conf) else: logger.Log('Cannot find file ' + gd.slurm_conf) """ Create symbolic link to 'SGE_ROOT/var/default' directory if it exists. This is required when the version of SGE changes. 
""" sge_root = '/cm/shared/apps/sge' sge_current = sge_root + '/current' sge_current_default = sge_current + '/default' sge_var_default = sge_root + '/var/default' if os.path.isdir(sge_current): if not os.path.exists(sge_current_default): if os.path.isdir(sge_var_default): os.symlink(sge_var_default, sge_current_default) else: logger.Log('%s already exists, will not try to create symbolic link' %(sge_current_default)) else: logger.Log('Did not find directory %s, nothing to do' %(sge_current)) removeBrokenWlmSymlinks() except Exception, e: logger.LogE(str(e)); r = False self.result = r class UpdateMotdFile(MyThread): def run(self): r = True try: if os.path.isfile(gd.motd_file + gd.upgrade_backup_suffix): if not filecmp.cmp(gd.motd_file, gd.motd_file + gd.upgrade_backup_suffix): if os.path.isfile(gd.motd_file): os.remove(gd.motd_file) shutil.copy(gd.motd_file + gd.upgrade_backup_suffix, gd.motd_file) if os.path.isfile(gd.cmid_file): f = open(gd.cmid_file, 'r') new_cmid = f.read() new_cmid = new_cmid.strip('\n') f.close() if not emptyString(new_cmid): old_cmid = '00000' cmid_prefix_txt = 'Cluster Manager ID: #' cmid_str_to_replace = cmid_prefix_txt + old_cmid cmversion_prefix_txt = 'Welcome to Bright Cluster Manager ' cmversion_str_to_replace = cmversion_prefix_txt + gd.current_version fin = open(gd.motd_file, 'r') fout = open(gd.motd_file + '.new', 'wt') for line in fin: line = line.strip('\n') if cmid_str_to_replace in line: line = line.replace(cmid_str_to_replace, cmid_prefix_txt + new_cmid) if cmversion_str_to_replace in line: line = line.replace(cmversion_str_to_replace, cmversion_prefix_txt + gd.upgrade_version) fout.write(line + '\n') fout.close() fin.close() shutil.move(gd.motd_file + '.new', gd.motd_file) else: logger.Log('Cannot find file ' + gd.cmid_file); gd.errors.append('Cannot find file ' + gd.cmid_file) r = False except Exception, e: logger.LogE(str(e)); r = False self.result = r class UpdateProvisioners(MyThread): def run(self): r = True try: copyClientCerts() cmd = Cmdaemon() if cmd.connected: if not cmd.updateProvisioners(): r = False # Commit head node to trigger writing the pxelinux.cfg/default file from template cmd.genPxeLinuxCfgDefault() cmd.disconnect() else: r = False except Exception, e: logger.LogE(str(e)); r = False self.result = r class UpdateMetricCollection(MyThread): def run(self): r = True try: copyClientCerts() if restartService('cmd'): cmd = Cmdaemon() if cmd.connected: if not cmd.addNewMetrics(): r = False else: logAndAppend('Failed to connect to cmdaemon, will not add metrics') else: logAndAppend('Failed to restart cmdaemon service, cannot add new metrics.') except Exception, e: logger.LogE(str(e)); r = False self.result = r class DeleteOldNodeCerts(MyThread): def run(self): r = True try: if os.path.isdir(gd.node_certificates_dir): node_cert_dirs = os.listdir(gd.node_certificates_dir) for node_cert_dir in node_cert_dirs: cert_file = gd.node_certificates_dir + '/' + node_cert_dir + '/cert' key_file = gd.node_certificates_dir + '/' + node_cert_dir + '/key' if os.path.isfile(cert_file): os.remove(cert_file) if os.path.isfile(key_file): os.remove(key_file) """ Update node-installer.conf file if required """ if os.path.isfile(gd.nd_conf_file): fin = open(gd.nd_conf_file, 'r') fout = open(gd.nd_conf_file + '.new', 'wt') for line in fin: line = line.strip('\n') if '=' in line and not line.startswith('#') and not emptyString(line): (var, value) = line.split('=', 1) var = var.strip(' ') value = value.strip(' ') if var in 
gd.update_nd_conf_params[gd.upgrade_version]: logger.Log('Updating param %s in %s' %(var, gd.nd_conf_file)) if value in gd.update_nd_conf_params[gd.upgrade_version][var]["old"].split(','): fout.write('%s = %s\n' %(var, gd.update_nd_conf_params[gd.upgrade_version][var]["new"])) else: fout.write(line + '\n') logger.Log('Will not update %s' %(var)) else: fout.write(line + '\n') else: fout.write(line + '\n') fout.close() fin.close() shutil.move(gd.nd_conf_file + '.new', gd.nd_conf_file) os.system('chmod 600 %s' %(gd.nd_conf_file)) else: logger.Log('Node installer conf file not found %s' %(gd.nd_conf_file)) except Exception, e: logger.LogE(str(e)); r = False self.result = r class Font: PURPLE = '\033[95m' CYAN = '\033[96m' DARKCYAN = '\033[36m' BLUE = '\033[94m' GREEN = '\033[32m' YELLOW = '\033[93m' RED = '\033[91m' BOLD = '\033[1m' UNDERLINE = '\033[4m' CLOSE = '\033[0m' class RunCommand: def __init__(self): self.command = [] self.good = True self.proc = None def run(self, cmd): self.command = cmd log = open(gd.logfile, "a") # log.write('Run command: ' + ' '.join(self.command) + '\n') self.proc = Popen(self.command, stdout=log, stderr=log) self.proc.communicate() if self.proc.returncode != 0: self.good = False log.write('Command (' + ' '.join(self.command) + ') failed with exit code: ' + str(self.proc.returncode) + "\n") else: self.good = True log.close() class Rpm: def __init__(self): self.tr_set = None self.db_match = None self.__init_rpm_db__() def __init_rpm_db__(self): self.tr_set = rpm.TransactionSet() self.db_match = self.tr_set.dbMatch() if not self.db_match or self.db_match == None: logger.Log('No packages found in rpm database'); class Logger: def __init__(self, logfile): self.logger = logging.getLogger('cmupgradelog') self.hdlr = logging.FileHandler(logfile) self.formatter = logging.Formatter('(%(asctime)s) %(message)s') self.hdlr.setFormatter(self.formatter) self.logger.addHandler(self.hdlr) self.logger.setLevel(logging.INFO) def Log(self, msg, lspace=0): for x in msg.split('\n'): if not emptyString(x): self.logger.info(x) def LogV(self, msg, lspace=0): if gd.verbose: for x in msg.split('\n'): if not emptyString(x): self.logger.info('VERBOSE: ' + x) def LogE(self, msg, lspace=0): exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] self.Log('Exception in ' + fname + '(line no:' + str(exc_tb.tb_lineno) + '): ' + msg) class Cmdaemon(): def __init__(self): self.connected = False self.cm = None self.cluster = None self.up_keys = [] self.__connect__() def __connect__(self): try: import pythoncm from readconfig import cmdconf self.cm = pythoncm.ClusterManager() self.cluster = self.cm.addCluster(cmdconf.cmdurl, cmdconf.clientpem, cmdconf.clientkey) self.node_status_up = pythoncm.Status.UP self.node_status_down = pythoncm.Status.DOWN self.valid_node_down_states = [self.node_status_down] retry = 0 while retry < 6: if not self.cluster.connect(): retry += 1 logger.Log('Connect to cluster (attempt ' + str(retry) + ')') try: self.cluster.refreshAll() except: pass time.sleep(10) else: self.connected = True break except Exception, e: logger.Log(str(e)) def disconnect(self): try: self.cluster.disconnect() except Exception, e: logger.Log(str(e)) def updateProvisioners(self): try: self.up_keys = self.provisioningNodesUp(True) update_provisioners = self.cluster.updateProvisioners() if not 'Failed' in update_provisioners: provisioning_busy = True while provisioning_busy: time.sleep(5) provisioning_status = self.cluster.provisioningStatus() 
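                # Poll every 5 seconds until each provisioning node that was UP when the
                # update started reports upToDate (see isProvisioningBusy).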
provisioning_busy = self.isProvisioningBusy(provisioning_status) return True else: num_provisioning_nodes = self.getNumberOfProvisioningNodes() if num_provisioning_nodes != 1: logger.Log(update_provisioners + ':' + self.cluster.getLastError()) return False else: return True except Exception, e: logger.Log(str(e)) return False def getNumberOfProvisioningNodes(self): n = 0 try: all_nodes = self.cluster.getAll('Node') for node in all_nodes: for r in node.roles: if r.name == 'provisioning': n = n + 1 logger.Log('Number of provisioning nodes: %s' %(n)) except Exception, e: logger.Log(str(e)) return n def isProvisioningBusy(self, status): r = False try: provisioning_node_status_list = status.provisioningNodeStatusList for node_status in provisioning_node_status_list: if node_status.nodeKey in self.up_keys: if False in node_status.upToDate: logger.LogV('Waiting for provisioning to complete on ' + str(node_status.nodeKey) + ', upToDate: ' + str(node_status.upToDate)) r = True else: logger.Log('Provisioning completed on ' + str(node_status.nodeKey) + ', upToDate: ' + str(node_status.upToDate)) except Exception, e: logger.Log(str(e)) r = False return r def genPxeLinuxCfgDefault(self): try: head_nodes = self.cluster.getAll('MasterNode') for head_node in head_nodes: c = head_node.commit() if not c.result: logger.Log("Head node commit failed") for j in range(c.count): logger.Log(c.getValidation(j).msg) else: logger.Log("Head node commit success") except Exception, e: logger.Log(str(e)) def addNewMetrics(self): r = True try: if gd.upgrade_version in gd.newmetricdata.keys(): conf = json.loads(gd.newmetricdata[gd.upgrade_version]) new_metrics = [] # Add metrics import pythoncm for metric in conf: oneMetric = pythoncm.Metric() oneMetric.name = unicodedata.normalize('NFKD',metric).encode('ascii', 'ignore') oneMetric.command = unicodedata.normalize('NFKD', conf[metric]['command']).encode('ascii', 'ignore') oneMetric.metricClass = pythoncm.MetricClass.PROTOTYPE if 'class' in conf[metric]: klass = unicodedata.normalize('NFKD', conf[metric]['command']).encode('ascii', 'ignore') if (klass == "DISK"): oneMetric.metricClass = pythoncm.MetricClass.DISK timeout = 5 if 'timeout' in conf[metric]: timeout = conf[metric]['timeout'] oneMetric.scriptTimeout = timeout oneMetric.extendedEnvironment = True if self.cluster.add(oneMetric): c = oneMetric.commit() if not c.result: for j in range(c.count): logger.Log(c.getValidation(j).msg) logger.Log(self.cluster.getLastError()) else: logger.Log("Committed: %s" % oneMetric.name) new_metrics.append(oneMetric) else: logger.Log('Metric %s already exists ' % oneMetric.name) categories = self.cluster.getAll('nodecategory') monConfs = [self.cluster.find('HeadNode', 'MonConf')] for cat in categories: monConfs.append(self.cluster.find(cat.monConfId)) # Add mon metric confs i = 0 for mc in monConfs: for metric in new_metrics: if not conf[metric.name]['enabled']: logger.Log('Metric not enabled %s' %metric.name) continue for_nodes = 1 for_headnodes = 1 if 'nodes' in conf[metric.name]: for_nodes = conf[metric.name]['nodes'] if 'headnode' in conf[metric.name]: for_headnodes = conf[metric.name]['headnode'] if ((i == 0 and for_headnodes == 0) or (i > 0 and for_nodes == 0)): logger.Log('skip %s' % metric.name) continue mmc = pythoncm.MonMetricConf() mmc.metric = metric si = 120 if 'samplingInterval' in conf[metric.name]: si = conf[metric.name]['samplingInterval'] mmc.samplingInterval = si mc.metrics += [mmc] if mc.modified: c = mc.commit() if not c.result: logger.Log('monconf commit failed for 
%s' % mc.resolveName()) for j in range(c.count): logger.Log(c.getValidation(j).msg) logger.Log(self.cluster.getLastError()) else: logger.Log('monconf commit succeeded for %s' % mc.resolveName()) else: logger.Log('monconf not modified %s' % mc.resolveName()) i += 1 else: logger.Log("No new metrics to be added for " + gd.upgrade_version) except Exception, e: logger.Log(str(e)) r = False return r def nodesNotDown(self): nodelist = [] try: for node in self.cluster.getAll('SlaveNode'): if not node.status() in self.valid_node_down_states: nodelist.append(node) return nodelist except Exception, e: logger.Log(str(e)) return nodelist def provisioningNodesUp(self, keys=True): nodelist = [] try: for node in self.cluster.getAll('Node'): if self.hasRole(node, 'provisioning'): if node.status() == self.node_status_up: logger.Log('Found provisioning node UP: ' + str(node.uniqueKey)) if keys: nodelist.append(node.uniqueKey) else: nodelist.append(node) return nodelist except Exception, e: logger.Log(str(e)) return nodelist def hasRole(self, node, role_name): r = False try: for role in node.roles: if role.name == role_name: r = True except Exception, e: logger.Log(str(e)) r = False return r class GlobalDefs: def __init__(self): self.logfile = '/var/log/cm-upgrade.log' self.cmd_dir = '/cm/local/apps/cmd' self.cmd_bin = self.cmd_dir + '/sbin/cmd' self.cmd_conf = self.cmd_dir + '/etc/cmd.conf' self.nd_conf_file = '/cm/node-installer/scripts/node-installer.conf' self.cm_release_file = '/etc/cm-release' self.cmd_state_file = '/var/spool/cmd/state' self.softwareimages_maindir = '/cm/images' self.additional_image_base_dirs = [] self.iso_rpms_dir = '/data/cm-rpms' self.upgrade_version = None self.current_version = None self.update_images = False self.cmdaemon_state = None self.images_to_update = [] self.use_iso_repo = False self.iso_repo_path = None self.valid_cmdaemon_states = ['ACTIVE', 'PASSIVE'] self.upgrade_possibilities = ['7.0'] self.upgrade_options = {"6.0":["7.0"], "6.1":["7.0"], "7.0":[]} self.obselete_packages = {"6.0":['gotoblas', 'lm_sensors', 'rsync-cm', 'cmgui-json-dist', 'mlnx-ofed', 'libnet', 'mpich2-ge-gcc-64', 'mpich2-ge-pgi-64', 'mpich2-ge-open64-64', 'mpich2-ge-intel-64', 'fftw3-open64-64', 'fftw2-open64-64', 'fftw3-gcc-64', 'fftw2-gcc-64', 'freeipmi', 'conman', 'ipmitool', 'iozone', 'stresscpu', 'globalarrays-open64-openmpi-64', 'globalarrays-gcc-openmpi-64', 'globalarrays-intel-openmpi-64', 'globalarrays-pgi-openmpi-64', 'pbspro-slave'], "6.1":['mlnx-ofed', 'libnet']} self.new_cm_packages = {"6.0":['syslog-ng', 'openblas', 'cm-iozone', 'cm-conman', 'cm-freeipmi', 'cm-ipmitool', 'fftw2-openmpi-gcc-64', 'fftw3-openmpi-gcc-64', 'fftw2-openmpi-open64-64', 'fftw3-openmpi-open64-64', 'globalarrays-openmpi-gcc-64', 'globalarrays-openmpi-open64-64', 'pbspro-client', 'genders', 'pdsh', 'pdsh-mod-cmupdown', 'pdsh-mod-genders', 'pdsh-rcmd-exec', 'pdsh-rcmd-ssh'], "6.1":['genders', 'pdsh', 'pdsh-mod-cmupdown', 'pdsh-mod-genders', 'pdsh-rcmd-exec', 'pdsh-rcmd-ssh']} self.new_cm_packages_sles = {"6.0":['syslog-ng', 'openblas', 'cm-iozone', 'cm-conman', 'cm-freeipmi', 'cm-ipmitool', 'fftw2-openmpi-gcc-64', 'fftw3-openmpi-gcc-64', 'globalarrays-openmpi-gcc-64', 'pbspro-client', 'genders', 'pdsh', 'pdsh-mod-cmupdown', 'pdsh-mod-genders', 'pdsh-rcmd-exec', 'pdsh-rcmd-ssh'], "6.1":['genders', 'pdsh', 'pdsh-mod-cmupdown', 'pdsh-mod-genders', 'pdsh-rcmd-exec', 'pdsh-rcmd-ssh']} self.new_cm_packages_image = {"6.0":['cm-freeipmi', 'cm-ipmitool', 'pbspro-client'], "6.1":[]} self.new_dist_packages = {"6.0":[], 
"6.1":[]} self.cuda_rpms = {"6.0":['cuda40', 'cuda41', 'cuda42', 'cuda50'], "6.1":['cuda42', 'cuda50'], "7.0":['cuda60']} self.slurm_conf = '/cm/shared/apps/slurm/var/etc/slurm.conf' self.update_slurm_params = {"7.0":{"PrologSlurmctld":{"old":"/cm/local/apps/cmd/scripts/prolog-healthchecker,/cm/local/apps/cmd/scripts/prolog", "new":"/cm/local/apps/cmd/scripts/prolog-prejob"}}} self.update_nd_conf_params = {"7.0":{"privateKeyBits":{"old":"512", "new":"2048"}}} self.verbose = False self.non_interactive = False self.iso_repo_dir = None self.progresschars=['[ . ]','[ .. ]','[ ... ]','[ .... ]'] self.upgrade_excludes = 'slurm* sge* pbspro* torque* maui* moab* cm-hwloc*' self.stable_release_num = None self.minimum_stable_release = {"7.0":"7.0-7"} self.init_complete = False self.cm_config_cm = 'cm-config-cm' self.rpm_erase_nodeps = 'rpm -e --nodeps'.split(' ') self.rpm_install = 'rpm -i'.split(' ') self.disable_repos = [] self.do_not_unmount = False self.dep_solve_blockers = ['boto', 'globalarrays-pgi-openmpi-64', 'globalarrays-intel-openmpi-64', 'globalarrays-open64-openmpi-64', 'globalarrays-gcc-openmpi-64'] self.prerequisites = ['python-boto'] self.upgrade_backup_suffix = '.upgrade_backup' self.upgrade_new_suffix = '.upgrade_new' self.node_certificates_dir = '/cm/node-installer/certificates' self.errors = [] self.sge_root = '/cm/shared/apps/sge/current' self.torque_root = '/cm/shared/apps/torque/current' self.pbspro_root = '/cm/shared/apps/pbspro/current' self.slurm_root = '/cm/shared/apps/slurm/current' self.wlm_services = {"sgemaster.sge1":[self.sge_root + '/bin/linux-x64/sge_qmaster'], "sgeexecd":[self.sge_root + '/bin/linux-x64/sge_execd'], "maui":[], "moab":[], "torque_server":[self.torque_root + '/sbin/pbs_server'], "torque_mom":[self.torque_root + '/sbin/pbs_mom'], "torque_sched":[self.torque_root + '/sbin/pbs_sched'], "trqauthd":[self.torque_root + '/sbin/trqauthd'], "pbs":[self.pbspro_root + '/sbin/pbs_server', self.pbspro_root + '/sbin/pbs_mom', self.pbspro_root + '/sbin/pbs_server.bin'], "slurm":[self.slurm_root + '/sbin/slurmctld', self.slurm_root + '/sbin/slurmd'], "slurmdbd":[self.slurm_root + '/sbin/slurmdbd']} self.upgraded_from = '/var/spool/cmd/upgraded_from' self.rpm_new_files = [] self.exclude_from_updates = {'cmgui-ddn':['cmgui']} self.valid_dist_path = {'centos6':['centos/6u4', 'centos/6u5', 'centos/6'], 'rhel6':['rhel/6u4', 'rhel/6u5', 'rhel/6u6'], 'sl6':['sl/6'], 'oel6':['oel/6'], 'sles11sp2':['sles/11sp2'], 'sles11sp3':['sles/11sp3']} self.unsupported_dists = ['RHEL5', 'CENTOS5', 'SL5', 'SLES11sp1', 'RHEL6u3 and older', 'CENTOS6u3 and older', 'SL6u3 and older'] self.cuda_upgrade = False self.wlm_upgrade = True self.force_upgrade = False self.cmd_start_wait = 30 self.max_len = 80 self.skip_node_states_check = False self.skipOldCmdConfDirectives = ['EnableJSON', 'EnableShellService'] self.newmetricdata = {"7.0":'{\ "hadoop" :{"enabled": 1, "timeout": 20, "command": "/cm/local/apps/cmd/scripts/metrics/hadoop/sample_hadoop", "samplingInterval":120},\ "hdfs_usage" : {"enabled": 1, "timeout": 20, "command": "/cm/local/apps/cmd/scripts/metrics/hadoop/sample-hdfs-usage"},\ "hdfsadmin_report" : {"enabled": 1, "timeout": 20, "command": "/cm/local/apps/cmd/scripts/metrics/hadoop/sample-hdfsadmin-report", "samplingInterval":120},\ "hdfsadmin_ls" : {"enabled": 1, "timeout": 20, "command": "/cm/local/apps/cmd/scripts/metrics/hadoop/hadoop-hdfs-ls"},\ "hdfsadmin_namenode" : {"enabled": 1, "timeout": 20, "command": "/cm/local/apps/cmd/scripts/metrics/hadoop/hadoop-hdfs-namenode"},\ 
"hdfsadmin_nodecapacity" : {"enabled": 1, "timeout": 20, "command": "/cm/local/apps/cmd/scripts/metrics/hadoop/hadoop-hdfs-nodecapacity"},\ "hdfsadmin_balancer" : {"enabled": 0, "timeout": 20, "command": "/cm/local/apps/cmd/scripts/metrics/hadoop/hadoop-hdfs-balancer"},\ "hdfsadmin_fsck" : {"enabled": 0, "timeout": 30, "command": "/cm/local/apps/cmd/scripts/metrics/hadoop/hadoop-hdfs-fsck"},\ "hdfsadmin_testpath" : {"enabled": 1, "timeout": 30, "command": "/cm/local/apps/cmd/scripts/metrics/hadoop/hadoop-hdfs-testpath"},\ "yarn_nodemanager" : {"enabled": 1, "timeout": 20, "command": "/cm/local/apps/cmd/scripts/metrics/hadoop/hadoop-yarn-nodemanager"},\ "ceph_global" : {"enabled": 1, "timeout": 20, "command": "/cm/local/apps/cmd/scripts/metrics/ceph/ceph_global", "nodes": 0, "headnode": 1},\ "ceph" : {"enabled": 1, "timeout": 30, "command": "/cm/local/apps/cmd/scripts/metrics/ceph/sample_ceph"},\ "cimc" : {"enabled": 1, "timeout": 30, "command": "/cm/local/apps/cmd/scripts/metrics/ucsmetrics"},\ "dellnss" : {"enabled": 1, "timeout": 40, "command": "/cm/local/apps/cmd/scripts/metrics/sample_dell_nss"},\ "checkdellnss" : {"enabled": 1, "timeout": 40, "command": "/cm/local/apps/cmd/scripts/healthchecks/dellnss"}}'} def setHeaders(self): self.AE_HEADER = 'Apply existing updates to Bright ' + gd.current_version self.BC_HEADER = 'Backup current configuration of Bright ' + gd.current_version self.UR_HEADER = 'Update repository configs' self.PU_HEADER = 'Process upgrade excludes' self.IP_HEADER = 'Install pre-requisites' self.UI_HEADER = 'Setting up upgrade' self.UCM_HEADER = 'Update cm-config-cm' self.RD_HEADER = 'Remove dependency solver blocks' self.CU_HEADER = 'Upgrade packages to Bright ' + gd.upgrade_version self.RO_HEADER = 'Remove obseleted packages' self.UN_HEADER = 'Install packages with name changes' self.IR_HEADER = 'Install new required distribution packages' self.CUDA_HEADER = 'Update CUDA packages' self.RCU_HEADER = 'Disable upgrade mode' self.PRN_HEADER = 'Process config file changes' self.RR_HEADER = 'Restore backed-up files' self.MOTD_HEADER = 'Update /etc/motd file' self.UPDATE_WLM_CONF = 'Update workload manager configuration' self.UPR_HEADER = 'Updating provisioners' self.DOC_HEADER = 'Delete old node certificates' self.REPOS_REACHABLE = 'Validating Bright ' + gd.upgrade_version + ' repositories' self.NODES_UP = 'Checking if nodes are still UP' self.STOP_SERV = 'Stopping cmd and workload manager services' self.SS_MNT = 'Checking if /cm/shared is mounted' self.MET_UPD = 'Adding new metrics' self.CHK_REQ = 'Checking required dist packages' self.cmd_backup_file = '/var/spool/cmd/cmd-backup-' + gd.current_version + '.xml' def setCommonVars(self, root=''): self.UPGRADE_file = '/cm/UPGRADE' self.root_bashrc = '/root/.bashrc' self.my_cnf = '/etc/my.cnf' self.motd_file = '/etc/motd' self.cmid_file = '/cm/CLUSTERMANAGERID' if os.path.isfile('/etc/SuSE-release'): self.is_sles = True self.repo_dir = '/etc/zypp/repos.d' self.dvd_repo_file = self.repo_dir + '/cm-' + gd.upgrade_version + '-dvd.repo' self.prereq_dist_repo_file = self.repo_dir + '/prereq-dist.repo' self.main_cmd = 'zypper -n --no-gpg-checks' self.remove_cmd = (self.main_cmd + ' remove').split(' ') self.upgrade_cmd = (self.main_cmd + ' update').split(' ') self.update_cmd = (self.main_cmd + ' update').split(' ') self.install_cmd = (self.main_cmd + ' install').split(' ') self.repo_refresh = (self.main_cmd + ' refresh').split(' ') self.repo_clean = (self.main_cmd + ' clean').split(' ') self.repo_lock = (self.main_cmd + ' 
    def getAllImageDirs(self, ilist=[]):
        result = []
        try:
            if len(ilist) == 0:
                if len(self.additional_image_base_dirs) > 0:
                    for d in self.additional_image_base_dirs:
                        for imgdir in os.listdir(d):
                            result.append(d + '/' + imgdir)
                else:
                    for imgdir in os.listdir(self.softwareimages_maindir):
                        result.append(self.softwareimages_maindir + '/' + imgdir)
            else:
                for img in ilist:
                    if len(self.additional_image_base_dirs) > 0:
                        for d in self.additional_image_base_dirs:
                            result.append(d + '/' + img)
                    else:
                        result.append(self.softwareimages_maindir + '/' + img)
        except Exception, e:
            print str(e)
            logAndAppend(str(e), False)
        return result
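
    # Illustrative sketch (assumption, for clarity only): with the default
    # image location, getAllImageDirs(['default-image']) would return
    # ['/cm/images/default-image'], while getAllImageDirs() lists every
    # directory under /cm/images (or under the extra base directories
    # given with -a/--imagebasedirs).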
    def usage(self):
        print """
%s
  %s -u <version> [OTHER OPTIONS]

  -u | --version              %s
""" %(self.bf('USAGE:'), sys.argv[0], '|'.join(self.upgrade_possibilities))
        print self.bf('OTHER OPTIONS')
        print """\
  -n | --noninteractive       Run in non-interactive mode, no questions will be asked
  -h | --help                 Display detailed information about actions of this script
  -v | --verbose              Print verbose output
  -f | --force                Force re-run of upgrade. Useful if the upgrade had been
                              interrupted or had failed unexpectedly
  -c | --cudaupgrade          Also upgrade CUDA packages
  -s | --skipnodestatescheck  Skip checking node states; this is useful when cmdaemon
                              is in an unreachable state
  -i | --image                List of software image names (comma separated)
  -a | --imagebasedirs        List of additional directories in which to find software
                              images if they are not in the standard location:
                              /cm/images (comma separated)
  -d | --useiso               Use Bright ISO/DVD for upgrade
  -e | --disablerepos         Disable repositories during upgrade. This is helpful in
                              preventing yum from failing when some of the configured
                              repositories are not reachable (comma separated list of
                              repo ids or globs, or a combination of both)
"""
        print self.bf('EXAMPLES')
        print """\
  1. Upgrade head node to Bright 7.0
     cm-upgrade -u 7.0
  2. Upgrade head node to Bright 7.0, do not wait for user confirmation
     cm-upgrade -u 7.0 -n
  3. Upgrade software image 'sles11sp3-image' to Bright 7.0 using Bright ISO
     cm-upgrade -u 7.0 -i sles11sp3-image -d /root/bright7.0-sles11sp3.iso
  4. Upgrade head node using Bright ISO
     cm-upgrade -u 7.0 -d /root/bright7.0-centos6.iso
  5. Upgrade all software images
     cm-upgrade -u 7.0 -i all
  6. Upgrade all software images, including images in the additional locations /opt/images and /usr/extra/images
     cm-upgrade -u 7.0 -i all -a '/opt/images,/usr/extra/images'
  7. Upgrade all software images using Bright ISO
     cm-upgrade -u 7.0 -i all -d /root/bright7.0-sles11sp3.iso
  8. Also upgrade CUDA packages as part of the upgrade
     cm-upgrade -u 7.0 -c
"""

    def bf(self, txt):
        return Font.BOLD + txt + Font.CLOSE

    def cf(self, txt, color):
        return Font.__dict__[color] + txt + Font.CLOSE
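
    # Illustrative sketch (assumption, for clarity only): bf() wraps text in
    # the bold terminal escape codes defined by the Font helper class, and
    # cf() looks a color attribute up by name, e.g.:
    #
    #   print self.bf('USAGE:')           # Font.BOLD + 'USAGE:' + Font.CLOSE
    #   print self.cf('warning', 'BLUE')  # Font.BLUE + 'warning' + Font.CLOSE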
""" %(self.bf('USAGE'), self.bf('EXAMPLES')) print self.bf('UNSUPPORTED LINUX DISTRIBUTIONS') print """\ * The following Linux base distributions are not supported: * RHEL5, SL5, CENTOS5 * SLES11sp1 * RHEL6u3 and older, SL6u3 and older, CENTOS6u3 and older """ print self.bf('SUPPORT') print """\ The Bright Cluster Manager Administration Manual describes how to send bug reports to Bright Computing. Please attach corresponding upgrade logfiles (see %s), along with support requests related to upgrade. """ %(self.bf('LOG FILES')) if __name__ == "__main__": try: global gd, logger gd = GlobalDefs() logger = Logger(gd.logfile) main() except Exception, e: printException(str(e), True);