ovirt-log-collector-3.1.0/ 0000755 0032753 0032753 00000000000 11763605557 015634 5 ustar oschreib oschreib ovirt-log-collector-3.1.0/src/ 0000775 0032753 0032753 00000000000 11763605557 016425 5 ustar oschreib oschreib ovirt-log-collector-3.1.0/src/sos/ 0000775 0032753 0032753 00000000000 11763605557 017231 5 ustar oschreib oschreib ovirt-log-collector-3.1.0/src/sos/plugins/ 0000775 0032753 0032753 00000000000 11763605557 020712 5 ustar oschreib oschreib ovirt-log-collector-3.1.0/src/sos/plugins/jboss.py 0000664 0032753 0032753 00000076136 11763605557 022421 0 ustar oschreib oschreib import sos.plugintools
import os
import zipfile
import platform
import fnmatch
import shlex
import subprocess
import string
import grp, pwd
class DirTree(object):
    """Builds an ascii representation of a directory structure"""

    def __init__(self, top_directory):
        # Running totals, filled in by _build_tree() below.
        self.directory_count = 0
        self.file_count = 0
        self.buffer = []
        self.top_directory = top_directory
        self._build_tree()

    def buf(self, s):
        """Append one rendered line to the output buffer."""
        self.buffer.append(s)

    def printtree(self):
        """Print the rendered tree to stdout."""
        # Single-argument print() works identically on Python 2 and 3,
        # unlike the former "print expr" statement form.
        print(self.as_string())

    def as_string(self):
        """Return the rendered tree as one newline-joined string."""
        return "\n".join(self.buffer)

    def _build_tree(self):
        """Seed the buffer with the root path and recurse into it."""
        self.buf(os.path.abspath(self.top_directory))
        self.tree_i(self.top_directory, first=True)

    def _convert_bytes(self, n):
        """Format a byte count with the largest suitable binary unit."""
        K, M, G, T = 1 << 10, 1 << 20, 1 << 30, 1 << 40
        if n >= T:
            return '%.1fT' % (float(n) / T)
        elif n >= G:
            return '%.1fG' % (float(n) / G)
        elif n >= M:
            return '%.1fM' % (float(n) / M)
        elif n >= K:
            return '%.1fK' % (float(n) / K)
        else:
            return '%d' % n

    def _format(self, path):
        """Conditionally adds detail (owner, group, size) to paths"""
        stats = os.stat(path)
        details = {
            "filename": os.path.basename(path),
            "user": pwd.getpwuid(stats.st_uid)[0],
            "group": grp.getgrgid(stats.st_gid)[0],
            "filesize": self._convert_bytes(stats.st_size),
        }
        return "[%(user)s %(group)s %(filesize)s] %(filename)s" % details

    def tree_i(self, dir_, padding='', first=False):
        """Recursively render dir_ into the buffer, updating counters."""
        if not first:
            self.buf(padding[:-1] + "+-- " + self._format(os.path.abspath(dir_)))
            padding += ' '
        count = 0
        files = os.listdir(dir_)
        # str.lower replaces the deprecated string.lower module function;
        # same ordering, but works on Python 3 as well.
        files.sort(key=str.lower)
        for f in files:
            count += 1
            path = os.path.join(dir_, f)
            if f.startswith("."):
                # Hidden entries are counted for layout but never shown.
                pass
            elif os.path.isfile(path):
                self.file_count += 1
                self.buf(padding + '+-- ' + self._format(path))
            elif os.path.islink(path):
                self.buf(padding + '+-- ' + f + ' -> ' +
                         os.path.basename(os.path.realpath(path)))
                if os.path.isdir(path):
                    self.directory_count += 1
                else:
                    self.file_count += 1
            elif os.path.isdir(path):
                self.directory_count += 1
                # Last entry gets a blank continuation column, others a pipe.
                if count == len(files):
                    self.tree_i(path, padding + ' ')
                else:
                    self.tree_i(path, padding + '|')
def find(file_pattern, top_dir, max_depth=None, path_pattern=None):
    """Recursively yield paths under *top_dir* whose file name matches
    *file_pattern* (fnmatch style).

    max_depth    -- when given, stop descending that many levels below
                    top_dir's parent directory.
    path_pattern -- when given, only yield from directories whose path
                    matches this fnmatch pattern.
    """
    if max_depth:
        # Express the limit as an absolute separator count.
        max_depth += os.path.dirname(top_dir).count(os.path.sep)
    for current, subdirs, entries in os.walk(top_dir):
        if max_depth and current.count(os.path.sep) >= max_depth:
            # Prune in place: os.walk will not descend any further here.
            del subdirs[:]
        if path_pattern and not fnmatch.fnmatch(current, path_pattern):
            continue
        for entry in fnmatch.filter(entries, file_pattern):
            yield os.path.join(current, entry)
class jboss(sos.plugintools.PluginBase):
    """JBoss related information
    """
    # Tunable plug-in options exposed through sosreport:
    # tuples of (name, description, speed hint, default).
    optionList = [("home", 'JBoss\'s installation dir (i.e. JBOSS_HOME)', '', False),
                  ("javahome", 'Java\'s installation dir (i.e. JAVA_HOME)', '', False),
                  ("profile", 'Quoted and space separated list of server profiles to limit collection. \
Default=\'all default minimal production standard web\'.', '', False),
                  ("user", 'JBoss JMX invoker user to be used with twiddle.', '', False),
                  ("pass", 'JBoss JMX invoker user\'s password to be used with twiddle.', '', False),
                  ("logsize", 'max size (MiB) to collect per log file', '', 15),
                  ("stdjar", 'Collect jar statistics for standard jars.', '', True),
                  ("servjar", 'Collect jar statistics from any server configuration dirs.', '', True),
                  ("twiddle", 'Collect twiddle data.', '', True),
                  ("appxml", 'Quoted and space separated list of application\'s whose XML descriptors you want. The keyword \"all\" will collect all descriptors in the designated profile(s).', '', False)]
    # Bytes read per iteration when hashing jar files (see __getMd5).
    __MD5_CHUNK_SIZE=128
    # Resolved JBoss installation directory; None until __getJbossHome() runs.
    __jbossHome=None
    # True once __getJavaHome() has verified a usable java binary.
    __haveJava=False
    # Full twiddle command line (credentials baked in) or None if missing.
    __twiddleCmd=None
    # Directories (relative to JBOSS_HOME) holding the standard system jars.
    __jbossSystemJarDirs = [ "client", "lib" , "common/lib" ]
    # Default server profiles scanned; may be narrowed by the "profile" option.
    __jbossServerConfigDirs = ["all", "default", "minimal", "production", "standard", "web"]
    # Accumulates the free-form HTML report body handed to addCustomText().
    __jbossHTMLBody=None
def __getJbossHome(self):
    """
    Will attempt to locate the JBoss installation dir in either jboss.home or
    scrape it from the environment variable JBOSS_HOME.  On success,
    JBOSS_CLASSPATH is exported so later twiddle commands work.

    Returns:
        True JBOSS_HOME is set and the path exists. False otherwise.
    """
    if self.getOption("home"):
        ## Prefer this value first over the ENV
        self.__jbossHome = self.getOption("home")
        self.addAlert("INFO: The JBoss installation directory supplied to SOS is " +
                      self.__jbossHome)
    elif os.environ.get("JBOSS_HOME"):
        self.__jbossHome = os.environ.get("JBOSS_HOME")
        self.addAlert("INFO: The JBoss installation directory (i.e. JBOSS_HOME) from the environment is " +
                      self.__jbossHome)
    else:
        self.addAlert("ERROR: The JBoss installation directory was not supplied.\
The JBoss SOS plug-in cannot continue.")
        return False
    if os.path.exists(self.__jbossHome):
        ## We need to set JBOSS_CLASSPATH otherwise some twiddle commands will not work.
        # Start from "" (not None): the original left this as None when
        # JBOSS_HOME/lib was missing and then crashed with a TypeError on
        # the += for common/lib below.
        jbossClasspath = ""
        tmp = os.path.join(self.__jbossHome, "lib")
        if os.path.exists(tmp):
            jbossClasspath = tmp + os.sep + "*" + os.pathsep
        else:
            self.addAlert("WARN: The JBoss lib directory does not exist. Dir(%s) " % tmp)
        tmp = os.path.join(self.__jbossHome, "common", "lib")
        if os.path.exists(tmp):
            jbossClasspath += tmp + os.sep + "*"
        else:
            self.addAlert("WARN: The JBoss lib directory does not exist. Dir(%s) " % tmp)
        os.environ['JBOSS_CLASSPATH'] = jbossClasspath
        return True
    else:
        msg = "ERROR: The path to the JBoss installation directory does not exist. Path is: " + self.__jbossHome
        print(msg)
        self.addAlert(msg)
        return False
def __getJavaHome(self):
    """
    This SOS plug-in makes extensive use of JBoss' twiddle program and twiddle uses Java. As such, we
    need to ensure that java and JAVA_HOME is known to the plug-in so that it can use Java.
    This function will put JAVA_HOME and JAVA_HOME/bin into the environment if they're not already
    there.

    Returns:
        True when a usable java was found (via option, environment, or
        already on the PATH); False otherwise.
    """
    javaHome=None
    java="bin/java"
    if self.getOption("javahome"):
        ## Prefer this value first over the ENV
        javaHome=self.getOption("javahome")
        self.addAlert("INFO: The Java installation directory supplied to SOS is " +
                      javaHome)
    elif os.environ.get("JAVA_HOME"):
        javaHome=os.environ.get("JAVA_HOME")
        self.addAlert("INFO: The Java installation directory (i.e. JAVA_HOME) from the environment is " +
                      javaHome)
    else:
        ## Test to see if Java is already in the PATH
        (status, output, rtime) = self.callExtProg("java -version")
        if (status == 0):
            self.addAlert("INFO: The Java installation directory is in the system path.")
            return True
        else:
            self.addAlert("ERROR: The Java installation directory was not supplied.\
The JBoss SOS plug-in will not collect twiddle data.")
            return False
    # Verify the candidate directory actually contains an executable java.
    java=os.path.join(javaHome, java)
    if os.path.exists(java) and os.access(java, os.X_OK):
        os.environ['JAVA_HOME']=javaHome
        ## Place the supplied Java at the *head* of the path.
        os.environ['PATH'] = os.path.join(javaHome, "bin") + os.pathsep + os.environ['PATH']
        return True
    else:
        msg = "ERROR: The path to the Java installation directory does not exist. Path is: %s" % (javaHome)
        print msg
        self.addAlert(msg)
        return False
def __getJMXCredentials(self):
    """
    Read the JMX credentials from the option list.

    Returns:
        A formatted credential string for twiddle consumption when BOTH
        user and pass were supplied; None otherwise.
    """
    user = self.getOption("user")
    password = self.getOption("pass")
    ## Let's make a best effort not to pass expansions or escapes to the shell
    ## by strong quoting the user's input
    if user and password:
        return " -u '" + user + "' " + " -p '" + password + "' "
    return None
def __updateServerConfigDirs(self):
    """
    By default this plug-in will attempt to collect logs from every
    JBoss server configuration directory (i.e. profile).  The user may
    have supplied a limited list via the "profile" option; honor it.

    Returns:
        Nothing.  Updates __jbossServerConfigDirs in place when the
        user supplied a limited list.
    """
    profiles = self.getOption("profile")
    if not profiles:
        return
    ## The option arrives space separated (getOption does not appear to
    ## pass commas through), so drop the empty entries produced by runs
    ## of spaces, e.g. ' all default web '.
    self.__jbossServerConfigDirs = [p for p in profiles.split(' ') if len(p)]
    return
def __buildTwiddleCmd(self):
    """
    Utility function to build the twiddle command with/without credentials
    so that it can be used by later fcns. If twiddle is found

    Sets __twiddleCmd to the full command string, or to None when the
    twiddle launcher is missing or not executable.
    """
    ## In the off-chance that SOS is ever ported to cygwin or this plugin
    ## is ported to win...
    if platform.system() == "Windows":
        self.__twiddleCmd=os.path.join(self.__jbossHome, "bin", "twiddle.bat")
    else:
        self.__twiddleCmd=os.path.join(self.__jbossHome, "bin", "twiddle.sh")
    if os.path.exists(self.__twiddleCmd) and os.access(self.__twiddleCmd, os.X_OK):
        # Bake the JMX credentials (if both supplied) into the command line.
        credential = self.__getJMXCredentials()
        if credential:
            self.__twiddleCmd += credential
    else:
        ## Reset twiddlecmd to None
        self.addAlert("ERROR: The twiddle program could not be found. Program=%s" % (self.__twiddleCmd))
        self.__twiddleCmd = None
    return
def __createHTMLBodyStart(self):
    """
    The free-form HTML that can be inserted into the SOS report with addCustomText is within
    a
    block. We need to add a few pieces of HTML so that all of our subsequent data will
    be rendered properly.
    """
    ## NOTE(review): the template below appears to have lost its markup in
    ## this copy of the file; it is reproduced unchanged.
    self.__jbossHTMLBody = """
JBoss SOS Report Table of Contents
"""
def __getMd5(self, file):
    """
    Will perform an MD5 sum on a given file and return the file's message digest. This function
    will not read the entire file into memory, instead, it will consume the file in 128 byte
    chunks. This might be slightly slower but, the intent of a SOS report is to collect data from
    a system that could be under stress and we shouldn't stress it more by loading entire Jars into
    real memory.
    Note: This fcn expects hashlib; however, this isn't always available. If it isn't then
    we will use md5sum

    Returns:
        The hex digest string, or a placeholder of '?' characters on failure.
    """
    retVal = "????????????????????????????????"
    try:
        import hashlib
    except ImportError:
        ## No hashlib (very old Python): shell out to md5sum instead.
        process = subprocess.Popen(['md5sum', file],
                                   shell=False,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        result = process.communicate()
        if (process.returncode == 0):
            retVal = result[0].partition(' ')[0]
        else:
            msg = "ERROR: Unable to compute md5sum of %s. Msg (%s)" % (file, result[1])
            print(msg)
            self.addAlert(msg)
        return retVal
    try:
        fd = open(file, "rb")
    except IOError as ioe:
        msg = "ERROR: Unable to open %s for reading. Error: %s" % (file, ioe)
        print(msg)
        self.addAlert(msg)
        return retVal
    try:
        md5 = hashlib.md5()
        data = fd.read(self.__MD5_CHUNK_SIZE)
        while data:
            md5.update(data)
            data = fd.read(self.__MD5_CHUNK_SIZE)
        retVal = md5.hexdigest()
    finally:
        ## The original leaked the file descriptor; always close it.
        fd.close()
    return retVal
def __getManifest(self, jarFile):
    """
    Given a jar file, this function will extract the Manifest and return it's contents
    as a string.

    Returns None when the jar cannot be opened or has no readable manifest.
    """
    manifest=None
    try:
        zf = zipfile.ZipFile(jarFile)
        try:
            # A jar is a zip archive; the manifest lives at a fixed path.
            manifest=zf.read("META-INF/MANIFEST.MF")
        except Exception, e:
            msg="ERROR: reading manifest from %s. Error: %s" % (jarFile, e)
            print msg
            self.addAlert(msg)
        zf.close()
    except Exception, e:
        msg="ERROR: reading contents of %s. Error: %s" % (jarFile, e)
        print msg
        self.addAlert(msg)
    return manifest
def __getStdJarInfo(self):
    """Append a section to the HTML body summarizing every jar found in
    the standard JBoss system directories (md5 digest + manifest each)."""
    ## NOTE(review): the HTML template fragments below appear to have been
    ## mangled (markup stripped) in this copy of the file; they are
    ## reproduced unchanged.
    self.__jbossHTMLBody += """
– JBoss System Jar Information
"""
    for dir in self.__jbossSystemJarDirs:
        path=os.path.join(self.__jbossHome, dir)
        if os.path.exists(path):
            # Separator-flattened path doubles as the HTML div id.
            nicePath=path.replace(os.sep, "-")
            self.__jbossHTMLBody += """
— Summary of Jar Files in JBoss System Directory
%s
(
Show /
Hide ):
""" % (path,nicePath,nicePath,nicePath)
            found= False
            for jarFile in find("*.jar", path):
                found= True
                nicePath=jarFile.replace(os.sep, "-")
                self.__jbossHTMLBody += """
- Jar File: %s
MD5: %s
Manifest File (
Show /
Hide ):
""" % (jarFile,
       self.__getMd5(jarFile),
       nicePath,
       nicePath,
       nicePath,
       self.__getManifest(jarFile))
            if not found:
                self.addAlert("WARN: No jars found in JBoss system path (" + path + ").")
            self.__jbossHTMLBody += """
"""
        else:
            self.addAlert("ERROR: JBoss system path (" + path + ") does not exist.")
    return
def __getServerConfigJarInfo(self, configDirAry):
    """Append a section to the HTML body summarizing every jar found in
    each requested server configuration (profile): md5 digest + manifest."""
    ## NOTE(review): the HTML template fragments below appear to have lost
    ## their markup in this copy of the file; reproduced unchanged.
    self.__jbossHTMLBody += """
– JBoss Server Configurations Jar Information
"""
    for dir in configDirAry:
        serverDir = os.path.join("server", dir)
        path=os.path.join(self.__jbossHome, serverDir)
        if os.path.exists(path):
            # Separator-flattened path doubles as the HTML div id.
            nicePath=path.replace(os.sep, "-")
            self.__jbossHTMLBody += """
— Summary of Jar Files in the
%s JBoss Server Configuration
(
Show /
Hide ):
""" % (dir, nicePath,nicePath,nicePath)
            found = False
            for jarFile in find("*.jar", path):
                found = True
                nicePath=jarFile.replace(os.sep, "-")
                self.__jbossHTMLBody += """
- Jar File: %s
MD5: %s
Manifest File (
Show /
Hide ):
""" % (jarFile,
       self.__getMd5(jarFile),
       nicePath,
       nicePath,
       nicePath,
       self.__getManifest(jarFile))
            if not found:
                self.addAlert("WARN: No jars found in the JBoss server configuration (%s)." % (path))
            self.__jbossHTMLBody += """
"""
        else:
            self.addAlert("ERROR: JBoss server configuration path (" + path + ") does not exist.")
    return
def __getJBossHomeTree(self):
    """
    This function will execute the "tree" command on JBOSS_HOME.
    """
    self.__jbossHTMLBody += """
– JBOSS_HOME Directory Tree
"""
    try:
        # DirTree is the pure-python stand-in for the tree(1) command.
        output = DirTree(self.__jbossHome).as_string()
        self.__jbossHTMLBody += """
%s
""" % (output)
    except Exception, e:
        # Report the failure inside the HTML body rather than aborting.
        self.__jbossHTMLBody += """
ERROR: Unable to generate tree on JBOSS_HOME.
Exception: %s
""" % e
    return
def __getMbeanData(self, dataTitle, divId, twiddleOpts):
    """
    Run twiddle with *twiddleOpts* and append its output (or an error
    message) to the HTML report body.

    dataTitle   -- human readable section title
    divId       -- HTML element id used by the show/hide links
    twiddleOpts -- arguments appended verbatim to the twiddle command
    """
    # The unused local `credentials` was dropped: credentials are already
    # baked into __twiddleCmd by __buildTwiddleCmd().
    if self.__haveJava and self.__twiddleCmd:
        self.__jbossHTMLBody += """
""" % (dataTitle, divId, divId, divId,twiddleOpts)
        cmd = "%s %s" % (self.__twiddleCmd, twiddleOpts)
        proc = subprocess.Popen(shlex.split(cmd), stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
        output = proc.communicate()[0]
        status = proc.returncode
        if status == 0 and output:
            self.__jbossHTMLBody += output.strip()
        else:
            self.__jbossHTMLBody += """
ERROR: Unable to collect %s data.
Output: %s
Status: %d
""" % (twiddleOpts, output, status)
    else:
        self.__jbossHTMLBody += "ERROR: Unable to collect data twiddle or Java is missing."
    self.__jbossHTMLBody += """
"""
    return
def __getTwiddleData(self):
    """
    This function co-locates all of the calls to twiddle so that they can be easily disabled.
    Each call appends one titled, show/hide-able section to the HTML body.
    """
    ## Get jboss.system.* Data
    self.__jbossHTMLBody += """
– JBoss JMX MBean Data from jboss.system:*
"""
    self.__getMbeanData("JBoss Server Info",
                        "jboss-server-info",
                        " get 'jboss.system:type=ServerInfo' ")
    self.__getMbeanData("JBoss Server Config Info",
                        "jboss-server-config-info",
                        " get 'jboss.system:type=ServerConfig' ")
    self.__getMbeanData("JBoss CXF Server Config Info",
                        "jboss-cxfserver-config-info",
                        " get 'jboss.ws:service=ServerConfig' ")
    self.__getMbeanData("JBoss Memory Pool Info",
                        "jboss-memory-pool-info",
                        " invoke 'jboss.system:type=ServerInfo' listMemoryPools true ")
    self.__getMbeanData("JBoss Thread CPU Utilization",
                        "jboss-thread-cpu-info",
                        " invoke 'jboss.system:type=ServerInfo' listThreadCpuUtilization ")
    self.__getMbeanData("JBoss Thread Dump",
                        "jboss-thread-dump",
                        " invoke 'jboss.system:type=ServerInfo' listThreadDump ")
    self.__getMbeanData("JBoss Logging Config Info",
                        "jboss-logging-config-info",
                        " get 'jboss.system:service=Logging,type=Log4jService' ")
    ## Get jboss.* Data
    self.__jbossHTMLBody += """
– JBoss JMX MBean Data from jboss:*
"""
    self.__getMbeanData("JBoss System Properties",
                        "jboss-system-properties-info",
                        " invoke 'jboss:name=SystemProperties,type=Service' showAll ")
    self.__getMbeanData("JBoss JNDI List View",
                        "jboss-jndi-list-info",
                        " invoke 'jboss:service=JNDIView' list true ")
    ## MBean Summary
    self.__jbossHTMLBody += """
– JBoss MBean Summary
"""
    self.__getMbeanData("JBoss MBean Vendor/Version Info",
                        "jboss-vendor-version",
                        " get 'JMImplementation:type=MBeanServerDelegate' ")
    self.__getMbeanData("JBoss MBean Count",
                        "jboss-mbean-count",
                        " serverinfo -c ")
    self.__getMbeanData("JBoss MBean List",
                        "jboss-mbean-list",
                        " serverinfo -l ")
    ##JBoss Messaging Data
    self.__jbossHTMLBody += """
– JBoss JMX Messaging MBean Data from jboss.messaging:*
"""
    self.__getMbeanData("JBoss Message Counters",
                        "jboss-message-counters",
                        " invoke 'jboss.messaging:service=ServerPeer' listMessageCountersAsHTML ")
    self.__getMbeanData("JBoss Prepared Transactions Table",
                        "jboss-prepared-transactions",
                        " invoke 'jboss.messaging:service=ServerPeer' listAllPreparedTransactions ")
    self.__getMbeanData("JBoss Active Clients Table",
                        "jboss-active-clients",
                        " invoke 'jboss.messaging:service=ServerPeer' showActiveClientsAsHTML ")
    ## Get j2ee Data query 'jboss.j2ee:*'
    self.__jbossHTMLBody += """
– JBoss JMX J2EE MBean Data from jboss.j2ee:*
"""
    self.__getMbeanData("JBoss J2EE MBeans",
                        "jboss-j2ee-mbeans",
                        " query 'jboss.j2ee:*' ")
    ## VFS
    self.__jbossHTMLBody += """
– JBoss JMX VFS MBean Data from jboss.vfs:*
"""
    self.__getMbeanData("JBoss VFS Cached Contexts",
                        "jboss-vfs-contexts",
                        " invoke 'jboss.vfs:service=VFSCacheStatistics' listCachedContexts ")
    ## Get jsr77 Data
    self.__jbossHTMLBody += """
– JBoss JSR77 Data
"""
    self.__getMbeanData("JBoss JSR77 Data",
                        "jboss-jsr77",
                        " jsr77 ")
    return
def __getFiles(self, configDirAry):
    """
    This function will collect files from JBOSS_HOME for analysis. The scope of files to
    be collected are determined by options to this SOS plug-in.
    """
    for dir in configDirAry:
        path=os.path.join(self.__jbossHome, "server", dir)
        ## First add forbidden files
        self.addForbiddenPath(os.path.join(path, "tmp"))
        self.addForbiddenPath(os.path.join(path, "work"))
        self.addForbiddenPath(os.path.join(path, "data"))
        if os.path.exists(path):
            ## First get everything in the conf dir
            confDir=os.path.join(path, "conf")
            self.doCopyFileOrDir(confDir)
            ## Log dir next
            logDir=os.path.join(path, "log")
            for logFile in find("*", logDir):
                # Respect the user's per-file size cap for logs.
                self.addCopySpecLimit(logFile, self.getOption("logsize"))
            ## Deploy dir
            deployDir=os.path.join(path, "deploy")
            for deployFile in find("*", deployDir, max_depth=1):
                self.addCopySpec(deployFile)
            ## Get application deployment descriptors if designated.
            if self.isOptionEnabled("appxml"):
                appxml=self.getOption("appxml")
                ## I'd rather use comma as the delimiter but getOption doesn't seem to be passing it through.
                ## Since we are using spaces as the delimiter, we need to filter out empty list elements
                ## if the user did something like ' all default web '.
                appxml=appxml.split(' ')
                ## Flter(None doesn't work. Allows 0.
                appxml=filter(lambda x: len(x), appxml)
                for app in appxml:
                    # Match e.g. "*myapp*/WEB-INF" anywhere under deploy/.
                    pat = os.path.join("*%s*" % (app,), "WEB-INF")
                    for file in find("*.xml", deployDir, path_pattern=pat):
                        self.addCopySpec(file)
    return
def setup(self):
    """sos entry point: drives all of the collection in this plug-in."""
    ## We need to know where JBoss is installed and if we can't find it we
    ## must exit immediately.
    if not self.__getJbossHome():
        self.exit_please()
        return
    ## Check to see if the user passed in a limited list of server config jars.
    self.__updateServerConfigDirs()
    ## Generate HTML Body for report
    self.__createHTMLBodyStart()
    ## Generate hashes of the stock Jar files for the report.
    if self.getOption("stdjar"):
        self.__getStdJarInfo()
    ## Generate hashes for the Jars in the various profiles
    if self.getOption("servjar"):
        self.__getServerConfigJarInfo(self.__jbossServerConfigDirs)
    ## Generate a Tree for JBOSS_HOME
    self.__getJBossHomeTree()
    if self.getOption("twiddle"):
        ## We need to know where Java is installed or at least ensure that it
        ## is available to the plug-in so that we can run twiddle.
        self.__haveJava = self.__getJavaHome()
        self.__buildTwiddleCmd()
        self.__getTwiddleData()
    # Hand the accumulated HTML to sosreport, then copy the actual files.
    self.addCustomText(self.__jbossHTMLBody)
    self.__getFiles(self.__jbossServerConfigDirs)
    return
def postproc(self):
    """
    Obfuscate passwords.
    """
    for dir in self.__jbossServerConfigDirs:
        path=os.path.join(self.__jbossHome, "server", dir)
        ## Really annoying that there appears to be no vehicle to
        ## say I want ignore case...argh!
        self.doRegexSub(os.path.join(path,"conf","login-config.xml"),
                        r"\"[Pp][Aa][Ss][Ss][Ww][Oo][Rr][Dd]\".*>.*[Mm][Oo][Dd][Uu][Ll][Ee]-[Oo][Pp][Tt][Ii][Oo][Nn].*>",
                        r'"password">********')
        # Mask every value in the *-users.properties credential files.
        tmp = os.path.join(path,"conf", "props")
        for propFile in find("*-users.properties", tmp):
            self.doRegexSub(propFile,
                            r"=(.*)",
                            r'=********')
        ## Remove PW from -ds.xml files
        tmp=os.path.join(path, "deploy")
        for dsFile in find("*-ds.xml", tmp):
            self.doRegexSub(dsFile,
                            r"<[Pp][Aa][Ss][Ss][Ww][Oo][Rr][Dd].*>.*[Pp][Aa][Ss][Ss][Ww][Oo][Rr][Dd].*>",
                            r"********")
    return
ovirt-log-collector-3.1.0/src/sos/plugins/postgresql.py 0000664 0032753 0032753 00000005454 11763605557 023477 0 ustar oschreib oschreib import sos.plugintools
import os
import fnmatch
import shlex
import subprocess
import tempfile
def find(file_pattern, top_dir, max_depth=None, path_pattern=None):
    """Walk *top_dir* and yield every file whose name matches
    *file_pattern* (fnmatch semantics).

    max_depth    -- optional limit on how many levels below top_dir's
                    parent to descend.
    path_pattern -- optional fnmatch pattern the containing directory
                    must satisfy for its files to be yielded.
    """
    if max_depth:
        # Convert the relative limit into an absolute separator count.
        max_depth = max_depth + os.path.dirname(top_dir).count(os.path.sep)
    for dirpath, dirnames, filenames in os.walk(top_dir):
        if max_depth and dirpath.count(os.path.sep) >= max_depth:
            # Emptying dirnames in place prevents deeper descent.
            del dirnames[:]
        if path_pattern and not fnmatch.fnmatch(dirpath, path_pattern):
            continue
        for match in fnmatch.filter(filenames, file_pattern):
            yield os.path.join(dirpath, match)
# Class name must be the same as file name and method names must not change
class postgresql(sos.plugintools.PluginBase):
    """PostgreSQL related information"""
    optionList = [
        ("pghome", 'PostgreSQL server home directory.', '', '/var/lib/pgsql'),
        ("username", 'username for pg_dump', '', 'postgres'),
        ("password", 'password for pg_dump', '', ''),
        ("dbname", 'database name to dump for pg_dump', '', ''),
    ]
    # Scratch directory holding the pg_dump output; stays None unless a
    # dump is actually requested in setup().
    tmp_dir = None

    def pg_dump(self):
        """Dump the configured database into tmp_dir with pg_dump and
        schedule the resulting tar archive for collection."""
        dest_file = os.path.join(self.tmp_dir, "sos_pgdump.tar")
        old_env_pgpassword = os.environ.get("PGPASSWORD")
        os.environ["PGPASSWORD"] = "%s" % (self.getOption("password"))
        (status, output, rtime) = self.callExtProg("pg_dump %s -U %s -w -f %s -F t" %
                                                   (self.getOption("dbname"),
                                                    self.getOption("username"),
                                                    dest_file))
        ## Restore the caller's environment.  The original left PGPASSWORD
        ## exported when it had not been set before; unset it in that case.
        if old_env_pgpassword is not None:
            os.environ["PGPASSWORD"] = "%s" % (old_env_pgpassword)
        else:
            del os.environ["PGPASSWORD"]
        if (status == 0):
            self.addCopySpec(dest_file)
        else:
            self.addAlert("ERROR: Unable to execute pg_dump. Error(%s)" % (output))

    def setup(self):
        """Collect an optional database dump plus logs and config files."""
        if self.getOption("dbname"):
            if self.getOption("password"):
                self.tmp_dir = tempfile.mkdtemp()
                self.pg_dump()
            else:
                self.addAlert("WARN: password must be supplied to dump a database.")
        # Copy PostgreSQL log files.
        for file in find("*.log", self.getOption("pghome")):
            self.addCopySpec(file)
        # Copy PostgreSQL config files.
        for file in find("*.conf", self.getOption("pghome")):
            self.addCopySpec(file)
        self.addCopySpec(os.path.join(self.getOption("pghome"), "data", "PG_VERSION"))
        self.addCopySpec(os.path.join(self.getOption("pghome"), "data", "postmaster.opts"))

    def postproc(self):
        """Remove the scratch directory, if one was ever created."""
        import shutil
        ## Guard: the original called rmtree unconditionally and raised
        ## AttributeError whenever no dump had been requested.
        if self.tmp_dir is not None:
            shutil.rmtree(self.tmp_dir)
# Build/install helper for the sos plug-in modules in this directory.
PYTHON=python
EXCLUDES=__init__.py
PACKAGE = sos/$(shell basename `pwd`)
# Interpreter version (e.g. "2.7") and prefix, probed from $(PYTHON).
PYVER := $(shell $(PYTHON) -c 'import sys; print "%.3s" %(sys.version)')
PYSYSDIR := $(shell $(PYTHON) -c 'import sys; print sys.prefix')
PYLIBDIR = $(PYSYSDIR)/lib/python$(PYVER)
PKGDIR = $(PYLIBDIR)/site-packages/$(PACKAGE)

# Byte-compile all plug-ins (normal and optimized), skipping EXCLUDES.
all:
	$(PYTHON) -m compileall -x $(EXCLUDES) .
	$(PYTHON) -OO -m compileall -x $(EXCLUDES) .

clean:
	rm -f *.pyc *.pyo *~

# Install sources (755) and byte code (644) into site-packages.
install: all
	mkdir -p $(DESTDIR)/$(PKGDIR)
	for p in $(filter-out $(EXCLUDES), $(wildcard *.py)) ; do \
		install -m 755 $$p $(DESTDIR)/$(PKGDIR)/$$p; \
	done
	for p in $(filter-out $(EXCLUDES), $(wildcard *.pyo) $(wildcard *.pyc)) ; do \
		install -m 644 $$p $(DESTDIR)/$(PKGDIR)/$$p; \
	done
ovirt-log-collector-3.1.0/src/sos/plugins/engine.py 0000664 0032753 0032753 00000002424 11763605557 022533 0 ustar oschreib oschreib import sos.plugintools
# Class name must be the same as file name and method names must not change
class engine(sos.plugintools.PluginBase):
    """oVirt related information"""
    # (name, description, speed hint, default)
    optionList = [
        ("vdsmlogs", 'Directory containing all of the SOS logs from the hypervisor(s)', '', False),
        ("prefix", "Prefix the sosreport archive", '', False)
    ]

    def setup(self):
        """Collect oVirt/RHEV engine configuration and log trees."""
        # Copy engine config files.
        self.addCopySpec("/etc/ovirt-engine")
        self.addCopySpec("/var/log/ovirt-engine")
        self.addCopySpec("/etc/rhevm")
        self.addCopySpec("/var/log/rhevm/")
        # Optionally pull in previously-gathered hypervisor SOS logs.
        if self.getOption("vdsmlogs"):
            self.addCopySpec(self.getOption("vdsmlogs"))

    def postproc(self):
        """
        Obfuscate passwords.
        """
        self.doRegexSub("/etc/ovirt-engine/engine-config/engine-config.properties",
                        r"Password.type=(.*)",
                        r'Password.type=********')
        self.doRegexSub("/etc/rhevm/rhevm-config/rhevm-config.properties",
                        r"Password.type=(.*)",
                        r'Password.type=********')
        # Optionally prefix the final report archive name.
        if self.getOption("prefix"):
            current_name = self.policy().reportName
            self.policy().reportName = "LogCollector-" + current_name
ovirt-log-collector-3.1.0/src/rhev/ 0000775 0032753 0032753 00000000000 11763605557 017371 5 ustar oschreib oschreib ovirt-log-collector-3.1.0/src/rhev/logcollector.conf 0000664 0032753 0032753 00000004637 11763605557 022742 0 ustar oschreib oschreib [LogCollector]
#
### oVirt Engine Configuration:
#
## username to use with the REST API
#user=user@example.com
## password to use with the REST API
#passwd=PASSWORD
## hostname or IP address of the oVirt Engine
#engine=localhost:8443
## collect all the logs from oVirt Engine and all the hypervisors
#all=false
## collect all the logs from oVirt Engine and all the RHEV-H(s) in a cluster
#cluster=None
## collect all the logs from oVirt Engine and all the RHEV-H(s) in a DC
#data-center=None
## collect all the logs from oVirt Engine and all the RHEV-H(s) in a comma separated list of IP(s)/hostname(s)
#hosts=hostname,hostname,IP,...,IP,hostname
## collect all the logs from oVirt Engine and all the RHEV-H(s) that match a pattern (e.g. rhev*)
#host-pattern=rhev*
#
### SOSReport Options for JBoss, VDSM, and oVirt Engine
#
## JBoss's installation dir (i.e. JBOSS_HOME)
#jboss-home=/var/lib/jbossas
## Java's installation dir (i.e. JAVA_HOME)
#java-home=/usr/lib/jvm/java
## Quoted and space separated list of server profiles to limit collection. (default='engine-slimmed')
#jboss-profile=engine-slimmed
## JBoss JMX invoker user to be used with twiddle
#jboss-user=admin
## JBoss JMX invoker user's password to be used with twiddle
#jboss-pass=admin
# JBoss max size (MiB) to collect per log file (default=15)
#jboss-logsize=15
## collect jar statistics for JBoss standard jars.(default=on)
#jboss-stdjar=on
## collect jar statistics from any server configuration dirs (default=on)
#jboss-servjar=on
## Tell JBoss to collect twiddle data. You need to supply JMX UID and PW for this to work.
#jboss-twiddle=on
## quoted and space separated list of application's whose XML descriptors you want (default=all)
#jboss-appxml=all
#
### SSH Configuration
## the port to ssh and scp on
#ssh-port=22
## the path to the ssh identity file to use
#key-file=/etc/pki/engine/keys/engine_id_rsa
## max concurrent connections for fetching RHEV-H logs
#max-connections=MAX_CONNECTIONS
#
### Database Configuration
#
## PostgreSQL database user name
#pg-user=postgres
## PostgreSQL database password
#pg-pass=PASS
## PostgreSQL database name (default=engine)
#pg-dbname=engine
## PostgreSQL database hostname or IP address
#pg-dbhost=localhost
## The public SSH key for the host upon which the PostgreSQL DB lives.
#pg-host-key=/path/to/key/file
## The SSH user that will be used to connect to the server upon which the remote PostgreSQL database lives
#pg-ssh-user=root
ovirt-log-collector-3.1.0/src/rhev/helper/ 0000775 0032753 0032753 00000000000 11763605557 020650 5 ustar oschreib oschreib ovirt-log-collector-3.1.0/src/rhev/helper/hypervisors.py 0000664 0032753 0032753 00000011032 11763605557 023614 0 ustar oschreib oschreib """This module uses the REST API to get a collection of information about hypervisors"""
import logging
import urllib2
import base64
import threading
from ovirtsdk.api import API
from ovirtsdk.xml import params
class ENGINETree(object):
    """In-memory mirror of the engine topology fetched over the REST API:
    DataCenter -> Cluster -> Host.  Objects whose parent cannot be
    resolved are attached to a shared dummy parent with empty id/name."""

    class DataCenter(object):
        """Value object: one data center plus the clusters attached so far."""
        def __init__(self, id, name):
            self.id = id
            self.name = name
            self.clusters = set()

        def add_cluster(self, cluster):
            self.clusters.add(cluster)

        def __str__(self):
            return self.name

    class Cluster(object):
        """Value object: one cluster plus the hosts attached so far."""
        def __init__(self, id, name):
            self.id = id
            self.name = name
            self.hosts = set()

        def add_host(self, host):
            self.hosts.add(host)

        def __str__(self):
            return self.name

    class Host(object):
        """Value object: a host address plus an optional display name."""
        def __init__(self, address, name=None):
            self.address = address
            self.name = name

        def __str__(self):
            return self.address

    def __init__(self):
        # Flat sets of every object seen, regardless of parentage.
        self.datacenters = set()
        self.clusters = set()
        self.hosts = set()

    def add_datacenter(self, datacenter):
        """Record an API datacenter object (must expose .id and .name)."""
        dc_obj = self.DataCenter(datacenter.id, datacenter.name)
        self.datacenters.add(dc_obj)

    def add_cluster(self, cluster):
        """Record an API cluster, attaching it to its data center when the
        API reports one, otherwise to the dummy ""-id data center."""
        c_obj = self.Cluster(cluster.id, cluster.name)
        self.clusters.add(c_obj)
        if cluster.get_data_center() is not None:
            for dc in self.datacenters:
                if dc.id == cluster.get_data_center().id:
                    dc.add_cluster(c_obj)
        else:
            dummySeen = 0
            for dc in self.datacenters:
                if dc.id == "":
                    dc.add_cluster(c_obj)
                    dummySeen = 1
            if dummySeen == 0:
                # First orphan: create the dummy data center on demand.
                dc = self.DataCenter("", "")
                dc.add_cluster(c_obj)
                self.datacenters.add(dc)

    def add_host(self, host):
        """Record an API host, attaching it to its cluster when known,
        otherwise to a dummy cluster inside a dummy data center."""
        host_obj = self.Host(host.get_address(), host.name)
        self.hosts.add(host_obj)
        if host.get_cluster() is not None:
            for cluster in self.clusters:
                if cluster.id == host.get_cluster().id:
                    cluster.add_host(host_obj)
        else:
            dummySeen = 0
            for cluster in self.clusters:
                if cluster.id == "":
                    cluster.add_host(host_obj)
                    dummySeen = 1
            if dummySeen == 0:
                c_obj = self.Cluster("", "")
                c_obj.add_host(host_obj)
                self.clusters.add(c_obj)
                dc = self.DataCenter("", "")
                dc.add_cluster(c_obj)
                self.datacenters.add(dc)

    def __str__(self):
        # One "datacenter | cluster | host" row per attached host.
        return "\n".join(["%-20s | %-20s | %s" % (dc, cluster, host)
                          for dc in self.datacenters
                          for cluster in dc.clusters
                          for host in cluster.hosts])

    def get_sortable(self):
        """Return (dc_name, cluster_name, host_address) tuples for every
        host that was successfully attached to the tree."""
        return [(dc.name, cluster.name, host.address)
                for dc in self.datacenters
                for cluster in dc.clusters
                for host in cluster.hosts]
def _initialize_api(hostname, username, password):
    """
    Initialize the oVirt RESTful API

    Returns an API handle on success, or None when the product info
    cannot be retrieved (treated as "could not connect").
    """
    url = "https://" + hostname + "/api"
    api = API(url=url,
              username=username,
              password=password)
    try:
        # Fetching the product info doubles as a connectivity check.
        pi = api.get_product_info()
        if pi is not None:
            vrm = '%s.%s.%s' % (pi.get_version().get_major(),
                                pi.get_version().get_minor(),
                                pi.get_version().get_revision())
            logging.debug("API Vendor(%s)\tAPI Version(%s)" % (pi.get_vendor(), vrm))
        else:
            ## NOTE(review): _() looks like a gettext hook installed
            ## globally elsewhere at runtime -- confirm it is set up.
            logging.error(_("Unable to connect to REST API."))
            return None
    except Exception, e:
        logging.error(_("Unable to connect to REST API. Message: %s") % e)
        return None
    return api
def get_all(hostname, username, password):
    """Return a set of (datacenter, cluster, host-address) tuples for every
    hypervisor known to the engine; an empty set on any failure."""
    tree = ENGINETree()
    try:
        api = _initialize_api(hostname, username, password)
        if api is not None:
            # Order matters: parents must be registered before children so
            # clusters/hosts can be attached to them (see ENGINETree).
            for dc in api.datacenters.list():
                tree.add_datacenter(dc)
            for cluster in api.clusters.list():
                tree.add_cluster(cluster)
            for host in api.hosts.list():
                tree.add_host(host)
        return set(tree.get_sortable())
    except Exception, e:
        logging.error(_("Failure fetching information about hypervisors from API . Error: %s") % e)
        return set()
ovirt-log-collector-3.1.0/src/rhev/helper/__init__.py 0000664 0032753 0032753 00000000031 11763605557 022753 0 ustar oschreib oschreib __all__ = ["hypervisors"] ovirt-log-collector-3.1.0/src/rhev/logcollector.py 0000664 0032753 0032753 00000125765 11763605557 022453 0 ustar oschreib oschreib #!/usr/bin/python
import sys
import os
from optparse import OptionParser, OptionGroup, SUPPRESS_HELP
import subprocess
import shlex
import pprint
import fnmatch
import traceback
import shutil
import logging
import getpass
from helper import hypervisors
versionNum="1.0.0"
# Log record layouts: terse for the console stream, detailed for the file.
STREAM_LOG_FORMAT = '%(levelname)s: %(message)s'
FILE_LOG_FORMAT = '%(asctime)s::%(levelname)s::%(module)s::%(lineno)d::%(name)s:: %(message)s'
FILE_LOG_DSTMP = '%Y-%m-%d %H:%M:%S'
# Defaults for SSH access to hypervisors and for local bookkeeping files.
DEFAULT_SSH_KEY = "/etc/pki/ovirt-engine/keys/engine_id_rsa"
DEFAULT_SSH_USER = 'root'
DEFAULT_CONFIGURATION_FILE = "/etc/ovirt-engine/logcollector.conf"
DEFAULT_SCRATCH_DIR='/tmp/logcollector'
DEFAULT_LOG_FILE='/var/log/ovirt-engine/engine-log-collector.log'
DEFAULT_TIME_SHIFT_FILE='time_diff.txt'
def multilog(logger, msg):
    """Feed *msg* to *logger* one line at a time, so multi-line messages
    (e.g. tracebacks) keep the log format on every line."""
    text = str(msg)
    for chunk in text.splitlines():
        logger(chunk)
def get_from_prompt(msg, default=None, prompter=raw_input):
    """Prompt the user with *msg* and return the stripped answer.
    An empty answer or EOF (CTRL+D) yields *default* instead."""
    try:
        answer = prompter(msg)
    except EOFError:
        # Emit a newline so the next output starts on its own line.
        print
        return default
    answer = answer.strip()
    if answer:
        return answer
    return default
class ExitCodes():
    """
    A simple pseudo-enumeration class to hold the current and future exit
    codes of the program.
    """
    NOERR=0       # ran to completion without errors
    CRITICAL=1    # a critical failure stopped the program
    WARN=2        # problems occurred while gathering data, but we continued
    exit_code=NOERR   # module-wide "worst result so far"; mutated by collectors
class Caller(object):
    """Utility class for forking programs.

    Commands are given as %-templates; the configuration dictionary is
    interpolated into them before they are tokenized and executed.
    """

    def __init__(self, configuration):
        self.configuration = configuration

    def prep(self, cmd):
        """Interpolate the configuration into *cmd* and tokenize it."""
        return shlex.split(cmd % self.configuration)

    def call(self, cmds):
        """Uses the configuration to fork a subprocess and run cmds.

        Returns the child's stdout on success; raises Exception carrying
        the child's stderr on a non-zero exit status.
        """
        proc = subprocess.Popen(self.prep(cmds),
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        out, err = proc.communicate()
        logging.debug("returncode(%s)" % proc.returncode)
        logging.debug("STDOUT(%s)" % out)
        logging.debug("STDERR(%s)" % err)
        if proc.returncode != 0:
            raise Exception(err)
        return out
class Configuration(dict):
    """This class is a dictionary subclass that knows how to read and
    handle our configuration. Resolution order is defaults ->
    configuration file -> command line options."""

    class SkipException(Exception):
        "This exception is raised when the user aborts a prompt"
        pass

    def __init__(self,
                 parser=None):
        # "list" is the default command when none is supplied on the CLI.
        self.command = "list"
        self.parser = parser
        self.options = None
        self.args = None
        # Immediately, initialize the logger to the INFO log level and our
        # logging format which is "LEVEL: message" and not the logging
        # module's default.
        self.__initLogger(logging.INFO)
        if not parser:
            raise Exception("Configuration requires a parser")
        self.options, self.args = self.parser.parse_args()
        # Collection uses privileged paths and SSH identities; refuse to
        # run as a non-root user.
        if os.geteuid() != 0:
            raise Exception("This tool requires root permissions to run.")
        # At this point we know enough about the command line options
        # to test for verbose and if it is set we should re-initialize
        # the logger to DEBUG. This will have the effect of printing
        # stack traces if there are any exceptions in this class.
        if getattr(self.options, "verbose"):
            self.__initLogger(logging.DEBUG)
        self.load_config_file()
        if self.options:
            # Need to parse again to override conf file options
            self.options, self.args = self.parser.parse_args(values=self.options)
            self.from_options(self.options, self.parser)
            # Need to parse out options from the option groups.
            self.from_option_groups(self.options, self.parser)
        if self.args:
            self.from_args(self.args)
        # Finally, all options from the command line and possibly a configuration
        # file have been processed. We need to re-initialize the logger if
        # the user has supplied either --quiet processing or supplied a --log-file.
        # This will ensure that any further log messages throughout the lifecycle
        # of this program go to the log handlers that the user has specified.
        if self.options.log_file or self.options.quiet:
            level = logging.INFO
            if self.options.verbose:
                level = logging.DEBUG
            self.__initLogger(level, self.options.quiet, self.options.log_file)

    def __missing__(self, key):
        # Unknown keys read as None so "%(key)s" interpolation and lookups
        # degrade gracefully instead of raising KeyError.
        return None

    def load_config_file(self):
        """Loads the user-supplied config file or the system default.
        If the user supplies a bad filename we will stop."""
        if self.options and getattr(self.options, "conf_file"):
            if os.path.isfile(self.options.conf_file):
                self.from_file(self.options.conf_file)
            else:
                raise Exception("The specified configuration file does not exist. File=(%s)" %
                                self.options.conf_file)
        elif os.path.isfile(DEFAULT_CONFIGURATION_FILE):
            self.from_file(DEFAULT_CONFIGURATION_FILE)

    def from_option_groups(self,options,parser):
        """Copy every option-group option that has a value into this dict."""
        for optGrp in parser.option_groups:
            for optGrpOpts in optGrp.option_list:
                opt_value = getattr(options, optGrpOpts.dest)
                if opt_value is not None:
                    self[optGrpOpts.dest] = opt_value

    def from_options(self, options, parser):
        """Copy every top-level option that has a value into this dict."""
        for option in parser.option_list:
            if option.dest:
                opt_value = getattr(options, option.dest)
                if opt_value is not None:
                    self[option.dest] = opt_value

    def from_file(self, filename):
        """Load options from the [LogCollector] section of an ini file by
        replaying them through the option parser."""
        import ConfigParser
        cp = ConfigParser.ConfigParser()
        cp.read(filename)
        # we want the items from the LogCollector section only
        try:
            opts = ["--%s=%s" % (k,v)
                    for k,v in cp.items("LogCollector")]
            (new_options, args) = self.parser.parse_args(args=opts, values=self.options)
            self.from_option_groups(new_options, self.parser)
            self.from_options(new_options, self.parser)
        except ConfigParser.NoSectionError:
            # No [LogCollector] section: nothing to load.
            pass

    def from_args(self, args):
        """Interpret the positional argument as the command to run."""
        self.command = args[0]
        if self.command not in ('list', 'collect'):
            raise Exception("%s is not a valid command." % self.command)

    def prompt(self, key, msg):
        """Interactively ask for *key* (visible input) if it is not set."""
        if key not in self:
            self._prompt(raw_input, key, msg)

    def getpass(self, key, msg):
        """Interactively ask for *key* (hidden input) if it is not set."""
        if key not in self:
            self._prompt(getpass.getpass, key, msg)

    # This doesn't ask for CTRL+C to abort because KeyboardInterrupts don't
    # seem to behave the same way every time. Take a look at the link:
    # http://stackoverflow.com/questions/4606942/why-cant-i-handle-a-keyboardinterrupt-in-python
    def _prompt(self, prompt_function, key, msg=None):
        value = get_from_prompt(msg="Please provide the %s (CTRL+D to skip): " % msg,
                                prompter=prompt_function)
        if value:
            self[key] = value
        else:
            # The user skipped the prompt; callers catch SkipException.
            raise self.SkipException

    def ensure(self, key, default=""):
        """Set *key* to *default* when it is missing."""
        if key not in self:
            self[key] = default

    def has_all(self, *keys):
        """True when every key has a truthy value."""
        return all(self.get(key) for key in keys)

    def has_any(self, *keys):
        """True when at least one key has a truthy value."""
        return any(self.get(key) for key in keys)

    def __ensure_path_to_file(self, file_):
        # Create the parent directory of file_ when it does not exist yet.
        dir_ = os.path.dirname(file_)
        if not os.path.exists(dir_):
            logging.info("%s does not exists. It will be created." % dir_)
            os.makedirs(dir_, 0755)

    def __log_to_file(self, file_, level):
        # Attach a file handler to the root logger; logging problems are
        # reported but never fatal.
        try:
            self.__ensure_path_to_file(file_)
            hdlr = logging.FileHandler(filename=file_, mode='w')
            fmt = logging.Formatter(FILE_LOG_FORMAT, FILE_LOG_DSTMP)
            hdlr.setFormatter(fmt)
            logging.root.addHandler(hdlr)
            logging.root.setLevel(level)
        except Exception, e:
            logging.error("Could not configure file logging: %s" % e)

    def __log_to_stream(self, level):
        # Attach a console handler with the short stream format.
        sh = logging.StreamHandler()
        fmt = logging.Formatter(STREAM_LOG_FORMAT)
        sh.setLevel(level)
        sh.setFormatter(fmt)
        logging.root.addHandler(sh)

    def __initLogger(self, logLevel=logging.INFO, quiet=None, logFile=None):
        """
        Initialize the logger based on information supplied from the
        command line or configuration file.
        """
        # If you call basicConfig more than once without removing handlers
        # it is effectively a noop. In this program it is possible to call
        # __initLogger more than once as we learn information about what
        # options the user has supplied in either the config file or
        # command line; hence, we will need to load and unload the handlers
        # to ensure consistently fomatted output.
        log = logging.getLogger()
        for h in log.handlers:
            log.removeHandler(h)
        if quiet:
            if logFile:
                # Case: Batch and log file supplied. Log to only file
                self.__log_to_file(logFile, logLevel)
            else:
                # If the user elected quiet mode *and* did not supply
                # a file. We will be *mostly* quiet but not completely.
                # If there is an exception/error/critical we will print
                # to stdout/stderr.
                logging.basicConfig(level=logging.ERROR, format=STREAM_LOG_FORMAT)
        else:
            if logFile:
                # Case: Not quiet and log file supplied. Log to both file and
                # stdout/stderr
                self.__log_to_file(logFile, logLevel)
                self.__log_to_stream(logLevel)
            else:
                # Case: Not quiet and no log file supplied. Log to only stdout/stderr
                logging.basicConfig(level=logLevel, format=STREAM_LOG_FORMAT)
class CollectorBase(object):
    """Common machinery for collecting a sosreport from some host:
    builds ssh/scp command prefixes and parses sosreport's console
    output for the archive path and checksum.
    """

    def __init__(self,
                 hostname,
                 configuration=None,
                 **kwargs):
        self.hostname = hostname
        # Work on a copy so per-host mutations never leak into the shared
        # configuration object.
        if configuration:
            self.configuration = configuration.copy()
        else:
            self.configuration = {}
        self.prep()
        self.caller = Caller(self.configuration)

    def prep(self):
        """Precompute the ssh/scp command templates for this host."""
        self.configuration['ssh_cmd'] = self.format_ssh_command()
        self.configuration['scp_cmd'] = self.format_ssh_command(cmd="scp")

    def get_key_file(self):
        """Identity file to use for SSH; subclasses may override."""
        return self.configuration.get("key_file")

    def get_ssh_user(self):
        """SSH "user@" prefix; subclasses may override."""
        return "%s@" % DEFAULT_SSH_USER

    def parse_sosreport_stdout(self, stdout):
        """Extract the report archive's filename, full path, and md5
        checksum from sosreport's console output and store them in the
        configuration under "filename", "path", and "checksum".

        Raises Exception when no archive name can be found in *stdout*.
        """
        lines = stdout.splitlines()
        report_lines = [line for line in lines
                        if fnmatch.fnmatch(line, '*sosreport-*tar*')]
        md5_lines = [line for line in lines
                     if fnmatch.fnmatch(line, 'The md5sum is*')]
        # BUG FIX: the original indexed fileAry[0] *before* checking
        # len(fileAry) > 0, so an empty result always raised IndexError
        # and the graceful None-handling branches were unreachable.
        if not report_lines:
            self.configuration["filename"] = None
            self.configuration["path"] = None
            raise Exception("Could not parse sosreport output to determine filename")
        path = report_lines[0].strip()
        filename = os.path.basename(path)
        self.configuration["filename"] = filename
        if os.path.isabs(path):
            self.configuration["path"] = path
        else:
            # Relative name: the archive lands in our local tmp dir.
            self.configuration["path"] = os.path.join(
                self.configuration["local_tmp_dir"], filename)
        if md5_lines:
            # Line looks like "The md5sum is: <sum>"; keep the part after ": ".
            self.configuration["checksum"] = md5_lines[0].partition(": ")[-1]
        else:
            self.configuration["checksum"] = None
        logging.debug("filename(%s)" % self.configuration["filename"])
        logging.debug("path(%s)" % self.configuration["path"])
        logging.debug("checksum(%s)" % self.configuration["checksum"])

    def format_ssh_command(self, cmd="ssh"):
        """Build the ssh or scp command prefix targeting this host,
        honoring ssh_port and the identity file when configured."""
        cmd = "/usr/bin/%s " % cmd
        if "ssh_port" in self.configuration:
            # ssh uses -p for the port, scp uses -P.
            port_flag = "-p" if cmd.startswith("/usr/bin/ssh") else "-P"
            cmd += port_flag + " %(ssh_port)s " % self.configuration
        if self.get_key_file():
            cmd += "-i %s " % self.get_key_file()
        # ignore host key checking
        cmd += "-oStrictHostKeyChecking=no "
        cmd += self.get_ssh_user()
        return cmd + "%s" % self.hostname
class HyperVisorData(CollectorBase):
    """Collects a sosreport from a single hypervisor over SSH.

    Instances are run on worker threads (see LogCollector.get_hypervisor_data);
    *semaphore* bounds concurrency and *queue* gathers clock-offset records.
    """

    def __init__(self,
                 hostname,
                 configuration=None,
                 semaphore=None,
                 queue=None,
                 **kwargs):
        super(HyperVisorData, self).__init__(hostname,configuration)
        self.semaphore = semaphore
        self.queue = queue

    def prep(self):
        """Extend the base prep with the report lists used remotely."""
        self.configuration["hostname"] = self.hostname
        self.configuration['ssh_cmd'] = self.format_ssh_command()
        self.configuration['scp_cmd'] = self.format_ssh_command(cmd="scp")
        # SOS plug-ins to request on a current hypervisor.
        self.configuration['reports'] = ",".join((
            "libvirt",
            "vdsm",
            "general",
            "networking",
            "hardware",
            "process",
            "yum",
            "filesys",
            "devicemapper",
            "selinux",
            "kernel",
        ))
        # these are the reports that will work with rhev2.2 hosts
        self.configuration['bc_reports'] = "vdsm,general,networking,hardware,process,yum,filesys"

    def get_time_diff(self, stdout):
        """Record the clock offset between this host and localhost.

        *stdout* is the remote `date -uR` output (RFC 2822, +0000 assumed);
        the signed offset is appended to the shared queue.
        """
        import datetime
        h_time = datetime.datetime.strptime(
            stdout.strip(), "%a, %d %b %Y %H:%M:%S +0000")
        l_time = datetime.datetime.utcnow()
        logging.debug("host <%s> time: %s" % (self.configuration["hostname"], h_time.isoformat()))
        logging.debug("local <%s> time: %s" % ("localhost", l_time.isoformat(),))
        time_diff = "%(hostname)s " % self.configuration
        if h_time > l_time:
            self.queue.append(time_diff + "+%s" % (h_time - l_time))
        else:
            self.queue.append(time_diff + "-%s" % (l_time - h_time))

    def sosreport(self):
        """Run sosreport on the remote host, choosing flags/plug-ins by the
        installed sos version; returns the remote command's stdout."""
        cmd = """%(ssh_cmd)s "
VERSION=`/bin/rpm -q --qf '[%%{VERSION}]' sos | /bin/sed 's/\.//'`;
if [ "$VERSION" -ge "22" ]; then
/usr/sbin/sosreport --batch -k general.all_logs=True -o %(reports)s
elif [ "$VERSION" -ge "17" ]; then
/usr/sbin/sosreport --no-progressbar -k general.all_logs=True -o %(bc_reports)s
else
/bin/echo "No valid version of sosreport found." 1>&2
exit 1
fi
"
"""
        return self.caller.call(cmd)

    def run(self):
        """Full per-host workflow: run sosreport remotely, fetch and verify
        the archive, and record the host's clock offset. Never raises;
        failures downgrade the exit code to WARN and are logged."""
        try:
            logging.info("collecting information from %(hostname)s" % self.configuration)
            stdout = self.sosreport()
            self.parse_sosreport_stdout(stdout)
            # Each host gets its own directory inside the scratch area.
            self.configuration["hypervisor_dir"] = os.path.join(self.configuration.get("local_scratch_dir"),self.configuration.get("hostname"))
            os.mkdir(self.configuration["hypervisor_dir"])
            self.configuration['archive_name'] = self.configuration.get("hostname") + "-" + os.path.basename(self.configuration.get("path"))
            # Copy the archive here, then remove it (and its .md5) remotely.
            self.caller.call('%(scp_cmd)s:%(path)s %(hypervisor_dir)s/%(archive_name)s')
            self.caller.call('%(ssh_cmd)s "/bin/rm %(path)s*"')
            # setting up a pipeline since passing stdin to communicate doesn't seem to work
            echo_cmd = self.caller.prep('/bin/echo "%(checksum)s %(hypervisor_dir)s/%(archive_name)s"')
            md5sum_cmd = self.caller.prep("/usr/bin/md5sum -c -")
            result = None
            p1 = subprocess.Popen(echo_cmd, stdout=subprocess.PIPE)
            p2 = subprocess.Popen(md5sum_cmd, stdin=p1.stdout, stdout=subprocess.PIPE)
            result = p2.communicate()[0]
            stdout = self.caller.call('%(ssh_cmd)s "/bin/date -uR"')
            try:
                self.get_time_diff(stdout)
            except ValueError, e:
                # Unparseable date output: log it but keep the archive.
                logging.debug("get_time_diff: " + str(e))
            if result and "OK" not in result:
                logging.error("checksum test: " + result)
                raise Exception("%(local_scratch_dir)s/%(filename)s failed checksum test!" % self.configuration)
        except Exception, e:
            ExitCodes.exit_code=ExitCodes.WARN
            logging.error("Failed to collect logs from: %s; %s" % (self.configuration.get("hostname"), e))
            multilog(logging.debug,traceback.format_exc())
            logging.debug("Configuration for %(hostname)s:" % self.configuration)
            multilog(logging.debug,pprint.pformat(self.configuration))
        finally:
            # Always free the connection slot so other hosts can proceed.
            if self.semaphore:
                self.semaphore.release()
            logging.info("finished collecting information from %(hostname)s" % self.configuration)
class ENGINEData(CollectorBase):
    """Runs the engine-side sosreport on localhost, folding in the
    hypervisor/PostgreSQL reports staged in the scratch directory."""

    def build_options(self):
        """Assemble the extra sosreport options: fixed plug-in knobs,
        all java*/jboss* tunables from the configuration, and the optional
        ticket number and upload target. Returns one space-joined string."""
        opts = ["-k rpm.rpmva=off",
                "-k engine.vdsmlogs=%s" % self.configuration.get("local_scratch_dir"),
                "-k engine.prefix=on"]
        # Forward every java*/jboss* setting straight to the SOS plug-ins.
        # (.items() is equivalent to iteritems() here and future-proof.)
        for key, value in self.configuration.items():
            if key.startswith("java") or key.startswith("jboss"):
                opts.append('-k %s="%s"' % (key,value))
        if self.configuration.get("ticket_number"):
            opts.append("--ticket-number=%s" % self.configuration.get("ticket_number"))
        if self.configuration.get("upload"):
            opts.append("--upload=%s" % self.configuration.get("upload"))
        return " ".join(opts)

    def sosreport(self):
        """Run sosreport locally and return a human-readable summary of
        where the archive was placed, its checksum, and its size."""
        self.configuration["reports"] = ",".join((
            "jboss",
            "engine",
            "rpm",
            "libvirt",
            "general",
            "networking",
            "hardware",
            "process",
            "yum",
            "filesys",
            "devicemapper",
            "selinux",
            "kernel",
        ))
        self.configuration["sos_options"] = self.build_options()
        stdout = self.caller.call('/usr/sbin/sosreport --batch -k general.all_logs=True --report --tmp-dir=%(local_tmp_dir)s -o %(reports)s %(sos_options)s')
        self.parse_sosreport_stdout(stdout)
        # Robustness: guard against a missing/None path before stat'ing it
        # (os.path.exists(None) raises TypeError instead of returning False).
        path = self.configuration["path"]
        if path and os.path.exists(path):
            archiveSize = '%.1fM' % (float(os.path.getsize(path)) / (1 << 20))
        else:
            archiveSize = None
        return """Log files have been collected and placed in %s.
The MD5 for this file is %s and its size is %s""" % (
            path,
            self.configuration["checksum"],
            archiveSize)
class PostgresData(CollectorBase):
    """Collects the PostgreSQL sosreport, either on localhost or from a
    remote database host over SSH, staging the result in the local
    scratch directory."""

    def get_key_file(self):
        """
        Override the base get_key_file method to return the SSH key for the
        PostgreSQL system if there is one. Returns None if there isn't one.
        """
        return self.configuration.get("pg_host_key")

    def get_ssh_user(self):
        """
        Override the base get_ssh_user method to return the SSH user for the
        PostgreSQL system if there is one.
        """
        if self.configuration.get("pg_ssh_user"):
            return "%s@" % self.configuration.get("pg_ssh_user")
        else:
            return "%s@" % DEFAULT_SSH_USER

    def sosreport(self):
        """Run the postgresql SOS plug-in on the DB host and stage the
        resulting archive (renamed with a postgresql- prefix) locally."""
        # Only pass DB credentials when a password was actually supplied;
        # otherwise only the PostgreSQL log files get collected.
        if self.configuration.get("pg_pass"):
            opt = '-k postgresql.dbname=%(pg_dbname)s -k postgresql.username=%(pg_user)s -k postgresql.password=%(pg_pass)s'
        else:
            opt = ""
        if self.hostname == "localhost":
            stdout = self.caller.call('/usr/sbin/sosreport --batch --report -o postgresql '
                                      '--tmp-dir=%(local_scratch_dir)s ' + opt)
            self.parse_sosreport_stdout(stdout)
            # Prepend postgresql- to the .md5 file that is produced by SOS
            # so that it is easy to distinguish from the other N reports
            # that are all related to hypervisors. Note, that we
            # only do this in the case of a local PostgreSQL DB because
            # when the DB is remote the .md5 file is not copied.
            os.rename("%s.md5" % (self.configuration["path"]),
                      os.path.join(self.configuration["local_scratch_dir"],
                                   "postgresql-%s.md5" % self.configuration["filename"]))
        else:
            # The PG database is on a remote host.
            # BUG FIX: the remote command's opening double quote was never
            # closed, so shlex.split() in Caller.prep raised
            # "No closing quotation" and remote collection always failed.
            # The closing quote now follows the options, matching the other
            # remote invocations in this file.
            cmd = ('%(ssh_cmd)s "/usr/sbin/sosreport --batch --report -o postgresql '
                   + opt + '"')
            stdout = self.caller.call(cmd)
            self.parse_sosreport_stdout(stdout)
            self.caller.call('%(scp_cmd)s:%(path)s %(local_scratch_dir)s')
            self.caller.call('%(ssh_cmd)s "rm %(path)s*"')
            # Prepend postgresql- to the PostgreSQL SOS report
            # so that it is easy to distinguished from the other N reports
            # that are all related to hypervisors.
            os.rename(os.path.join(self.configuration["local_scratch_dir"], self.configuration["filename"]),
                      os.path.join(self.configuration["local_scratch_dir"], "postgresql-%s" % self.configuration["filename"]))
class LogCollector(object):
    """Orchestrates the whole collection: selecting hypervisors, gathering
    their reports in parallel, dumping PostgreSQL, and finally running the
    engine-side sosreport."""

    def __init__(self, configuration):
        self.conf = configuration
        if self.conf.command is None:
            raise Exception("No command specified.")

    def write_time_diff(self, queue):
        """Write the per-host clock-offset records collected by
        HyperVisorData into DEFAULT_TIME_SHIFT_FILE in the scratch dir."""
        local_scratch_dir = self.conf.get("local_scratch_dir")
        with open(os.path.join(local_scratch_dir, DEFAULT_TIME_SHIFT_FILE), "w") as fd:
            for record in queue:
                fd.write(record + "\n")

    def _get_hypervisors_from_api(self):
        """Fetch all hypervisors from the engine REST API, prompting for
        any missing credentials. Returns a set of (dc, cluster, host)
        tuples; an empty set on API failure (exit code downgraded to WARN).
        Raises Configuration.SkipException when the user skips a prompt."""
        if not self.conf:
            raise Exception("No configuration.")
        try:
            self.conf.prompt("engine", msg="hostname of oVirt Engine")
            self.conf.prompt("user", msg="REST API username for oVirt Engine")
            self.conf.getpass("passwd", msg="REST API password for the %s oVirt Engine user" % self.conf.get("user"))
        except Configuration.SkipException:
            logging.info("Will not collect hypervisor list from oVirt Engine API.")
            raise
        try:
            return hypervisors.get_all(self.conf.get("engine"),
                                       self.conf.get("user"),
                                       self.conf.get("passwd"))
        except Exception, e:
            ExitCodes.exit_code=ExitCodes.WARN
            logging.error("_get_hypervisors_from_api: %s" % e)
            return set()

    @staticmethod
    def _sift_patterns(list_):
        """Returns two sets: patterns and others. A pattern is any string
        that contains any of the following: * [ ] ?"""
        patterns = set()
        others = set()
        try:
            for candidate in list_:
                if any(c in candidate for c in ('*', '[', ']', '?')):
                    patterns.add(candidate)
                else:
                    others.add(candidate)
        except TypeError:
            # list_ is None when the corresponding option was not given.
            pass
        return patterns, others

    def _filter_hosts(self, which, pattern):
        """Return the subset of conf["hosts"] whose *which* component
        (host, cluster, or datacenter name) matches the fnmatch pattern."""
        logging.debug("filtering host list with %s against %s name" % (pattern, which))
        if which == "host":
            return set([(dc, cl, h) for dc, cl, h in self.conf.get("hosts")
                        if fnmatch.fnmatch(h, pattern)])
        elif which == "cluster":
            return set([(dc, cl, h) for dc, cl, h in self.conf.get("hosts")
                        if fnmatch.fnmatch(cl, pattern)])
        elif which == "datacenter":
            return set([(dc, cl, h) for dc, cl, h in self.conf.get("hosts")
                        if fnmatch.fnmatch(dc, pattern)])

    def set_hosts(self):
        """Fetches the hostnames for the supplied cluster or datacenter.
        Filtering is applied if patterns are found in the --hosts, --cluster
        or --datacenters options. There can be multiple patterns in each
        option. Patterns found within the same option are inclusive and
        each option set together is treated as an intersection.
        """
        self.conf["hosts"] = set()
        host_patterns, host_others = self._sift_patterns(self.conf.get("hosts_list"))
        datacenter_patterns = self.conf.get("datacenter", [])
        cluster_patterns = self.conf.get("cluster", [])
        if host_patterns:
            self.conf['host_pattern'] = host_patterns
        # Only query the API when some filter needs the full host list, or
        # when no explicit hostnames were supplied at all.
        if any((host_patterns,
                datacenter_patterns,
                cluster_patterns)) or not host_others:
            self.conf["hosts"] = self._get_hypervisors_from_api()
        host_filtered = set()
        cluster_filtered = set()
        datacenter_filtered = set()
        if host_patterns:
            for pattern in host_patterns:
                host_filtered |= self._filter_hosts("host", pattern)
            self.conf['hosts'] &= host_filtered
        if datacenter_patterns:
            for pattern in datacenter_patterns:
                datacenter_filtered |= self._filter_hosts("datacenter", pattern)
            self.conf['hosts'] &= datacenter_filtered
        if cluster_patterns:
            for pattern in cluster_patterns:
                cluster_filtered |= self._filter_hosts("cluster", pattern)
            self.conf['hosts'] &= cluster_filtered
        # build a set of hostnames that are already in the target host list.
        # So that we can prevent duplication in the next step
        hostnames = set((t[2] for t in self.conf['hosts']))
        for hostname in host_others:
            if hostname not in hostnames:
                self.conf['hosts'].add(("", "", hostname))
        return bool(self.conf.get("hosts"))

    def list_hosts(self):
        """Print the selected hosts as a table of (DC, cluster, host)."""
        def get_host(tuple_):
            return tuple_[2]
        host_list = list(self.conf.get("hosts"))
        host_list.sort(key=get_host)
        fmt = "%-20s | %-20s | %s"
        print "Host list (datacenter=%(datacenter)s, cluster=%(cluster)s, host=%(host_pattern)s):" % self.conf
        print fmt % ("Data Center", "Cluster", "Hostname/IP Address")
        print "\n".join(fmt % (dc, cluster, host) for dc, cluster, host in host_list)

    def get_hypervisor_data(self):
        """Collect sosreports from all selected hypervisors on worker
        threads, bounded by --max-connections; asks for confirmation
        first unless --quiet was given."""
        hosts = self.conf.get("hosts")
        if hosts:
            if not self.conf.get("quiet"):
                continue_ = get_from_prompt(
                    msg="About to collect information from %d hypervisors. Continue? (Y/n): " % len(hosts),
                    default='y')
                if continue_ not in ('y', 'Y'):
                    logging.info("Aborting hypervisor collection...")
                    return
            logging.info("Gathering information from selected hypervisors...")
            max_connections = self.conf.get("max_connections", 10)
            import threading
            from collections import deque
            # max_connections may be defined as a string via a .rc file
            sem = threading.Semaphore(int(max_connections))
            time_diff_queue = deque()
            threads = []
            for datacenter, cluster, host in hosts:
                # Block until a connection slot is free; HyperVisorData.run
                # releases the semaphore in its finally clause.
                sem.acquire(True)
                collector = HyperVisorData(host.strip(),
                                           configuration=self.conf,
                                           semaphore=sem,
                                           queue=time_diff_queue)
                thread = threading.Thread(target=collector.run)
                thread.start()
                threads.append(thread)
            for thread in threads:
                thread.join()
            self.write_time_diff(time_diff_queue)

    def get_postgres_data(self):
        """Dump the engine PostgreSQL database and/or its log files,
        unless --no-postgresql was given. Failures downgrade to WARN."""
        if self.conf.get("no_postgresql") == False:
            try:
                try:
                    self.conf.getpass("pg_pass", msg="password for the PostgreSQL user, %s, to dump the %s PostgreSQL database instance" %
                                      (self.conf.get('pg_user'),
                                       self.conf.get('pg_dbname')))
                    logging.info("Gathering PostgreSQL the oVirt Engine database and log files from %s..." % (self.conf.get("pg_dbhost")))
                except Configuration.SkipException:
                    # No password supplied: still collect the PG log files.
                    logging.info("PostgreSQL oVirt Engine database will not be collected.")
                    logging.info("Gathering PostgreSQL log files from %s..." % (self.conf.get("pg_dbhost")))
                collector = PostgresData(self.conf.get("pg_dbhost"),
                                         configuration=self.conf)
                collector.sosreport()
            except Exception, e:
                ExitCodes.exit_code=ExitCodes.WARN
                logging.error("Could not collect PostgreSQL information: %s" % e)
        else:
            ExitCodes.exit_code=ExitCodes.NOERR
            logging.info("Skipping postgresql collection...")

    def get_engine_data(self):
        """Run the engine-side sosreport (which also bundles the scratch
        directory contents) and log its summary output."""
        logging.info("Gathering oVirt Engine information...")
        if self.conf.get("enable_jmx"):
            try:
                self.conf.getpass("jboss.pass", msg="password for the JBoss JMX user")
            except Configuration.SkipException:
                logging.info("JBoss JMX information will not be collected because the JMX user's password was not supplied.")
        collector = ENGINEData("localhost",
                               configuration=self.conf)
        stdout = collector.sosreport()
        logging.info(stdout)
def parse_password(option, opt_str, value, parser):
    """optparse callback: read a password interactively (hidden input)
    instead of taking it from the command line."""
    secret = getpass.getpass("Please enter %s: " % (option.help))
    setattr(parser.values, option.dest, secret)
if __name__ == '__main__':
    def comma_separated_list(option, opt_str, value, parser):
        """optparse callback: split a comma separated value into a list."""
        setattr(parser.values, option.dest, [v.strip() for v in value.split(",")])

    usage_string = "\n".join(("Usage: %prog [options] list",
                              " %prog [options] collect"))
    epilog_string = """\nReturn values:
0: The program ran to completion with no errors.
1: The program encountered a critical failure and stopped.
2: The program encountered a problem gathering data but was able to continue.
"""
    # optparse re-wraps the epilog by default; print it verbatim instead.
    OptionParser.format_epilog = lambda self, formatter: self.epilog
    parser = OptionParser(usage_string,
                          version="Version " + versionNum,
                          epilog=epilog_string)

    # --- General options -------------------------------------------------
    parser.add_option("", "--conf-file", dest="conf_file",
                      help="path to configuration file (default=%s)" % DEFAULT_CONFIGURATION_FILE,
                      metavar="PATH")
    parser.add_option("", "--local-tmp", dest="local_tmp_dir",
                      help="directory to copy reports to locally (default=%s)" % DEFAULT_SCRATCH_DIR, metavar="PATH",
                      default=DEFAULT_SCRATCH_DIR)
    parser.add_option("", "--ticket-number", dest="ticket_number",
                      help="ticket number to pass with the sosreport",
                      metavar="TICKET")
    parser.add_option("", "--upload", dest="upload",
                      help="Upload the report to Red Hat (use exclusively if advised from a Red Hat support representative).",
                      metavar="FTP_SERVER")
    parser.add_option("", "--quiet", dest="quiet",
                      action="store_true", default=False,
                      help="reduce console output (default=False)")
    parser.add_option("", "--log-file",
                      dest="log_file",
                      help="path to log file (default=%s)" % DEFAULT_LOG_FILE,
                      metavar="PATH",
                      default=DEFAULT_LOG_FILE)
    parser.add_option("-v", "--verbose", dest="verbose",
                      action="store_true", default=False)

    # --- Engine / hypervisor selection options ---------------------------
    engine_group = OptionGroup(parser,
                               "oVirt Engine Configuration",
                               """The options in the oVirt Engine configuration group can be used to filter log collection from one or more hypervisors.
If the --no-hypervisors option is specified, data is not collected from any hypervisor.""")
    engine_group.add_option("", "--no-hypervisors",
                            help="skip collection from hypervisors (default=False)",
                            dest="no_hypervisor",
                            action="store_true",
                            default=False)
    engine_group.add_option("-u", "--user", dest="user",
                            help="username to use with the REST API. This should be in UPN format.",
                            metavar="user@engine.example.com")
    engine_group.add_option("-p",
                            "--passwd",
                            dest="passwd",
                            help=SUPPRESS_HELP)
    engine_group.add_option("-r", "--engine", dest="engine", metavar="engine.example.com",
                            help="hostname or IP address of the oVirt Engine (default=localhost:8443)",
                            default="localhost:8443")
    engine_group.add_option("-c", "--cluster", dest="cluster",
                            help="pattern, or comma separated list of patterns to filter the host list by cluster name (default=None)",
                            action="callback",
                            callback=comma_separated_list,
                            type="string",
                            default=None, metavar="CLUSTER")
    engine_group.add_option("-d", "--data-center", dest="datacenter",
                            help="pattern, or comma separated list of patterns to filter the host list by data center name (default=None)",
                            action="callback",
                            callback=comma_separated_list,
                            type="string",
                            default=None, metavar="DATACENTER")
    engine_group.add_option("-H", "--hosts", dest="hosts_list", action="callback",
                            callback=comma_separated_list,
                            type="string",
                            help="""comma separated list of hostnames, hostname patterns, FQDNs, FQDN patterns,
IP addresses, or IP address patterns from which the log collector should collect hypervisor logs (default=None)""")

    # --- SSH options ------------------------------------------------------
    ssh_group = OptionGroup(parser, "SSH Configuration",
                            """The options in the SSH configuration group can be used to specify the maximum
number of concurrent SSH connections to hypervisor(s) for log collection, the
SSH port, and a identity file to be used.""")
    ssh_group.add_option("", "--ssh-port", dest="ssh_port",
                         help="the port to ssh and scp on", metavar="PORT",
                         default=22)
    ssh_group.add_option("-k", "--key-file", dest="key_file",
                         help="""the identity file (private key) to be used for accessing the hypervisors (default=%s).
If a identity file is not supplied the program will prompt for a password. It is strongly recommended to
use key based authentication with SSH because the program may make multiple SSH connections
resulting in multiple requests for the SSH password.""" % DEFAULT_SSH_KEY,
                         metavar="KEYFILE",
                         default=DEFAULT_SSH_KEY)
    ssh_group.add_option("", "--max-connections", dest="max_connections",
                         help="max concurrent connections for fetching hypervisor logs (default = 10)",
                         default=10)

    # --- PostgreSQL options ----------------------------------------------
    db_group = OptionGroup(parser, "PostgreSQL Database Configuration",
                           """The log collector will connect to the oVirt Engine PostgreSQL database and dump the data
for inclusion in the log report unless --no-postgresql is specified. The PostgreSQL user ID and database
name can be specified if they are different from the defaults. If the PostgreSQL database
is not on the localhost set pg-dbhost, provide a pg-ssh-user, and optionally supply pg-host-key and the log collector
will gather remote PostgreSQL logs. The PostgreSQL SOS plug-in must be installed on pg-dbhost for
successful remote log collection.""")
    db_group.add_option("", "--no-postgresql", dest="no_postgresql",
                        help="This option causes the tool to skip the postgresql collection (default=false)",
                        action="store_true",
                        default=False)
    db_group.add_option("", "--pg-user", dest="pg_user",
                        help="PostgreSQL database user name (default=postgres)",
                        metavar="postgres",
                        default="postgres")
    db_group.add_option("",
                        "--pg-pass",
                        dest="pg_pass",
                        help=SUPPRESS_HELP)
    db_group.add_option("", "--pg-dbname", dest="pg_dbname",
                        help="PostgreSQL database name (default=engine)",
                        metavar="engine",
                        default="engine")
    db_group.add_option("", "--pg-dbhost", dest="pg_dbhost",
                        help="PostgreSQL database hostname or IP address (default=localhost)",
                        metavar="localhost",
                        default="localhost")
    db_group.add_option("", "--pg-ssh-user", dest="pg_ssh_user",
                        help="""the SSH user that will be used to connect to the
server upon which the remote PostgreSQL database lives. (default=root)""",
                        metavar="root",
                        default='root')
    db_group.add_option("", "--pg-host-key", dest="pg_host_key",
                        help="""the identity file (private key) to be used for accessing the host
upon which the PostgreSQL database lives (default=not needed if using localhost)""",
                        metavar="none")

    # --- JBoss / SOS report options --------------------------------------
    jboss_group = OptionGroup(parser,
                              "SOSReport Options",
                              """The JBoss SOS plug-in will always be executed. To activate data collection
from JBoss's JMX console enable-jmx, java-home, jboss-user, and jboss-pass must
also be specified. If no jboss-pass is supplied in the configuration file then
it will be asked for prior to collection time.""")
    jboss_group.add_option("", "--jboss-home", dest="jboss.home",
                           help="JBoss's installation dir (default=/var/lib/jbossas)",
                           metavar="/path/to/jbossas",
                           default="/var/lib/jbossas")
    jboss_group.add_option("", "--java-home", dest="jboss.javahome",
                           help="Java's installation dir (default=/usr/lib/jvm/java)",
                           metavar="/path/to/java",
                           default="/usr/lib/jvm/java")
    jboss_group.add_option("", "--jboss-profile",
                           dest="jboss.profile",
                           action="callback",
                           type="string",
                           help="comma separated list of server profiles to limit collection (default='engine-slimmed')",
                           callback=comma_separated_list,
                           metavar="PROFILE1, PROFILE2",
                           default="engine-slimmed")
    jboss_group.add_option("", "--enable-jmx", dest="enable_jmx",
                           help="Enable the collection of run-time metrics from the oVirt Engine JBoss JMX interface",
                           action="store_true",
                           default=False)
    jboss_group.add_option("", "--jboss-user", dest="jboss.user",
                           help="JBoss JMX username (default=admin)",
                           metavar="admin",
                           default="admin")
    jboss_group.add_option("",
                           "--jboss-pass",
                           dest="jboss.pass",
                           help=SUPPRESS_HELP)
    jboss_group.add_option("", "--jboss-logsize", dest="jboss.logsize",
                           help="max size (MiB) to collect per log file (default=15)",
                           metavar="15",
                           default=15)
    jboss_group.add_option("", "--jboss-stdjar", dest="jboss.stdjar",
                           metavar="on or off",
                           help="collect jar statistics for JBoss standard jars.(default=on)")
    jboss_group.add_option("", "--jboss-servjar", dest="jboss.servjar",
                           metavar="on or off",
                           help="collect jar statistics from any server configuration dirs (default=on)")
    jboss_group.add_option("", "--jboss-twiddle", dest="jboss.twiddle",
                           metavar="on or off",
                           help="collect twiddle data (default=on)")
    jboss_group.add_option("", "--jboss-appxml",
                           dest="jboss.appxml",
                           action="callback",
                           type="string",
                           callback=comma_separated_list,
                           help="""comma separated list of application's whose XML descriptors you want (default=all)""",
                           metavar="APP, APP2",
                           default="all")

    parser.add_option_group(engine_group)
    parser.add_option_group(jboss_group)
    parser.add_option_group(ssh_group)
    parser.add_option_group(db_group)

    try:
        conf = Configuration(parser)
        collector = LogCollector(conf)
        # We must ensure that the working directory exits before
        # we start doing anything.
        if os.path.exists(conf["local_tmp_dir"]):
            if not os.path.isdir(conf["local_tmp_dir"]):
                raise Exception('%s is not a directory.' % (conf["local_tmp_dir"]))
        else:
            logging.info("%s does not exist. It will be created." % (conf["local_tmp_dir"]))
            os.makedirs(conf["local_tmp_dir"])
        # We need to make a temporary scratch directory wherein
        # all of the output from VDSM and PostgreSQL SOS plug-ins
        # will be dumped. The contents of this directory will be scooped
        # up by the oVirt Engine SOS plug-in via the engine.vdsmlogs option
        # and included in a single .xz file.
        conf["local_scratch_dir"] = os.path.join(conf["local_tmp_dir"], 'RHEVH-and-PostgreSQL-reports')
        if not os.path.exists(conf["local_scratch_dir"]):
            os.makedirs(conf["local_scratch_dir"])
        else:
            if len(os.listdir(conf["local_scratch_dir"])) != 0:
                raise Exception("""the scratch directory for temporary storage of hypervisor reports is not empty.
It should be empty so that reports from a prior invocation of the log collector are not collected again.
The directory is: %s'""" % (conf["local_scratch_dir"]))
        # Dispatch on the requested command.
        if conf.command == "collect":
            if not conf.get("no_hypervisor"):
                if collector.set_hosts():
                    collector.get_hypervisor_data()
                else:
                    logging.info("No hypervisors were selected, therefore no hypervisor data will be collected.")
            else:
                logging.info("Skipping hypervisor collection...")
            collector.get_postgres_data()
            collector.get_engine_data()
        elif conf.command == "list":
            if collector.set_hosts():
                collector.list_hosts()
            else:
                logging.info("No hypervisors were found, therefore no hypervisor data will be listed.")
        # Clean up the temp directory
        shutil.rmtree(conf["local_scratch_dir"])
    except KeyboardInterrupt, k:
        print "Exiting on user cancel."
    except Exception, e:
        multilog(logging.error, e)
        print "Use the -h option to see usage."
        logging.debug("Configuration:")
        try:
            # conf may not exist yet if Configuration() itself failed.
            logging.debug("command: %s" % conf.command)
            #multilog(logging.debug, pprint.pformat(conf))
        except:
            pass
        multilog(logging.debug, traceback.format_exc())
        sys.exit(ExitCodes.CRITICAL)
    sys.exit(ExitCodes.exit_code)
ovirt-log-collector-3.1.0/src/rhev/engine-log-collector.8 0000664 0032753 0032753 00000032015 11763605557 023473 0 ustar oschreib oschreib .\" engine-log-collector - oVirt Engine Tool for Collecting Data
.TH "engine-log-collector" "8" "" "Keith Robertson" ""
.SH "NAME"
engine\-log\-collector \- oVirt Log Collector
.SH "SYNOPSIS"
\fBengine\-log\-collector\fP [options] list
.PP
\fBengine\-log\-collector\fP [options] collect
.SH "DESCRIPTION"
.PP
The \fBengine\-log\-collector\fP command gathers data from many different components (logs, databases, and environmental information) associated with an instance of an oVirt Enterprise Virtualization Engine Manager. The tool is intended to be run as the root user from the Linux system on which the engine is running.\&
.PP
The \fBengine\-log\-collector\fP will, by default, attempt to interact with the oVirt Engine REST API. This command typically gets the oVirt Engine's hostname and port number (engine=) and user name (user=) from the \fB/etc/engine/logcollector.conf\fP file. Alternatively, you can provide the user name and password from the command line. The user name and password are authenticated by the authentication service associated with the oVirt Engine. Currently, a special local account (admin), Red Hat IPA server, and Microsoft Active Directory server is supported for this task.
.PP
Central to the \fBengine\-log\-collector\fP command is a series of plug\-ins to the \fBsosreport\fP(1) application. These plug\-ins and other components gather information about hypervisors managed by the oVirt Engine into a single archive (a sort of \fBsosreport\fP(1) of sosreports).
.PP
The \fBlist\fP option lists hypervisors controlled by the oVirt Engine, while \fBcollect\fP (default) actually runs the collection. The result of running \fBengine\-log\-collector\fP is a compressed archive that contains logs and other information from some or all of the following:
.\' Describe engine\-slimmed
.IP "\fB* Hypervisor Log Collection\fP"
Logs from all nodes managed by the oVirt Engine are gathered by default. The \-\-no\-hypervisors option lets you choose to not collect any hypervisor information. Other options let you limit log collection to specific clusters, data centers or hosts. Some options require that the log collector communicate with the oVirt Engine RESTful API. The communication is needed to retrieve the list of hosts in a cluster or datacenter, and is transparent to the user.\&
.IP "\fB* PostgreSQL Log Collection\fP"
Log collector always tries to get some PostgreSQL information. Using a PostgreSQL \fBsosreport\fP(1) plug\-in, log collector gathers all logs in POSTGRE_HOME and optionally dumps a designated database if a username and password are supplied. PostgreSQL reports are prefixed with postgre\-. For example:\&
postgresql\-sosreport\-rhel6\-20110613221045\-f914.tar.xz.\&
.IP "\fB* JBoss Log Collection\fP"
The log collector always attempts to gather some information from JBoss. The JBoss \fBsosreport\fP(1) plug\-in is extensive and it will capture logs, configuration information, datasource files and other information by default. If the user supplies a JMX console administrator user ID and password even more information will be captured (e.g. memory and thread information). The oVirt Engine installer does not configure a JMX console administrator by default. Follow the instructions in the JBoss manuals to securely enable the JMX console administrator to collect this useful information, otherwise press Control\-D to skip the collection of this information when prompted.\&
.IP "\fB* Clock Skew\fP"
To deal with cases where hypervisors are not synchronized with NTP, a file called time_diff.txt is created to list the clock skew of each hypervisor relative to the oVirt Engine. This file is included in the report whenever you elect to collect information from a hypervisor.
.PP
.SH "GENERAL OPTIONS"
The following are general options you can use with this command:\&
.IP "\fB\-\-version\fP"
Show the program's version number and exit.\&
.IP "\fB\-h, \-\-help\fP"
Show the help message and exit.\&
.IP "\fB\-\-local\-tmp=PATH\fP"
Local directory where reports are copied (default=/tmp/logcollector).\&
.IP "\fB\-\-config\-file=PATH\fP"
Path to the configuration file (default=/etc/engine/logcollector.conf).\&
.IP "\fB\-\-ticket\-number=TICKET\fP"
Ticket number (also called a case number) to pass with the \fBsosreport\fP(1).\&
.IP "\fB\-\-upload=FTP_SERVER\fP"
Upload the report produced by this command to Red Hat (use exclusively if advised from a Red Hat support representative). Replace FTP_SERVER with the location of the FTP server.\&
.IP "\fB\-\-quiet\fP"
Reduce the amount of console output (default=False).\&
.IP "\fB\-\-log\-file=PATH\fP"
Path to \fBengine\-log\-collector\fP log file (default=/var/log/engine\-log\-collector.log).\&
.IP "\fB\-v, \-\-verbose\fP"
Display verbose output.\&
.SH "oVirt Engine CONFIGURATION OPTIONS"
The options in the oVirt Engine configuration group can be used to filter log collection from one or more hypervisors. If the \-\-no\-hypervisors option is specified, data is not collected from any hypervisor. Wild card globbing (*, ?, []) can be used to match names.\&
.IP "\fB\-\-no\-hypervisors\fP"
Skip all collection from hypervisors (default=False).\&
.IP "\fB\-u user@engine.example.com, \-\-user=user@engine.example.com\fP"
Indicates the user name to use with the REST API. This should be in UPN format.\&
.IP "\fB\-r engine.example.com, \-\-engine=engine.example.com\fP"
Hostname or IP address of the oVirt Engine (default=localhost:8443).\&
.IP "\fB\-c CLUSTER, \-\-cluster=CLUSTER\fP"
Replace CLUSTER with a pattern or comma\-separated list of patterns, to filter the host list by cluster name (default=None).\&
.IP "\fB\-d DATACENTER, \-\-data\-center=DATACENTER\fP"
Replace DATACENTER with a pattern, or comma separated list of patterns, to filter the host list by data center name (default=None).\&
.IP "\fB\-H HOST_LIST, \-\-hosts=HOST_LIST\fP"
Replace HOST_LIST with a pattern, or comma separated list of hostnames, hostname patterns, FQDNs, FQDN patterns, IP addresses, or IP address patterns from which the log collector should collect hypervisor logs (default=None).\&
.SH "SOSREPORT OPTIONS"
The JBoss \fBsosreport\fP(1) plug\-in is always executed. To activate data collection from JBoss's JMX console java\-home, jboss\-user, and jboss\-pass must also be specified. If no jboss\-pass is supplied in the configuration file then it will be asked for prior to collection time.
.IP "\fB\-\-jboss\-home=/path/to/jbossas\fP"
JBoss's installation dir (default=/var/lib/jbossas).\&
.IP "\fB\-\-java\-home=/path/to/java\fP"
Java's installation dir (default=/usr/lib/jvm/java).\&
.IP "\fB\-\-jboss\-profile=PROFILE1,PROFILE2\fP"
Comma separated list of server profiles to limit
collection (default=engine\-slimmed).\&
.IP "\fB\-\-jboss\-user=admin\fP"
JBoss JMX username (default=admin).\&
.IP "\fB\-\-jboss\-logsize=15\fP"
Maximum size (MiB) to collect per log file (default=15).\&
.IP "\fB\-\-jboss\-stdjar=on or off\fP"
Collect jar statistics for JBoss standard jars (default=on).\&
.IP "\fB\-\-jboss\-servjar=on or off\fP"
Collect jar statistics from any server configuration
dirs (default=on).\&
.IP "\fB\-\-jboss\-twiddle=on or off\fP"
Collect JBoss twiddle data (default=on).\&
.IP "\fB\-\-jboss\-appxml=APP, APP2\fP"
Comma separated list of applications whose XML descriptors you want (default=all).\&
.SH "SSH CONFIGURATION"
The options in the SSH configuration group can be used to specify the maximum number of concurrent SSH connections to hypervisor(s) for log collection, the SSH port, and an identity file to be used.\&
.IP "\fB\-\-ssh\-port=PORT\fP"
The port used for ssh and scp communications.\&
.IP "\fB\-k KEYFILE, \-\-key\-file=KEYFILE\fP"
The identity file (private key) to be used for accessing the hypervisors (default=/etc/pki/engine/keys/engine_id_rsa). If an identity file is not supplied the program will prompt for a password. It is strongly recommended to use key\-based authentication with SSH because the program may make multiple SSH connections resulting in multiple requests for the SSH password.\&
.IP "\fB\-\-max\-connections=MAX_CONNECTIONS\fP"
Maximum concurrent connections for fetching hypervisor logs (default=10).\&
.SH "POSTGRESQL DATABASE CONFIGURATION"
The log collector will connect to the oVirt Engine PostgreSQL database and dump the data for inclusion in the log report, unless \-\-no\-postgresql is specified. The PostgreSQL user ID and database name can be specified if they are different from the defaults. If the PostgreSQL database is not on the localhost, set pg\-dbhost, provide a pg\-ssh\-user, and optionally supply pg\-host\-key and the log collector will gather remote PostgreSQL logs. The PostgreSQL \fBsosreport\fP(1) plug\-in must be installed on pg\-dbhost for successful remote log collection.\&
.IP "\fB\-\-no\-postgresql\fP"
This option causes the tool to skip the postgresql collection (default=false).\&
.IP "\fB\-\-pg\-user=postgres\fP"
PostgreSQL database user name (default=postgres).\&
.IP "\fB\-\-pg\-dbname=engine\fP"
PostgreSQL database name (default=engine).\&
.IP "\fB\-\-pg\-dbhost=localhost\fP"
PostgreSQL database hostname or IP address (default=localhost).\&
.IP "\fB\-\-pg\-ssh\-user=root\fP"
The SSH user that will be used to connect to the server upon which the remote PostgreSQL database lives (default=root).\&
.IP "\fB\-\-pg\-host\-key=none\fP"
The identity file (private key) to be used for accessing the host upon which the PostgreSQL database lives (default=not needed if using localhost).\&
.SH "EXAMPLES"
Before running \fBengine\-log\-collector\fP to collect data, you should run one or more list commands, to hone in on the data you want. Here's an example of the output using a list option to see all hosts managed by the oVirt Engine:
.PP
# \fBengine\-log\-collector\fP list
.br
Please provide the password for engine (CTRL+D to abort): ********
.br
Host list (datacenter=None, cluster=None, host=None):
.br
Data Center | Cluster | Hostname/IP Address
.br
Legacy | LegacyCluster | 192.168.122.11
.br
Legacy | NewCluster | 192.168.125.42
Use the \-\-hosts= option to limit output to a particular host or group of hosts (using wildcards, as needed).
.PP
# \fBengine\-log\-collector\fP list \-\-hosts=*.11
.br
Please provide the password for engine (CTRL+D to abort):
.br
Host list (datacenter=None, cluster=None, host=set(['*.11'])):
.br
Data Center | Cluster | Hostname/IP Address
.br
Legacy | LegacyCluster | 192.168.122.11
.PP
Instead of having log collector collect logs from all hypervisors managed by the oVirt Engine (default), you can use the same options you used with \fBlist\fP to limit data collection to specific hosts (or datacenters or clusters). Here, all hosts with IP addresses ending in .11 are matched:\&
.PP
# \fBengine\-log\-collector\fP collect \-\-hosts=*.11
.br
Please provide the password for engine (CTRL+D to abort):
.br
About to collect information from 1 hypervisors. Continue? (Y/n): \fBY\fP
.br
INFO: Gathering information from selected hypervisors...
.br
INFO: Collecting information from 192.168.122.11
.br
.PP
To gather data, from multiple hosts, provide a comma\-separated list of addresses or host names. The following example gathers data from all hosts ending in .11 or .15:\&
.PP
# \fBengine\-log\-collector\fP collect \-\-hosts=*.11,*.15
.PP
Use the cluster option to gather data based on cluster name. Here, information on all hosts with IP addresses ending in .11 or .15 are matched from any cluster beginning with the letter L (using * as a wild card):\&
.PP
# \fBengine\-log\-collector\fP collect \-\-cluster=L* \-\-hosts=*.11,*.15
.PP
Likewise, you can gather data based on data center. Here, all hosts from the example.com domain are matched from any data center beginning with the letter L (using * as a wild card):\&
.PP
# \fBengine\-log\-collector\fP collect \-\-data\-center=L* \-\-hosts=*.example.com
.br
.SH "CONFIGURATION FILE"
To get configuration information, \fBengine\-log\-collector\fP refers to the \fB/etc/engine/logcollector.conf\fP configuration file. To set defaults for any of the options described in this man page, uncomment the settings you want in this file. Here are examples of a few lines from that file:
.PP
[LogCollector]
.br
### oVirt Engine Configuration:
.br
## username to use with the REST API
.br
user=joe@example.com
.br
# password to use with the REST API
.br
passwd=L1ghtNingFst1!
.br
## hostname or IP address of the oVirt Engine
.br
engine=myengine.example.com:8443
.SH "ERRORS"
* If JBoss is down for the oVirt Engine, it limits the scope of what you can collect.\&
.br
.SH "RETURN VALUES"
.IP "\fB0\fP"
The program ran to completion with no errors.\&
.IP "\fB1\fP"
The program encountered a critical failure and stopped.\&
.IP "\fB2\fP"
The program encountered a problem gathering data but was able to continue.\&
.PP
.SH "FILES"
.nf
/etc/engine/logcollector.conf
/tmp/logcollector
/var/log/engine\-log\-collector.log
/var/lib/jbossas
/usr/lib/jvm/java
/etc/pki/engine/keys/engine_id_rsa
.fi
.SH "SEE ALSO"
\fBsosreport\fP(1)
.SH "AUTHOR"
.nf
Keith Robertson
ovirt-log-collector-3.1.0/src/rhev/TODO 0000664 0032753 0032753 00000000000 11763605557 020047 0 ustar oschreib oschreib ovirt-log-collector-3.1.0/src/rhev/tests.py 0000664 0032753 0032753 00000004322 11763605557 021106 0 ustar oschreib oschreib #!/usr/bin/env python
import unittest
import logcollector
class HyperVisorDataTest(unittest.TestCase):
    """Unit tests for logcollector.HyperVisorData SSH command formatting.

    Each test builds a HyperVisorData from a small dict of configuration
    overrides and checks the exact ssh command string it produces.
    """

    def setUp(self):
        # Default fixture: a host with no configuration overrides.
        self._prep({})

    def _prep(self, params):
        """(Re)create the HyperVisorData under test from *params*.

        *params* may contain a "hostname" entry plus any Configuration
        keys the formatter reads (ssh_user, ssh_port, key_file).
        """
        self.conf = logcollector.Configuration()
        self.conf.update(params)
        self.hvd = logcollector.HyperVisorData(
            hostname=params.get("hostname", "dummy_host"),
            configuration=self.conf,
            semaphore=None,
            queue=None)

    def test_format_ssh_user(self):
        # None/empty yield no user prefix; a trailing '@' is added exactly once.
        self.assertEqual(self.hvd.format_ssh_user(None), "")
        self.assertEqual(self.hvd.format_ssh_user(""), "")
        self.assertEqual(self.hvd.format_ssh_user("foo"), "foo@")
        self.assertEqual(self.hvd.format_ssh_user("foo@"), "foo@")

    def test_format_ssh_command_empty_config(self):
        params = {"hostname": "localhost"}
        self._prep(params)
        self.assertEqual(self.hvd.format_ssh_command(), "ssh localhost")

    def test_format_ssh_command_ssh_user(self):
        params = {"hostname": "localhost", "ssh_user": "foo"}
        self._prep(params)
        self.assertEqual(self.hvd.format_ssh_command(), "ssh foo@localhost")

    def test_format_ssh_command_ssh_port(self):
        params = {"hostname": "localhost", "ssh_user": "foo", "ssh_port": "22"}
        self._prep(params)
        self.assertEqual(self.hvd.format_ssh_command(),
                         "ssh -p 22 foo@localhost")

    def test_format_ssh_command_keyfile(self):
        params = {"hostname": "localhost", "ssh_user": "foo",
                  "ssh_port": "22", "key_file": "/tmp/foobar"}
        self._prep(params)
        self.assertEqual(self.hvd.format_ssh_command(),
                         "ssh -p 22 -i /tmp/foobar foo@localhost")

    def test_format_ssh_command_no_port(self):
        # Key file without a port: the -p option must be omitted.
        params = {"hostname": "localhost", "ssh_user": "foo",
                  "key_file": "/tmp/foobar"}
        self._prep(params)
        self.assertEqual(self.hvd.format_ssh_command(),
                         "ssh -i /tmp/foobar foo@localhost")

    def test_format_ssh_command_no_user(self):
        # Key file without a user: no user@ prefix on the host.
        params = {"hostname": "localhost", "key_file": "/tmp/foobar"}
        self._prep(params)
        self.assertEqual(self.hvd.format_ssh_command(),
                         "ssh -i /tmp/foobar localhost")
# Allow running this test module directly: `python tests.py`.
if __name__ == "__main__":
    unittest.main()
ovirt-log-collector-3.1.0/src/rhev/Makefile 0000664 0032753 0032753 00000001175 11763605557 021035 0 ustar oschreib oschreib PYTHON=python
# Files excluded from byte-compilation.
EXCLUDES=__init__.py
# Package name is taken from the current directory name.
PACKAGE = $(shell basename `pwd`)
# Python version string (e.g. "2.7") and install prefix, queried from the interpreter.
PYVER := $(shell $(PYTHON) -c 'import sys; print "%.3s" %(sys.version)')
PYSYSDIR := $(shell $(PYTHON) -c 'import sys; print sys.prefix')
PYLIBDIR = $(PYSYSDIR)/lib/python$(PYVER)
PKGDIR = $(PYLIBDIR)/site-packages/$(PACKAGE)

# Compress the man page for installation.
man:
	gzip -c engine-log-collector.8 > engine-log-collector.8.gz

# Build the man page and byte-compile (.pyc and optimized .pyo) all sources.
all: man
	$(PYTHON) -m compileall -x $(EXCLUDES) .
	$(PYTHON) -OO -m compileall -x $(EXCLUDES) .

# Remove byte-compiled files and editor backups.
clean:
	rm -f *.pyc *.pyo *~

# NOTE: installation is driven by the top-level Makefile; this target is kept
# commented out for reference.
#install: all
#	mkdir -p $(DESTDIR)/$(PKGDIR)
#	for p in $(wildcard *.py) ; do \
#	install -m 755 $$p $(DESTDIR)/$(PKGDIR)/$$p; \
#	done
ovirt-log-collector-3.1.0/LICENSE 0000664 0032753 0032753 00000023676 11763605557 016661 0 ustar oschreib oschreib
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
ovirt-log-collector-3.1.0/README 0000664 0032753 0032753 00000000000 11763605557 016504 0 ustar oschreib oschreib ovirt-log-collector-3.1.0/ovirt-log-collector.spec.in 0000664 0032753 0032753 00000001753 11763605557 023033 0 ustar oschreib oschreib Name: ovirt-log-collector
Version: $version
Release: 0%{?dist}
Source0: http://ovirt.org/releases/stable/src/ovirt-log-collector-%{version}.tar.gz
License: ASL 2.0
Summary: Log Collector for oVirt Engine
Group: Virtualization/Management
URL: http://www.ovirt.org
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}
BuildArch: noarch
BuildRequires: python-devel
Requires: python-lxml
Requires: ovirt-engine-sdk
Requires: sos
%description
Log Collector tool for oVirt Engine
%prep
%setup -q
%build
%install
rm -rf %{buildroot}/*
make PREFIX=%{buildroot}/ install
%files
%{_datadir}/ovirt-engine/log-collector
%{python_sitelib}/sos/plugins/engine.py*
%{python_sitelib}/sos/plugins/jboss.py*
%{python_sitelib}/sos/plugins/postgresql.py*
%{_bindir}/engine-log-collector
%config(noreplace) %{_sysconfdir}/ovirt-engine/logcollector.conf
%doc %{_mandir}/man8/engine-log-collector.8.gz
%doc AUTHORS
%doc LICENSE
%changelog
* Thu Mar 29 2012 Keith Robertson - 1.0.0-0
- Initial build
ovirt-log-collector-3.1.0/AUTHORS 0000664 0032753 0032753 00000000141 11763605557 016702 0 ustar oschreib oschreib The following people have contributed to this project.
* Keith Robertson
ovirt-log-collector-3.1.0/Makefile 0000664 0032753 0032753 00000006324 11763605557 017303 0 ustar oschreib oschreib #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Makefile for oVirt Engine Log Collector
#
# Base package name used for tarball and RPM naming.
NAME=ovirt-log-collector
# ex. export APP_VERSION=1.0.0
# RPM Version/Release fields may not contain '-', so map dashes to underscores.
RPM_VERSION:=$(shell echo $(APP_VERSION) | sed "s/-/_/")
# ex. export APP_RELEASE=1
RPM_RELEASE:=$(shell echo $(APP_RELEASE) | sed "s/-/_/")
SPEC_FILE_IN=ovirt-log-collector.spec.in
SPEC_FILE=ovirt-log-collector.spec
# rpmbuild working tree, created under the current directory.
RPMTOP=$(shell bash -c "pwd -P")/rpmtop
NAME_VER=$(NAME)-$(RPM_VERSION)
TARBALL=$(NAME)-$(RPM_VERSION).tar.gz
SRPM=$(RPMTOP)/SRPMS/$(NAME)-$(RPM_VERSION)*.src.rpm
ARCH=noarch
# Python site-packages directory, used to install the sos plug-ins.
PYTHON_SITELIB=$(shell python -c "from distutils.sysconfig import get_python_lib as f;print f()")
CURR_DIR=$(shell bash -c "pwd -P")

# Default target: build binary RPMs.
all: rpm

# Remove byte-compiled files, the generated spec, the rpmbuild tree and tarball.
clean:
	@for i in `find . -iname *.pyc`; do \
	rm $$i; \
	done; \
	for i in `find . -iname *.pyo`; do \
	rm $$i; \
	done; \
	rm -rf $(SPEC_FILE) $(RPMTOP) $(TARBALL) $(NAME)

install: create_dirs install_log_collector

# Stage the sources into a clean directory and archive them with the
# versioned top-level directory name expected by rpmbuild.
tarball: $(TARBALL)
$(TARBALL):
	rsync -avz --exclude=.git . $(NAME)
	tar --transform 's,^ovirt-log-collector,$(NAME_VER),S' -cvf $(TARBALL) $(NAME)
	rm -rf $(NAME)/

# Generate the spec from its template (substituting Version/Release) and
# build a source RPM in the local rpmtop tree.
srpm: $(SRPM)
$(SRPM): tarball $(SPEC_FILE_IN)
	sed 's/^Version:.*/Version: $(RPM_VERSION)/' $(SPEC_FILE_IN) > $(SPEC_FILE)
	sed -i -e's/^Release:.*/Release: $(RPM_RELEASE)%{?dist}/' $(SPEC_FILE)
	mkdir -p $(RPMTOP)/{SPECS,RPMS,SRPMS,SOURCES,BUILD,BUILDROOT}
	cp -f $(SPEC_FILE) $(RPMTOP)/SPECS/
	cp -f $(TARBALL) $(RPMTOP)/SOURCES/
	rpmbuild -bs --define="_topdir $(RPMTOP)" --define="_sourcedir $(RPMTOP)/SOURCES" $(SPEC_FILE)

# Rebuild the binary RPM from the source RPM ($< is the SRPM prerequisite).
rpm: $(SRPM)
	rpmbuild --define="_topdir $(RPMTOP)" --rebuild $<

create_dirs:
	@echo "*** Creating Directories"
	@mkdir -p $(PREFIX)/usr/share/man/man8/
	@mkdir -p $(PREFIX)/usr/bin/

# Install the collector script, helpers, sos plug-ins, man page and config,
# then expose the script on PATH via a symlink.
install_log_collector:
	@echo "*** Deploying log collector"
	install -D -m 0755 ./src/rhev/logcollector.py $(PREFIX)/usr/share/ovirt-engine/log-collector/engine-log-collector.py
	install -D -m 0755 ./src/rhev/helper/hypervisors.py $(PREFIX)/usr/share/ovirt-engine/log-collector/helper/hypervisors.py
	install -D -m 0755 ./src/rhev/helper/__init__.py $(PREFIX)/usr/share/ovirt-engine/log-collector/helper/__init__.py
	install -D -m 0755 ./src/sos/plugins/engine.py $(PREFIX)/$(PYTHON_SITELIB)/sos/plugins/engine.py
	install -D -m 0755 ./src/sos/plugins/jboss.py $(PREFIX)/$(PYTHON_SITELIB)/sos/plugins/jboss.py
	install -D -m 0755 ./src/sos/plugins/postgresql.py $(PREFIX)/$(PYTHON_SITELIB)/sos/plugins/postgresql.py
	/usr/bin/gzip -c ./src/rhev/engine-log-collector.8 > $(PREFIX)/usr/share/man/man8/engine-log-collector.8.gz
	chmod 644 $(PREFIX)/usr/share/man/man8/engine-log-collector.8.gz
	install -D -m 0600 ./src/rhev/logcollector.conf $(PREFIX)/etc/ovirt-engine/logcollector.conf
	ln -s /usr/share/ovirt-engine/log-collector/engine-log-collector.py $(PREFIX)/usr/bin/engine-log-collector