cas-admin lib/cas
by Adam Stokes
cas-admin | 29 ++++++++---------------------
lib/cas/db.py | 14 ++++++++++++--
2 files changed, 20 insertions(+), 23 deletions(-)
New commits:
commit 7513467d1fde862f2abe4127d3ee1ff76a0eb6ca
Author: Adam Stokes <adam.stokes(a)gmail.com>
Date: Tue Mar 31 19:14:58 2009 -0400
- starting to implement db methods into cas-admin
diff --git a/cas-admin b/cas-admin
index 7fbc538..e25b778 100755
--- a/cas-admin
+++ b/cas-admin
@@ -37,6 +37,7 @@ KERNELS = config.get("settings","kernels")
RPMFILTER = config.get("settings","rpmFilter")
DEBUGS = config.get("settings","debugs")
DEBUGLEVEL = config.get("settings","debugLevel")
+DATABASE = config.get("settings","database")
# read maintenance options
PURGELIMIT = config.get("maintenance","purgeLimit")
@@ -46,20 +47,11 @@ class CasDatabaseHandler(object):
def __init__(self, logger):
self.casLog = logger
self.util = UtilBase()
- if os.path.isfile(RPMS):
- self.rpmDB = self.util.load(RPMS)
- else:
- self.rpmDB = {}
-
- def rpmExist(self, rpm):
- """ checks existence of rpm in db
- """
- if self.rpmDB.has_key(rpm):
- return True
- return False
+ # setup database connection
+ self.db = CasStorage(DATABASE)
+ self.db.connect()
def run(self):
- rpms = []
# Uses emacs regex -- see `man find`
cmd = ["find", "-L", KERNELS, "-iregex", RPMFILTER]
pipe = Popen(cmd, stdout=PIPE, stderr=PIPE)
@@ -67,17 +59,12 @@ class CasDatabaseHandler(object):
count = 0
# create list of rpms from `cmd`
for line in pipe.stdout:
- rpms.append(line.strip())
+ self.db.addDebuginfoRPM(line.strip())
self.casLog.status("(found) %-5d kernel(s)" % (count,))
count = count + 1
- # reset count for further informational messaging
- totalRpms = len(rpms)
- count = 0
- """ Build database out in the form of
- RPM - [( DebugKernel, Timestamp),
-' ( DebugKernel2, Timestamp2)]
- """
- for x in rpms:
+ # query database for debuginfo rpms
+ rpms = self.db.getDebuginfoRPM()
+ for id, rpm in rpms:
if not self.rpmExist(x):
self.rpmDB[x] = []
count = count + 1
diff --git a/lib/cas/db.py b/lib/cas/db.py
index 5e9ef7c..fb0b322 100644
--- a/lib/cas/db.py
+++ b/lib/cas/db.py
@@ -22,12 +22,22 @@ class CasStorage(object):
(date text, submitter text, corepath text, debugpath text,
server text)
-create table debuginfo (rpm text)
+create table debuginfo (debug_id integer primary key autoincrement, rpm text)
-create table timestamp (debuginfo integer, debugpath text, timestamp text)''')
+create table timestamp (timestamp_id integer primary key autoincrement,
+debuginfo integer, debugpath text, timestamp text, debug_id integer)''')
self.conn.commit()
return
+ # DEBUGINFO METHODS
+ def addDebuginfoRPM(self, debuginfo):
+ self.cursor.execute("INSERT into debuginfo('rpm') values(?)", debuginfo)
+ return
+
+ def getDebuginfoRPM(self):
+ self.cursor.execute("SELECT * FROM debuginfo")
+ return self.cursor
+
def connect(self):
""" execute connection """
try:
15 years
cas cas-admin cas.conf lib/cas
by Adam Stokes
cas | 5 +----
cas-admin | 2 +-
cas.conf | 11 ++---------
lib/cas/db.py | 46 +++++++++++++++++++++++++++++++---------------
4 files changed, 35 insertions(+), 29 deletions(-)
New commits:
commit 31c1d943ca61c8ddf0dc0b9d24ddbc7f464e8a0b
Author: Adam Stokes <adam.stokes(a)gmail.com>
Date: Tue Mar 31 13:13:07 2009 -0400
- more db changes
diff --git a/cas b/cas
index e255e5e..0dac60c 100755
--- a/cas
+++ b/cas
@@ -42,10 +42,7 @@ WORKDIRECTORY = config.get("settings","workDirectory")
DEBUGLEVEL = config.get("settings","debugLevel")
SMTPHOST = config.get("settings", "mailServer")
# Read database parameters
-CASDB_CONF = {'dbhost': config.get("database","dbhost"),
- 'dbuser': config.get("database","dbuser"),
- 'dbpass': config.get("database","dbpass"),
- 'dbname': config.get("database","dbname")}
+DATABASE = config.get("settings", "database")
# Check for some advanced configurations
# Test to see if we provide a 32bit crash binary
diff --git a/cas-admin b/cas-admin
index e394c9b..7fbc538 100755
--- a/cas-admin
+++ b/cas-admin
@@ -75,7 +75,7 @@ class CasDatabaseHandler(object):
count = 0
""" Build database out in the form of
RPM - [( DebugKernel, Timestamp),
- ( DebugKernel2, Timestamp2)]
+' ( DebugKernel2, Timestamp2)]
"""
for x in rpms:
if not self.rpmExist(x):
diff --git a/cas.conf b/cas.conf
index 5b8808a..2b17e34 100644
--- a/cas.conf
+++ b/cas.conf
@@ -33,15 +33,8 @@ purgeLimit=90
# whenever cas-admin is run
autoPurge=False
-[database]
-# database connection info (mysql only)
-# TODO: perhaps add orm support for multiple backends?
-# A database needs to be manually created
-# $ mysqladmin -uroot -predhat create $dbname
-dbname='casdb'
-dbhost='localhost'
-dbuser='root'
-dbpass='redhat'
+# database connection info
+database='/var/db/cas/cas.db'
[advanced]
# if running a x86_64 system and wish to analyze 32bit
diff --git a/lib/cas/db.py b/lib/cas/db.py
index 57313d2..5e9ef7c 100644
--- a/lib/cas/db.py
+++ b/lib/cas/db.py
@@ -1,38 +1,54 @@
-import MySQLdb
+import os
+import sys
+
+if sys.version_info[:2] > (2,4):
+ try:
+ import sqlite3 as sqlite
+ except ImportError:
+ import sqlite
class CasStorageException(Exception): pass
class CasStorage(object):
- def __init__(self, sql_server, username, password, database):
+ def __init__(self, database):
""" setup database connection and return db cursor for
traversing database """
self.db = database
- self.sql_server = sql_server
- self.username = username
- self.password = password
+ self.conn = None
self.cursor = None
+ def buildTable(self):
+ self.cursor.execute('''create table cas_jobs
+(date text, submitter text, corepath text, debugpath text,
+server text)
+
+create table debuginfo (rpm text)
+
+create table timestamp (debuginfo integer, debugpath text, timestamp text)''')
+ self.conn.commit()
+ return
+
def connect(self):
""" execute connection """
try:
- db_connect = MySQLdb.connect(host=self.sql_server,
- user=self.username,
- passwd=self.password,
- db=self.db)
- self.cursor = db_connect.cursor()
- except OperationalError, e:
- raise CasStorageException(e)
+ if not os.path.isfile(self.db):
+ # build out sql table
+ self.buildTable()
+ self.conn = sqlite.connect(self.db)
+ self.cursor = self.conn.cursor()
+ except:
+ raise CasStorageException('Cannot connect to database')
return
def getAllJobs(self):
""" all jobs """
self.cursor.execute("SELECT * FROM cas_jobs")
- return self.cursor.fetchall()
+ return self.cursor
def getJobById(self, id):
""" single job """
- self.cursor.execute("SELECT * FROM cas_jobs where id=%d" % (id,))
- return self.cursor.fetchone()
+ self.cursor.execute("SELECT * FROM cas_jobs where id=?", id)
+ return self.cursor
def getJobRange(self, days):
""" provides jobs based on creation date from
15 years
cas cas-admin cas.conf cas.sql lib/cas
by Adam Stokes
cas | 7 +++----
cas-admin | 23 +++++++++++++++++++++--
cas.conf | 17 ++++++++++++-----
cas.sql | 27 ++++++++++-----------------
lib/cas/db.py | 15 ++++++++++-----
5 files changed, 56 insertions(+), 33 deletions(-)
New commits:
commit f6c4ec270ffedd4d21e9802eee2b26caa4acd433
Author: Adam Stokes <adam.stokes(a)gmail.com>
Date: Thu Mar 19 21:07:25 2009 -0400
- conf update
- sql update to better reflect the relationships involved
- focus on cas-admin to incorporate maintenance
diff --git a/cas b/cas
index 7303a54..e255e5e 100755
--- a/cas
+++ b/cas
@@ -40,7 +40,6 @@ config = ConfigParser.ConfigParser()
config.read("/etc/cas.conf")
WORKDIRECTORY = config.get("settings","workDirectory")
DEBUGLEVEL = config.get("settings","debugLevel")
-SERVERS = config.get("settings", "servers")
SMTPHOST = config.get("settings", "mailServer")
# Read database parameters
CASDB_CONF = {'dbhost': config.get("database","dbhost"),
@@ -143,8 +142,7 @@ class CasApplication(object):
parser.add_option("-f","--file", dest="filename",
help="Filename")
parser.add_option("-e","--email", dest="email",
- help="Define email for results (must be valid!)",
- action="store")
+ help="Define email for results")
parser.add_option("-m","--modules", dest="kernel_modules",
help="Extract associated kernel modules",
action="store_true")
@@ -241,7 +239,8 @@ class CasApplication(object):
# TODO: Randomize server selection
# TODO: Verify remote server is reachable
casProcessMachine = serverList[debugKernelArch][0]
- self.casLog.info("Machine %s found, processing crash output" % (casProcessMachine,))
+ self.casLog.info("Machine %s found, processing " \
+ "crash output" % (casProcessMachine,))
cmd = os.path.join(self.storagePath,"crash")
client = fc.Overlord(casProcessMachine)
clientDict = client.command.run(cmd)
diff --git a/cas-admin b/cas-admin
index 8bd9819..e394c9b 100755
--- a/cas-admin
+++ b/cas-admin
@@ -23,6 +23,7 @@ import urlparse
from cas.core import CoreBase
from cas.util import UtilBase, Logging
from cas.rpmutils import RPMBase
+from cas.db import CasStorage, CasStorageException
from subprocess import Popen, PIPE
from cas.cas_shutil import rmtree
@@ -33,11 +34,13 @@ if sys.version_info[:2] < (2,4):
config = ConfigParser.ConfigParser()
config.read("/etc/cas.conf")
KERNELS = config.get("settings","kernels")
-RPMS = config.get("settings","rpms")
RPMFILTER = config.get("settings","rpmFilter")
DEBUGS = config.get("settings","debugs")
DEBUGLEVEL = config.get("settings","debugLevel")
-SERVERS = config.get("settings","servers")
+
+# read maintenance options
+PURGELIMIT = config.get("maintenance","purgeLimit")
+AUTOPURGE = config.get("maintenance","autoPurge")
class CasDatabaseHandler(object):
def __init__(self, logger):
@@ -151,9 +154,15 @@ class CasAdminApplication(object):
help="Build CAS DB", action="store_true", default=False)
parser.add_option("-s","--server", dest="server_init",
help="Build SERVER DB", action="store_true", default=False)
+ parser.add_option("-p","--purge", dest="purgeData",
+ help="Purge files", action="store_true", default=False)
+ parser.add_option("-d","--days", dest="purgeDataDays",
+ help="Set how many days back to purge data")
(self.opts, args) = parser.parse_args()
self.buildDB = self.opts.buildDB
self.server_init = self.opts.server_init
+ self.purgeData = self.opts.purgeData
+ self.purgeDataDays = self.opts.purgeDataDays
def run(self):
""" Make sure necessary directories and configuration is setup
@@ -166,6 +175,16 @@ class CasAdminApplication(object):
if not os.path.isdir(DEBUGS):
os.makedirs(DEBUGS)
+ if self.purgeData:
+ ans = raw_input("You are about to purge data, is this what you " \
+ "really want to do? [Y/y/N/n]: ")
+ if ans=='Y' or ans=='y':
+ if not self.purgeDataDays:
+ self.purgeDataDays = PURGELIMIT
+ self.casLog.info("Beginning Purge going back %s days" % (self.purgeDataDays,))
+ else:
+ sys.exit(0)
+
if self.buildDB:
self.casLog.info("Starting CAS DB instance.")
dbHandler = CasDatabaseHandler(self.casLog).run()
diff --git a/cas.conf b/cas.conf
index 82d2ce8..5b8808a 100644
--- a/cas.conf
+++ b/cas.conf
@@ -4,9 +4,6 @@
# indefinately recursive
kernels=/mnt/kernels
-# Database which houses all kernel found in $kernels
-rpms=/var/db/cas/rpms.db
-
# Compose emacs regular expression for determing what
# kernel debug rpms you wish to search for
rpmFilter=.*kernel-debuginfo-[0-9].*\.rpm
@@ -16,21 +13,31 @@ rpmFilter=.*kernel-debuginfo-[0-9].*\.rpm
# cpio is run against an rpm and extract the debug kernel
# for a small amount of time. This could be something like
# /tmp if space permitted
-debugs=/cores/debugs
+debugs=/tmp
# debug level (DEBUG, INFO)
debugLevel=DEBUG
# define work directory
-workDirectory=/cores/processed
+workDirectory=/cores
# Mail server, e.g. mail.example.com
# Provides job results via email
mailServer=mail.example.com
+[maintenance]
+# Time in days to keep jobs and its associated files
+purgeLimit=90
+
+# Enable if want to automatically check and purge data
+# whenever cas-admin is run
+autoPurge=False
+
[database]
# database connection info (mysql only)
# TODO: perhaps add orm support for multiple backends?
+# A database needs to be manually created
+# $ mysqladmin -uroot -predhat create $dbname
dbname='casdb'
dbhost='localhost'
dbuser='root'
diff --git a/cas.sql b/cas.sql
index 5246295..0448425 100644
--- a/cas.sql
+++ b/cas.sql
@@ -2,52 +2,45 @@
-- cas jobs
--
CREATE TABLE jobs (
- id MEDIUMINT NOT NULL AUTO_INCREMENT,
+ id INTEGER PRIMARY KEY AUTO_INCREMENT,
submitter VARCHAR(255),
creation TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
jobpath TEXT,
corefile TEXT,
- debug TEXT,
- PRIMARY KEY (id)
+ debug TEXT
)
--
-- cas servers
--
CREATE TABLE architecture (
- id MEDIUMINT NOT NULL AUTO_INCREMENT,
- arch VARCHAR(255),
- PRIMARY KEY (id)
+ id INTEGER PRIMARY KEY AUTO_INCREMENT,
+ arch VARCHAR(255)
)
--
-- one to many relationship with architectures
--
CREATE TABLE servers (
- id MEDIUMINT NOT NULL AUTO_INCREMENT,
+ id INTEGER PRIMARY KEY AUTO_INCREMENT,
hostname VARCHAR(255),
- arch_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY (arch_id)
+ arch_id INTEGER
)
--
-- debug rpm filename
--
CREATE TABLE debuginforpm (
- id MEDIUMINT NOT NULL AUTO_INCREMENT,
- filename VARCHAR(255),
- PRIMARY KEY (id)
+ id INTEGER PRIMARY KEY AUTO_INCREMENT,
+ filename VARCHAR(255)
)
--
-- debug kernels
--
CREATE TABLE debugkernels (
- id MEDIUMINT NOT NULL AUTO_INCREMENT,
+ id INTEGER PRIMARY KEY AUTO_INCREMENT,
debugpath TEXT,
debugtimestamp TEXT,
- debuginforpm_id INTEGER,
- PRIMARY KEY (id),
- FOREIGN KEY (debuginforpm_id)
+ debuginforpm_id INTEGER
)
diff --git a/lib/cas/db.py b/lib/cas/db.py
index 175a3bb..57313d2 100644
--- a/lib/cas/db.py
+++ b/lib/cas/db.py
@@ -1,5 +1,7 @@
import MySQLdb
+class CasStorageException(Exception): pass
+
class CasStorage(object):
def __init__(self, sql_server, username, password, database):
""" setup database connection and return db cursor for
@@ -12,11 +14,14 @@ class CasStorage(object):
def connect(self):
""" execute connection """
- db_connect = MySQLdb.connect(host=self.sql_server,
- user=self.username,
- passwd=self.password,
- db=self.db)
- self.cursor = db_connect.cursor()
+ try:
+ db_connect = MySQLdb.connect(host=self.sql_server,
+ user=self.username,
+ passwd=self.password,
+ db=self.db)
+ self.cursor = db_connect.cursor()
+ except OperationalError, e:
+ raise CasStorageException(e)
return
def getAllJobs(self):
15 years, 1 month
2 commits - cas cas-admin cas.conf cas.sql lib/cas
by Adam Stokes
cas | 16 ++++++++++------
cas-admin | 8 ++++----
cas.conf | 22 ++++++++++------------
cas.sql | 43 ++++++++++++++++++++++++++++++++++++++++++-
lib/cas/db.py | 14 +++++++++++++-
5 files changed, 79 insertions(+), 24 deletions(-)
New commits:
commit 5b14fda2d3e3017f032fe9a1b866c9025ac516c3
Author: Adam Stokes <adam.stokes(a)gmail.com>
Date: Wed Mar 18 23:10:10 2009 -0400
- extend sql
- start changes to reflect db schema
diff --git a/cas b/cas
index 319ca7b..7303a54 100755
--- a/cas
+++ b/cas
@@ -39,10 +39,14 @@ if sys.version_info[:2] < (2,4):
config = ConfigParser.ConfigParser()
config.read("/etc/cas.conf")
WORKDIRECTORY = config.get("settings","workDirectory")
-RPMS = config.get("settings","rpms")
DEBUGLEVEL = config.get("settings","debugLevel")
SERVERS = config.get("settings", "servers")
SMTPHOST = config.get("settings", "mailServer")
+# Read database parameters
+CASDB_CONF = {'dbhost': config.get("database","dbhost"),
+ 'dbuser': config.get("database","dbuser"),
+ 'dbpass': config.get("database","dbpass"),
+ 'dbname': config.get("database","dbname")}
# Check for some advanced configurations
# Test to see if we provide a 32bit crash binary
@@ -51,7 +55,7 @@ SMTPHOST = config.get("settings", "mailServer")
CRASH_32=None
if config.has_option("advanced", "crash_32"):
CRASH_32=config.get("advanced", "crash_32")
-
+
class CoreHandler(object):
def __init__(self, filename, dst, logger):
self.filename = filename
@@ -161,7 +165,7 @@ class CasApplication(object):
dateFormatted = datenow.strftime("%m.%d.%y.%I.%M.%S")
self.storagePath = os.path.join(WORKDIRECTORY, self.identifier)
self.storagePath = os.path.join(self.storagePath, dateFormatted)
-
+
# build logger object to deal with logging per job and keep things
# clean and easy to debug
self.casLog = Logging(self.storagePath, self.identifier)
@@ -185,7 +189,7 @@ class CasApplication(object):
debugKernel = os.path.abspath(debugKernel)
# setup crash file to finalize the processing of the core file
self.util.buildCrashFile(self.storagePath, corefile, debugKernel)
- # Pull the architecture from the elf file to match up with a
+ # Pull the architecture from the elf file to match up with a
# server providing this architecture
debugKernelArch = self.util.debugKernelArch(debugKernel)
# Read current machine arch to see if we can bypass func and proceed
@@ -204,7 +208,7 @@ class CasApplication(object):
cmdPipe = Popen([cmd], stdout=PIPE, stderr=PIPE)
cmdData = cmdPipe.communicate()
# pull status code to verify crash even ran to completeness
- sts, out, err = (cmdPipe.returncode, cmdData[0].strip(),
+ sts, out, err = (cmdPipe.returncode, cmdData[0].strip(),
cmdData[1].strip())
if sts:
self.casLog.debug("crash problem: err: %s, out: %s" % (err, out))
@@ -219,7 +223,7 @@ class CasApplication(object):
cmdPipe = Popen([cmd], stdout=PIPE, stderr=PIPE)
cmdData = cmdPipe.communicate()
# pull status code to verify crash even ran to completeness
- sts, out, err = (cmdPipe.returncode, cmdData[0].strip(),
+ sts, out, err = (cmdPipe.returncode, cmdData[0].strip(),
cmdData[1].strip())
if sts:
self.casLog.debug("crash problem: err: %s, out: %s" % (err, out))
diff --git a/cas-admin b/cas-admin
index 100140d..8bd9819 100755
--- a/cas-admin
+++ b/cas-admin
@@ -81,7 +81,7 @@ class CasDatabaseHandler(object):
# temporary storage path in form of DEBUGS/COUNT
dst = os.path.join(DEBUGS, str(count))
rpmTool = RPMBase()
- self.casLog.status("(extracting) [%d/%d] %-50s" % (count, totalRpms,
+ self.casLog.status("(extracting) [%d/%d] %-50s" % (count, totalRpms,
os.path.basename(x)))
results = rpmTool.extract(x, dst)
# Sort through extracted debug for each type
@@ -97,12 +97,12 @@ class CasDatabaseHandler(object):
# Cleanup extracted debugs
rmtree(dst)
return
-
+
class CasServerHandler(object):
def __init__(self, logger):
self.casLog = logger
self.util = UtilBase()
-
+
def run(self):
try:
serverList = {}
@@ -146,7 +146,7 @@ class CasAdminApplication(object):
self.casLog = Logging("/var/log","cas-admin")
def parse_options(self, args):
- parser = optparse.OptionParser(usage="casprint [opts] args")
+ parser = optparse.OptionParser(usage="cas-admin [opts] args")
parser.add_option("-b","--build", dest="buildDB",
help="Build CAS DB", action="store_true", default=False)
parser.add_option("-s","--server", dest="server_init",
diff --git a/cas.conf b/cas.conf
index 6ce4579..82d2ce8 100644
--- a/cas.conf
+++ b/cas.conf
@@ -24,24 +24,22 @@ debugLevel=DEBUG
# define work directory
workDirectory=/cores/processed
-# database to house cas servers
-# Server list is stored in a dict in the form of
-# {'x86_64' : [servera, serverb, serverc],
-# 'x86' : [serverm, servern, servero]}
-servers=/var/db/cas/servers.db
-
-# define processing jobs db
-# TODO: determine format type, (e.g raw, json, xml)
-jobs=/var/db/cas/jobs.db
-
# Mail server, e.g. mail.example.com
# Provides job results via email
mailServer=mail.example.com
+[database]
+# database connection info (mysql only)
+# TODO: perhaps add orm support for multiple backends?
+dbname='casdb'
+dbhost='localhost'
+dbuser='root'
+dbpass='redhat'
+
[advanced]
# if running a x86_64 system and wish to analyze 32bit
# cores, define the location of 32bit crash binary
-# EXAMPLE USAGE:
+# EXAMPLE USAGE:
# rpm --relocate /usr/bin=/usr/local/i386 -ivh crash*i386.rpm
-
+
# crash_32=/usr/local/i386/crash
diff --git a/lib/cas/db.py b/lib/cas/db.py
index 9c0cac6..175a3bb 100644
--- a/lib/cas/db.py
+++ b/lib/cas/db.py
@@ -19,6 +19,18 @@ class CasStorage(object):
self.cursor = db_connect.cursor()
return
- def getJobs(self):
+ def getAllJobs(self):
+ """ all jobs """
self.cursor.execute("SELECT * FROM cas_jobs")
return self.cursor.fetchall()
+
+ def getJobById(self, id):
+ """ single job """
+ self.cursor.execute("SELECT * FROM cas_jobs where id=%d" % (id,))
+ return self.cursor.fetchone()
+
+ def getJobRange(self, days):
+ """ provides jobs based on creation date from
+ $days back
+ """
+ pass
commit 223325498434a16147675083ffc6bdd5b28e5f47
Author: Adam Stokes <adam.stokes(a)gmail.com>
Date: Wed Mar 18 23:10:02 2009 -0400
- add additional tables for db migration
diff --git a/cas.sql b/cas.sql
index b7e8eb5..5246295 100644
--- a/cas.sql
+++ b/cas.sql
@@ -1,7 +1,7 @@
--
-- cas jobs
--
-CREATE TABLE cas_jobs (
+CREATE TABLE jobs (
id MEDIUMINT NOT NULL AUTO_INCREMENT,
submitter VARCHAR(255),
creation TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
@@ -10,3 +10,44 @@ CREATE TABLE cas_jobs (
debug TEXT,
PRIMARY KEY (id)
)
+
+--
+-- cas servers
+--
+CREATE TABLE architecture (
+ id MEDIUMINT NOT NULL AUTO_INCREMENT,
+ arch VARCHAR(255),
+ PRIMARY KEY (id)
+)
+
+--
+-- one to many relationship with architectures
+--
+CREATE TABLE servers (
+ id MEDIUMINT NOT NULL AUTO_INCREMENT,
+ hostname VARCHAR(255),
+ arch_id INTEGER,
+ PRIMARY KEY (id),
+ FOREIGN KEY (arch_id)
+)
+
+--
+-- debug rpm filename
+--
+CREATE TABLE debuginforpm (
+ id MEDIUMINT NOT NULL AUTO_INCREMENT,
+ filename VARCHAR(255),
+ PRIMARY KEY (id)
+)
+
+--
+-- debug kernels
+--
+CREATE TABLE debugkernels (
+ id MEDIUMINT NOT NULL AUTO_INCREMENT,
+ debugpath TEXT,
+ debugtimestamp TEXT,
+ debuginforpm_id INTEGER,
+ PRIMARY KEY (id),
+ FOREIGN KEY (debuginforpm_id)
+)
15 years, 1 month
cas.sql
by Adam Stokes
cas.sql | 1 -
1 file changed, 1 deletion(-)
New commits:
commit 6af0beb13bf99eb7127dc8751a545e69964258ec
Author: Adam Stokes <adam.stokes(a)gmail.com>
Date: Wed Mar 18 13:18:07 2009 -0400
- no double timestamp
diff --git a/cas.sql b/cas.sql
index ef3aa33..b7e8eb5 100644
--- a/cas.sql
+++ b/cas.sql
@@ -5,7 +5,6 @@ CREATE TABLE cas_jobs (
id MEDIUMINT NOT NULL AUTO_INCREMENT,
submitter VARCHAR(255),
creation TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
- modified TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
jobpath TEXT,
corefile TEXT,
debug TEXT,
15 years, 1 month
cas.sql
by Adam Stokes
cas.sql | 16 +++++++++++-----
1 file changed, 11 insertions(+), 5 deletions(-)
New commits:
commit d6d04e7de0e22d11d70cb39f44e0c300ce5d9716
Author: Adam Stokes <adam.stokes(a)gmail.com>
Date: Wed Mar 18 13:16:26 2009 -0400
- update sql
diff --git a/cas.sql b/cas.sql
index 7017d37..ef3aa33 100644
--- a/cas.sql
+++ b/cas.sql
@@ -1,7 +1,13 @@
+--
+-- cas jobs
+--
CREATE TABLE cas_jobs (
- 'ID' PRIMARY AUTO_INCREMENT INTEGER,
- 'PATH' TEXT,
- 'COREFILEPATH' TEXT,
- 'DEBUGPATH' TEXT,
- 'CREATION_TIME' DATETIME CURRENT_TIMESTAMP
+ id MEDIUMINT NOT NULL AUTO_INCREMENT,
+ submitter VARCHAR(255),
+ creation TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ modified TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+ jobpath TEXT,
+ corefile TEXT,
+ debug TEXT,
+ PRIMARY KEY (id)
)
15 years, 1 month
cas.spec cas.sql doc/index.rst lib/cas
by Adam Stokes
cas.spec | 10 -
cas.sql | 7 +
doc/index.rst | 356 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
lib/cas/db.py | 34 +++--
4 files changed, 390 insertions(+), 17 deletions(-)
New commits:
commit 1c9fbbbcd455db47220ab295c7b2ce9092c01b68
Author: Adam Stokes <adam.stokes(a)gmail.com>
Date: Wed Mar 18 11:52:46 2009 -0400
- start work on using mysql backend
diff --git a/cas.spec b/cas.spec
index e463724..5b53a5a 100644
--- a/cas.spec
+++ b/cas.spec
@@ -3,7 +3,7 @@
Name: cas
Summary: Tool to analyze and configure core file environment
Version: 0.13
-Release: 120%{?dist}
+Release: 121%{?dist}
Source0: https://fedorahosted.org/releases/c/a/cas/%{name}-%{version}.tar.gz
License: GPLv3+
Group: Development/Libraries
@@ -11,12 +11,12 @@ BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot
BuildArch: noarch
Url: http://fedorahosted.org/cas
BuildRequires: python-devel
-Requires: crash
+Requires: crash MySQL-python
%description
CAS provides a user the ability to configure an environment for core analysis
-quickly. All the hassles of matching kernel versions and machine architecture
-types to core dumps are automatically detected and processed.
+quickly. All the hassles of matching kernel versions and machine architecture
+types to core dumps are automatically detected and processed.
%prep
%setup -q
@@ -76,7 +76,7 @@ rm -rf ${RPM_BUILD_ROOT}
- splitting off grabcore to be a download/extract only service
- core of the work to be done specifically by their intended
modules
-
+
* Fri Dec 7 2007 Adam Stokes <astokes at redhat dot com> - 0.9
- release bump
- decompression module added
diff --git a/cas.sql b/cas.sql
new file mode 100644
index 0000000..7017d37
--- /dev/null
+++ b/cas.sql
@@ -0,0 +1,7 @@
+CREATE TABLE cas_jobs (
+ 'ID' PRIMARY AUTO_INCREMENT INTEGER,
+ 'PATH' TEXT,
+ 'COREFILEPATH' TEXT,
+ 'DEBUGPATH' TEXT,
+ 'CREATION_TIME' DATETIME CURRENT_TIMESTAMP
+)
diff --git a/doc/index.rst b/doc/index.rst
new file mode 100644
index 0000000..7ecb87c
--- /dev/null
+++ b/doc/index.rst
@@ -0,0 +1,356 @@
+Core Analysis System
+====================
+
+Introduction
+------------
+
+.. image:: cas_logo.png
+
+Description
+^^^^^^^^^^^
+
+CAS provides a user the ability to configure an environment for core analysis
+quickly. All the hassles of matching kernel versions and machine architecture
+types to core dumps are automatically detected and processed.
+
+Prerequisites
+^^^^^^^^^^^^^
+
+CAS needs at least **Python 2.4** to run. For systems that are not running
+Fedora 9 or later (this would include RHEL 4/5) the EPEL repo needs to be
+installed. Visit `EPEL <https://fedoraproject.org/wiki/EPEL>`_ to enable
+this repository.
+
+Optionally install `Func <http://fedorahosted.org/func>`_
+for automatic processing on other architecture specific systems. This
+can easily be installed via `yum`::
+
+ $ yum install func
+
+The amount of storage needed can be determined based on the following
+information:
+
+- The number of kernel-debuginfo packages needed
+- How many core dumps will be processed.
+
+Typically it is recommended to have at least 1TB for cores and another 500GB for
+the debuginfo packages.
+
+Since analyzing cores requires the same architecture specific systems the core
+was generated on there will need to be systems available of those same types
+in order for analysis to work properly.
+
+Finally, root access to the CAS server.
+
+Configuration
+^^^^^^^^^^^^^
+
+CAS comes with one main configuration file which is located at ``/etc/cas.conf``.
+The overall contents of this file is shown below, further down we will break up
+each section and describe its meaning::
+
+ [settings]
+ kernels=/mnt/kernels
+ rpms=/var/db/cas/rpms.db
+ rpmFilter=.*kernel-debuginfo-[0-9].*\.rpm
+ debugs=/cores/debugs
+ debugLevel=DEBUG
+ workDirectory=/cores/processed
+ servers=/var/db/cas/servers.db
+ jobs=/var/db/cas/jobs.db
+ mailServer=mail.example.com
+ [advanced]
+ # crash_32=/usr/local/i386/crash
+
+``kernels``: (**Required**) Describes the location of where kernel-debuginfo packages are to be
+stored. This can range anywhere from an nfs mount, samba share, local disk or
+any other type of media the cas server can access.
+
+``rpms``: (**Required**) Database which houses the processing of kernel-debuginfo and stores the
+necessary information for CAS to properly match core dumps.
+
+``rpmFilter``: (**Required**) This is an emacs-based regular expression which is essentially
+passed to a find command to locate the various kernel-debuginfo packages defined
+in ``kernels`` directive.
+
+``debugs``: (**Required**) A temporary directory in which to store the extracted vmlinux files
+from the kernel-debuginfo packages for processing. Another solution would be to
+alter this to point an existing directory like ``/tmp``, for instance.
+
+``debugLevel``: As the name suggests, it will set the debug level for CAS output.
+Currently the only accepted values are ``DEBUG|INFO``.
+
+``workDirectory``: (**Required**) Defines where all processed cores will be placed. This mount
+point will need to have the most storage assigned to it. Depending on how many
+cores are processed in a given timeframe this area will fill up quickly.
+
+``servers``: If func is installed and configured all associated servers will
+be stored here.
+
+``jobs``: Contains information on current jobs being processed and eventually
+what jobs are in the queue.
+
+``mailServer``: If wanting output of CAS processing email to a certain address
+this directive needs to be set. ``Note`` that the mail server should not
+require smtp authentication.
+
+``crash_32``: Primarily used on x86_64 systems to process x86 cores. If x86
+version of crash is installed this directive can be set to the crash binary
+and CAS will automatically process x86 cores on a x86_64 machine. ``Note`` this
+is only available if the CAS server is a x86_64 machine.
+
+Setup & Execution
+-----------------
+
+Preparing CAS Server
+^^^^^^^^^^^^^^^^^^^^
+
+To install the CAS package simply type::
+
+ $ yum install cas
+
+Once installed edit ``/etc/cas.conf`` as root using any preferred text editor.
+As described above the required directives need to be altered to suit the
+environment in question.
+
+In this example, ``/mnt/kernels`` is an nfs mount which houses the kernel-debuginfo
+packages. ``/cores`` is where all processed cores are stored and ``/tmp`` is the
+temporary storage for collecting the necessary data from the kernel-debuginfos.
+A mail server is setup within the environment to email CAS results and this
+optional directive is shown to reflect that. Finally, the CAS server is an x86_64
+machine and the environment will be processing x86 cores, therefore, the directive
+for this is uncommented and path to the x86 crash binary is given. ``Note`` there
+is information provided within the configuration file for installing the x86 crash
+to a different location.
+
+Altering the configuration to reflect the above assumptions would show the
+following::
+
+ [settings]
+ kernels=/mnt/kernels
+ rpms=/var/db/cas/rpms.db
+ rpmFilter=.*kernel-debuginfo-[0-9].*\.rpm
+ debugs=/tmp
+ debugLevel=DEBUG
+ workDirectory=/cores
+ servers=/var/db/cas/servers.db
+ jobs=/var/db/cas/jobs.db
+ mailServer=mail.cas-server.com
+ [advanced]
+ crash_32=/usr/local/i386/crash
+
+Now that the configuration file is altered and ``/mnt/kernels`` should be populated
+with kernel-debuginfo rpm's the next section will describe running CAS.
+
+Running CAS
+^^^^^^^^^^^
+
+First, one or two administrative tasks need to be run. The required task is to build
+a database for all the data gathered from the kernel-debuginfo packages.::
+
+ $ cas-admin -b
+
+If ``Func`` is installed and several systems are deployed for CAS to use run::
+
+ $ cas-admin -s
+
+At this point CAS is configured and looking at the output of CAS help there are
+a few options to pass::
+
+ Usage: cas [opts] args
+
+ Options:
+ -h, --help show this help message and exit
+ -i IDENTIFIER, --identifier=IDENTIFIER
+ Unique ID for core
+ -f FILENAME, --file=FILENAME
+ Filename
+ -e EMAIL, --email=EMAIL
+ Define email for results (must be valid!)
+ -m, --modules Extract associated kernel modules
+
+CAS prepares its directory hierarchy based on the ``identifier`` this option is
+therefore required. ``filename`` is also required as it tells CAS exactly which
+core to process and associate with ``identifier``. If wanting email results from
+CAS simply pass it the email parameter.
+
+An example, of a user wanting to process a corefile named ``vmcore.12345``::
+
+ $ cas -i 12345 -f vmcore.12345 -e user(a)cas-server.com
+
+In the above example an assumption is made that ``12345`` is associated to some
+form of ticketing system so to keep things organized an identifier was set of
+that number.
+
+The directory hierarchy for the current job should look like ``/cores/12345``.
+In addition to the processing of core files there is also a ``process log`` contained
+within this directory for each job processed. If multiple jobs for the same identifier
+are issued they are placed within a sub directory marked by the current timestamp
+and the relevant data associated with it.
+
+The last option worth mentioning is for core analysts who need to work
+within the core with one of the kernel modules loaded during the crash.
+These modules can be extracted by passing the ``modules`` parameter in the CAS
+execution statement. ``Note``: the ``modules`` parameter is not heavily used but
+can be useful when analyzing filesystem issues and the like.
+
+From this point on CAS will download, process, and email the results of its
+initial analysis to the specified email address. From there further instructions
+are provided in either the email or the ``process log`` on how to access and analyze
+the core.
+
+Analyzing
+---------
+
+Continuing with the previous example the results of CAS processing should be emailed
+and look something similar to::
+
+ Subject: CAS results for 12345
+ Date: Wed, 11 Feb 2009 08:44:37 -0500
+
+ Location: /cores/processed/12345/02.11.09.08.44.19
+ Server: x86_64.cas-server.com
+ Output data:
+ PID: 0 TASK: ffffffff803e9b80 CPU: 0 COMMAND: "swapper"
+ #0 [ffffffff8047a0a0] smp_call_function_interrupt at ffffffff8011d191
+ #1 [ffffffff8047a0b0] call_function_interrupt at ffffffff80110bf5
+ --- <IRQ stack> ---
+ #2 [ffffffff80529f08] call_function_interrupt at ffffffff80110bf5
+ [exception RIP: default_idle+32]
+ RIP: ffffffff8010e7a9 RSP: ffffffff80529fb8 RFLAGS: 00000246
+ RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000018
+ RDX: ffffffff8010e789 RSI: ffffffff803e9b80 RDI: 0000010008001780
+ RBP: 0000000000000000 R8: ffffffff80528000 R9: 0000000000000080
+ R10: 0000000000000100 R11: 0000000000000004 R12: 0000000000000000
+ R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000
+ ORIG_RAX: fffffffffffffffa CS: 0010 SS: 0018
+ #3 [ffffffff80529fb8] cpu_idle at ffffffff8010e81c
+
+ PID: 0 TASK: 100f57cb030 CPU: 1 COMMAND: "swapper"
+ #0 [1000107bfa0] smp_call_function_interrupt at ffffffff8011d191
+ #1 [1000107bfb0] call_function_interrupt at ffffffff80110bf5
+ --- <IRQ stack> ---
+ #2 [10001073e98] call_function_interrupt at ffffffff80110bf5
+ [exception RIP: default_idle+32]
+ RIP: ffffffff8010e7a9 RSP: 0000010001073f48 RFLAGS: 00000246
+ RAX: 0000000000000000 RBX: 0000000000000e86 RCX: 0000000000000018
+ RDX: ffffffff8010e789 RSI: 00000100f57cb030 RDI: 00000102000a4780
+ RBP: 0000000000000001 R8: 0000010001072000 R9: 0000000000000040
+ R10: 0000000000000000 R11: 0000000000000008 R12: 0000000000000000
+ R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000
+ ORIG_RAX: fffffffffffffffa CS: 0010 SS: 0018
+ #3 [10001073f48] cpu_idle at ffffffff8010e81c
+
+ PID: 6122 TASK: 101f3658030 CPU: 2 COMMAND: "gfs_quotad"
+ #0 [101f21efb20] start_disk_dump at ffffffffa03183ff
+ #1 [101f21efb50] try_crashdump at ffffffff8014cc1d
+ #2 [101f21efb60] die at ffffffff80111c90
+ #3 [101f21efb80] do_invalid_op at ffffffff80112058
+ #4 [101f21efc40] error_exit at ffffffff80110e1d
+ [exception RIP: do_dlm_lock+366]
+
+ ... snip ...
+
+From this email a ``location`` is provided (``/cores/processed/12345/02.11.09.08.44.19``)
+along with the server on which further analysis can be continued (``x86_64.cas-server.com``).
+
+Normally from a support perspective this email should contain enough information
+for a kernel engineer to begin debugging the problem. Assuming more is needed
+the information provided previously will prove beneficial for anyone wishing
+to access this data.
+
+After logging into the stated server and changing into the directory defined,
+several files are presented::
+
+ $ pwd
+ /cores/processed/12345/02.11.09.08.44.19
+ $ ls
+ 12345.log crash crash.in crash.out usr vmcore.12345
+
+``12345.log``: contains any informational messages presented during the processing
+of the core. Everything from informational to debug statements are provided here.
+
+``crash``: a script autogenerated to provide an automated way of gathering initial
+data from the coredump. ``Note``: if wanting to use this crash wrapper in a more
+manual fashion, some alterations to the script need to be made.
+
+crash wrapper in its original form::
+
+ #!/bin/sh
+ /usr/bin/crash \
+ /cores/processed/12345/02.11.09.08.44.19/vmcore.12345 \
+ /cores/processed/12345/02.11.09.08.44.19/usr/*/*/*/*/2.6.9*largesmp/vmlinux \
+ -s < /cores/processed/12345/02.11.09.08.44.19/crash.in
+
+In order to remove automation the redirect from crash.in needs to be removed::
+
+ #!/bin/sh
+ /usr/bin/crash /cores/processed/12345/02.11.09.08.44.19/vmcore.12345 \
+ /cores/processed/12345/02.11.09.08.44.19/usr/*/*/*/*/2.6.9*largesmp/vmlinux
+
+**Alternative to using the crash wrapper**
+
+It is possible to specify the vmlinux and corefile with crash on the command line::
+
+ $ crash /cores/processed/12345/02.11.09.08.44.19/usr/*/*/*/*/2.6.9*largesmp/vmlinux \
+ /cores/processed/12345/02.11.09.08.44.19/vmcore.12345
+
+``crash.in``: a list of commands to be read into crash during the automated
+analysis::
+
+ bt -a >>/cores/processed/12345/02.11.09.08.44.19/crash.out
+ sys >>/cores/processed/12345/02.11.09.08.44.19/crash.out
+ log >>/cores/processed/12345/02.11.09.08.44.19/crash.out
+ mod >>/cores/processed/12345/02.11.09.08.44.19/crash.out
+ exit
+
+``crash.out``: output of initial crash analysis and the same data which
+is sent in an email if defined.
+
+``usr``: directory structure from the extraction of the vmlinux file
+from the associated kernel-debuginfo rpm for use within crash::
+
+ /cores/processed/12345/02.11.09.08.44.19/
+ usr/lib/debug/lib/modules/2.6.9-78.18.ELlargesmp/vmlinux
+
+``vmcore.12345``: corefile that was either defined directly or extracted from
+a compressed archive during CAS initialization.
+
+Troubleshooting
+---------------
+
+Most of the major problems that arise when using CAS boil down to
+improper usage of the compression and archiving tools.
+
+When compressing a core that may need to be sent over the network to a CAS server,
+one of the proper ways to do so is::
+
+ $ tar cvjf vmcore.12345.tar.bz2 vmcore.12345
+
+Other ways of compressing an archive are as follows::
+
+ $ tar cvzf vmcore.tar.gz vmcore
+ $ gzip vmcore
+ $ bzip2 vmcore
+
+``Note``: please do not double compress or CAS will fail.
+
+Another issue, which isn't primarily a fault of CAS, is
+incomplete or corrupted cores. If either of these occurs
+there is a chance that CAS will not be able to process
+the data needed to associate a debug kernel or do any
+sort of automated analysis. Unfortunately, there is not
+much that can be done to resolve these sorts of issues
+other than verifying that the process that runs when
+a system coredumps, and the transfer of that dump to the
+system specified for retrieval, is solid and produces
+no errors.
+
+Resources
+=========
+
+* `CAS Wiki <http://fedorahosted.org/cas>`_
+* `CAS FAQ <https://fedorahosted.org/cas/wiki/CasFAQ>`_
+* `Mailing list <https://fedorahosted.org/mailman/listinfo/cas>`_
+* `Upstream releases <https://fedorahosted.org/releases/c/a/cas/>`_
+* Checkout latest from Git, ``git clone git://git.fedorahosted.org/cas.git``
diff --git a/lib/cas/db.py b/lib/cas/db.py
index 6f7e942..9c0cac6 100644
--- a/lib/cas/db.py
+++ b/lib/cas/db.py
@@ -1,14 +1,24 @@
-""" storage container """
+import MySQLdb
-class Core(object):
- def __init__(self):
- self.timestamp = None
- self.debugKernel = None
+class CasStorage(object):
+ def __init__(self, sql_server, username, password, database):
+ """ setup database connection and return db cursor for
+ traversing database """
+ self.db = database
+ self.sql_server = sql_server
+ self.username = username
+ self.password = password
+ self.cursor = None
-class CasServer(object):
- """ Handles the definition of cas servers for each architecture
- needing to be used for analyzing cores
- """
- def __init__(self):
- self.hostname = None
- self.architecture = None
+ def connect(self):
+ """ execute connection """
+ db_connect = MySQLdb.connect(host=self.sql_server,
+ user=self.username,
+ passwd=self.password,
+ db=self.db)
+ self.cursor = db_connect.cursor()
+ return
+
+ def getJobs(self):
+ self.cursor.execute("SELECT * FROM cas_jobs")
+ return self.cursor.fetchall()
15 years, 1 month