try:
    curs.execute('INSERT INTO nvd_cpe\
        (cpe, cpe_part, cpe_vendor, cpe_product, cpe_version, cpe_update, \
        cpe_edition, cpe_language) VALUES (?, ?, ?, ?, ?, ?, ?, ?)',
        (nvd_cpe, nvd_cpe_variables[2], nvd_cpe_variables[3], nvd_cpe_variables[4],
         nvd_cpe_variables[5], nvd_cpe_variables[6], nvd_cpe_variables[7],
         nvd_cpe_variables[8]))
except Exception as e:
    print e
error : (2036, 'Using unsupported buffer type: 12885492 (parameter: 3)', None)
(2036, 'Using unsupported buffer type: 12885364 (parameter: 3)', None)
(2036, 'Using unsupported buffer type: 12885300 (parameter: 3)', None)
(2036, 'Using unsupported buffer type: 12884724 (parameter: 3)', None)
(2036, 'Using unsupported buffer type: 12885524 (parameter: 3)', None)
... (the same five "unsupported buffer type" errors repeat for every following insert attempt)
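One way to see which of the eight bound values oursql is rejecting is to print the type of every parameter just before the execute call. This is only a debugging sketch (the dump_params helper is made up, not part of the script below; it uses the same curs, nvd_cpe and nvd_cpe_variables names as the snippet above):

# debugging sketch, not part of the original script: print the type and repr of
# each value about to be bound, so the value behind "unsupported buffer type" is visible
def dump_params(params):
    i = 1
    for p in params:
        print "parameter %d: %s %r" % (i, type(p).__name__, p)
        i = i + 1

params = (nvd_cpe, nvd_cpe_variables[2], nvd_cpe_variables[3], nvd_cpe_variables[4],
          nvd_cpe_variables[5], nvd_cpe_variables[6], nvd_cpe_variables[7],
          nvd_cpe_variables[8])
try:
    curs.execute('INSERT INTO nvd_cpe '
                 '(cpe, cpe_part, cpe_vendor, cpe_product, cpe_version, cpe_update, '
                 'cpe_edition, cpe_language) VALUES (?, ?, ?, ?, ?, ?, ?, ?)', params)
except Exception as e:
    dump_params(params)
    print e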
#!/usr/bin/env python
# Important: for this file to work under Windows, search for "mv" and change it to "move", then change the "/" in lines
# 283 and 291 to "\\". That's it!
#imports
import os
import sys
import datetime
import glob
import oursql
#get the timestamp of when the program started
from time import gmtime, strftime
import re
TIME_STARTED = strftime("%a_%d_%b_%Y_%H_%M_%S_GMT", gmtime())
# "Constants"
#Python 2.5 doesn't have real constants
#define the config file
global CONFIG_FILE
global final_output_buffer
global cve_to_score
global releases
global products_to_cve
global product_to_release
global cve_to_date
global output_file
global max_score
global product_to_release
global aliases
CONFIG_FILE = {}
final_output_buffer = {}
cve_to_score = {}
releases = {}
products_to_cve = {}
product_to_release = {}
cve_to_date = {}
output_file = open("cpe2.csv", "w")
max_score = 0.0
product_to_release = {}
aliases = {}
#split the and append to a timestamp to the basename of the file
def versioned_filename(filename):
    global TIME_STARTED
    if filename.count(".") == 0:
        return filename + "_" + TIME_STARTED
    else:
        temp_filename = filename.rsplit(".", 1)
        return temp_filename[0] + "_" + TIME_STARTED + "." + temp_filename[1]
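# Example (illustrative, using the start time that appears in the console output further down):
#   versioned_filename("cpe2.csv") -> "cpe2_Thu_10_Oct_2013_15_28_31_GMT.csv"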
def read_config_file(filename):
    global CONFIG_FILE
    #TODO check validity of filenames
    default_logfile_filename = "CVE_Importer_Log.txt"
    default_logfile_location = "."
    temp = {}
    temp["logfile"] = versioned_filename(default_logfile_location + "/" + default_logfile_filename)
    #TODO check that values 1 or 2
    temp["cpe_version"] = 2
    #TODO check that values are "download" and "local"
    temp["download_or_local"] = "download"
    #TODO check for years <2002, greater than current year, and check to make sure it is an integer
    #today=datetime.date.today()
    #current_year=today.year
    temp["local_dir"] = "Wed_15_Apr_2009_14_03_12_GMT"
    # TODO change back to all the years
    temp["years_to_download"] = [2013]
    temp["backup_delete"] = "delete"
    CONFIG_FILE = temp
def parse_alias():
    global aliases
    alias = {}
    alias_files = glob.glob('*.alias')
    for alias_file in alias_files:
        alias_data = open(alias_file).readlines()
        alias[alias_data[0]] = alias_data[1:]
    for key, value in alias.iteritems():
        key = key.strip()
        key = key.lower()
        key = key.replace(" ", "_")
        for x in value:
            x = x.strip()
            x = x.lower()
            x = x.replace(" ", "_")
            aliases[x] = key
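# Assumed *.alias file layout (inferred from the loop above, illustrative only): the first
# line is the canonical name, every following line is an alias that maps back to it, e.g.
#   microsoft:internet_explorer
#   microsoft:ie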
## This method takes as input the xml file from the government site and parses it for information. The information is
## necessary for the cpe and cve tables in the database. The xml is made of <entry> elements. Each entry tag is taken
## up one at a time and parsed.
## The items (elements in the xml) parsed and obtained below are as follows:
## a) date -- line 131 b) score -- line 134 c) cve_name line 137 d) vuln_summary 144
## e) vuln_config_name -- 164 f) release_list 187 g) product_name 193
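## A rough, abbreviated sketch (assumed shape, not copied from a real feed) of the <entry>
## structure the string splitting below relies on:
##   <entry id="CVE-2013-NNNN">
##     <vuln:vulnerable-software-list>
##       <vuln:product>cpe:/a:microsoft:.net_framework:1.1:sp1</vuln:product>
##     </vuln:vulnerable-software-list>
##     <vuln:published-datetime>2013-...</vuln:published-datetime>
##     <cvss:score>7.5</cvss:score>
##     <vuln:summary>...</vuln:summary>
##   </entry>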
def parse_xml(filename, version):
    global final_output_buffer
    global output_file_buffer
    global cve_to_score
    global releases
    global products_to_cve
    global product_to_release
    global cve_to_date
    global output_file
    global max_score
    global release_list
    global aliases
    # TODO remove line below Open nvd_cpe_sql file for writing
    cpe_sql = open("nvd_cpe_sql.sql", 'w')
    # Open a new database connection so its nvd_cpe can be filled directly
    conn = oursql.connect(host='127.0.0.1', user='root', passwd='ernest', db='esm', port=3306)
    curs = conn.cursor(oursql.Cursor)
    if version == 2:
        cpe_dict = {}
        data = open(filename).read()
        entry_list = data.split("<entry")
        xml_header = entry_list[0]
        #TODO parse xml_header
        entry_list.pop(0)
        for entry in entry_list:
            date = ""
            cpe_count = 0
            score = "-"
            is_rejected = False
            is_disputed = False
            has_vuln_config = True
            if entry.count("<cvss:generated-on-datetime>") > 0:
                temp_date = entry.split("<cvss:generated-on-datetime>")[1]
                temp_date = temp_date.split("</cvss:generated-on-datetime>")[0]
                cvss_datetime = temp_date
                date = temp_date.split("T")[0]
            if entry.count("<cvss:score>") > 0:
                temp_score = entry.split("<cvss:score>")[1]
                score = temp_score.split("</cvss:score>")[0]
            if entry.count("<cvss:access-vector>") > 0:
                temp_vector = entry.split("<cvss:access-vector>")[1]
                access_vector = temp_vector.split("</cvss:access-vector>")[0]
            if entry.count("<cvss:access-complexity>") > 0:
                temp_complexity = entry.split("<cvss:access-complexity>")[1]
                access_complexity = temp_complexity.split("</cvss:access-complexity>")[0]
            if entry.count("<cvss:authentication>") > 0:
                temp_authentication = entry.split("<cvss:authentication>")[1]
                authentication = temp_authentication.split("</cvss:authentication>")[0]
            if entry.count("<cvss:confidentiality-impact>") > 0:
                temp_confidentiality_impact = entry.split("<cvss:confidentiality-impact>")[1]
                confidentiality_impact = temp_confidentiality_impact.split("</cvss:confidentiality-impact>")[0]
            if entry.count("<cvss:integrity-impact>") > 0:
                temp_integrity_impact = entry.split("<cvss:integrity-impact>")[1]
                integrity_impact = temp_integrity_impact.split("</cvss:integrity-impact>")[0]
            if entry.count("<cvss:availability-impact>") > 0:
                temp_availability_impact = entry.split("<cvss:availability-impact>")[1]
                availability_impact = temp_availability_impact.split("</cvss:availability-impact>")[0]
            if entry.count("<cvss:source>") > 0:
                temp_source = entry.split("<cvss:source>")[1]
                cvss_source = temp_source.split("</cvss:source>")[0]
            if entry.count("<vuln:published-datetime>") > 0:
                temp_published_datetime = entry.split("<vuln:published-datetime>")[1]
                cve_published = temp_published_datetime.split("</vuln:published-datetime>")[0]
            if entry.count("<vuln:last-modified-datetime>") > 0:
                temp_last_modified_datetime = entry.split("<vuln:last-modified-datetime>")[1]
                cve_last_modified = temp_last_modified_datetime.split("</vuln:last-modified-datetime>")[0]
            #def cve_name
            # match id="CVE-..." so that correct id is captured for parsing
            match_full_id = re.search('(id="CVE.*")', entry)
            if (match_full_id.group(0)):
                temp_cve_name = (match_full_id.group(0)).split("id=\"")[1]
                cve_name = temp_cve_name.split("\"")[0]
                #if len(temp_cve_name.split("\"")[1].strip())>3:
                #    print "Possible error in parsing "+cve_name+". Expected no additional data after cve name in xml tag"
                #    pass
            #def vuln_summary
            vuln_summary = ""
            temp_vuln_summary = entry.split("<vuln:summary>")[1]
            vuln_summary = temp_vuln_summary.split("</vuln:summary>")[0]
            if len(temp_vuln_summary.split("\"")) > 1 and len(temp_vuln_summary.split("\"")[1].strip()) > 3:
                #print "Possible error in parsing "+vuln_summary+". Expected no additional data after vuln:summary in xml tag"
                pass
            if len(vuln_summary) < 5:
                #print "Possible error in "+cve_name+" vuln:summary is blank."
                pass
            if vuln_summary == "":
                #print "Possible error in "+cve_name+" vuln:summary Not Set"
                pass
            if vuln_summary.count("** DISPUTED **") > 0:
                is_disputed = True
            if vuln_summary.count("** REJECT **") > 0:
                is_rejected = True
            else:
                #def vuln_config
                if entry.count("vuln:vulnerable-configuration") < 1:
                    has_vuln_config = False
                else:
                    vuln_config_name = ""
                    temp_vuln_config = entry.split("<vuln:vulnerable-configuration id=\"")[1]
                    vuln_config_name = temp_vuln_config.split("\"")[0]
                    if vuln_config_name != "http://nvd.nist.gov":
                        #print "Possible error in "+cve_name+" vuln_config_name. Was expecting \
                        # "http://nvd.nist.gov\". Found "+vuln_config_name
                        pass
                    if len(temp_vuln_summary.split("\"")) > 1 and len(temp_vuln_summary.split("\"")[1].strip()) > 3:
                        pass
                        #print "Possible error in parsing "+vuln_summary+". Expected no \
                        # additional data after vuln:summary in xml tag"
                    if vuln_config_name == "":
                        pass
                        #print "Possible error in "+cve_name+" vuln_config_name Not Set."
            #parse product, vendor, release
            #release_list=[]
            cve_to_date[cve_name] = date
            # Cpe calculations below
            product_name = ""
            if entry.count("<vuln:vulnerable-software-list>") > 0:
                temp_release_list = entry.split("<vuln:vulnerable-software-list>")[1]
                temp_release_list = temp_release_list.split("</vuln:vulnerable-software-list>")[0]
                is_first = True
                for release in temp_release_list.split("<vuln:product>"):
                    if len(release.split("</vuln:product>")[0].strip()) > 0:
                        if is_first:
                            is_first = False
                            release_list = [release.split("</vuln:product>")[0]]
                            cve_to_score[cve_name] = score
                        else:
                            release_list.append(release.split("</vuln:product>")[0])
                        temp_product = release.split("</vuln:product>")[0]
                        # Get nvd_cpe = the whole cpe string. this goes into db
                        nvd_cpe = temp_product
                        nvd_cpe_variables = temp_product.split(":")
                        # the second word in variables still has the preceding / eg /a . The / needs to be removed
                        nvd_cpe_variables[1] = (nvd_cpe_variables[1]).split("/")[1]
                        temp_product = temp_product.split(":")
                        length_temp_product = len(temp_product)
                        product_name = temp_product[2] + " " + temp_product[3]
                        #get all variables necessary to create nvd_cpe_sql.sql file
                        # Append nvd_cpe to list of variables
                        nvd_cpe_variables.insert(0, nvd_cpe)
                        # Fill up with null values in the end for nvd_cpe_variables if list not 8 words long
                        nvd_cpe_variables.extend([None] * (8 - length_temp_product))
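                        # Worked example (illustrative, CPE taken from the console output further down):
                        # 'cpe:/a:microsoft:.net_framework:1.1:sp1' splits and pads out to
                        #   [nvd_cpe, 'cpe', 'a', 'microsoft', '.net_framework', '1.1', 'sp1', None, None]
                        # so indexes [2]..[8] map to cpe_part .. cpe_language in the insert below.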
                        # Append only those values which are present Leave the rest blank
                        #nvd_cpe_part = temp_product[1]
                        #nvd_cpe_vendor = temp_product[2]
                        #nvd_cpe_product = temp_product[3]
                        #nvd_cpe_version = temp_product[4]
                        #nvd_cpe_update = temp_product[5]
                        #nvd_cpe_edition = temp_product[6]
                        #nvd_cpe_language = temp_product[7]
                        #nvd_cpe_variables.extend([nvd_cpe, nvd_cpe_vendor, nvd_cpe_product, nvd_cpe_version,
                        #                          nvd_cpe_update, nvd_cpe_edition, nvd_cpe_language])
                        # fill up database but catch exception
                        #try:
                        #    curs.execute('INSERT INTO nvd_cpe\
                        #    (cpe, cpe_part, cpe_vendor, cpe_product, cpe_version, cpe_update, \
                        #    cpe_edition, cpe_language) VALUES (?, ?, ?, ?, ?, ?, ?, ?)', \
                        #    (nvd_cpe, nvd_cpe_variables[2], nvd_cpe_variables[3], nvd_cpe_variables[4], \
                        #    nvd_cpe_variables[5], nvd_cpe_variables[6], nvd_cpe_variables[7],
                        #    nvd_cpe_variables[8]))
                        #except Exception as e:
                        #    print e
                        #try:
                        #    curs.execute("INSERT INTO nvd_cpe(cpe) VALUES (\"" + nvd_cpe + "\");")
                        #except Exception as e:
                        #    print e
                        try:
                            curs.execute('INSERT INTO nvd_cpe(cpe) VALUES (?)', (nvd_cpe, ))
                        except Exception as e:
                            print e
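                        # One hedged idea (untested, not a confirmed fix) for the 2036
                        # "unsupported buffer type" errors from the 8-column insert above:
                        # normalise every bound value to a plain byte string (or None)
                        # before handing it to oursql, then retry the full insert.
                        #clean = [None if v is None else str(v) for v in nvd_cpe_variables]
                        #try:
                        #    curs.execute('INSERT INTO nvd_cpe\
                        #    (cpe, cpe_part, cpe_vendor, cpe_product, cpe_version, cpe_update, \
                        #    cpe_edition, cpe_language) VALUES (?, ?, ?, ?, ?, ?, ?, ?)', \
                        #    (nvd_cpe, clean[2], clean[3], clean[4], clean[5], clean[6], clean[7], clean[8]))
                        #except Exception as e:
                        #    print e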
                        temp_product_alias = temp_product[2] + ":" + temp_product[3]
                        #if aliases.has_key(temp_product_alias):
                        #    #print temp_product_alias
                        #    product_name=aliases[temp_product_alias].split(":")[0]+" "+aliases[temp_product_alias].split(":")[1]
                        if products_to_cve.has_key(product_name):
                            products_to_cve[product_name].append(cve_name)
                        else:
                            products_to_cve[product_name] = [cve_name]
                        # Fill up nvd_cve
                        #try:
                        #    curs.execute('INSERT INTO nvd_cve\
                        #    (cve, cvss_score, cvss_access_vector, cvss_access_complexity, \
                        #    cvss_authentication, cvss_confidentiality_impact, cvss_integrity_impact, \
                        #    cvss_availability_impact, cvss_source, cvss_generated_on_datetime, cve_summary, \
                        #    cve_published, cve_last_modified, cpe)'
                        #    ' VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', \
                        #    (cve_name, score, access_vector, access_complexity, authentication, \
                        #    confidentiality_impact, integrity_impact, availability_impact, \
                        #    cvss_source, cvss_datetime, vuln_summary, cve_published, cve_last_modified, \
                        #    nvd_cpe))
                        #except Exception as e:
                        #    print (e)
                for release in release_list:
                    product_name = release.split(":")[2] + " " + release.split(":")[3]
                    if product_to_release.has_key(product_name):
                        if product_to_release[product_name].count(release) == 0:
                            product_to_release[product_name].append(release)
                    else:
                        product_to_release[product_name] = [release]
                    if cpe_dict.has_key(release):
                        cpe_dict[release].append(cve_name)
                    else:
                        cpe_dict[release] = [cve_name]
            output = ""
            output = output + cve_name
            output = output + "," + score
            if len(release_list) > 0:
                cpe_count = len(release_list)
                pass
            output = output + "," + str(cpe_count)
            if is_rejected:
                output = output + ",REJECTED"
            else:
                output = output + ","
            if is_disputed:
                output = output + ",Disputed"
            else:
                output = output + ","
            releases = " ".join(release_list)
            output = output + ",\"" + releases.strip() + "\""
            output = output + ",\"" + vuln_summary.replace("\"", "'") + "\""
            #if has_vuln_config==False:
            #    print "No Vuln Config Present"
            output_file_buffer.append(output)
    elif version == 1:
        #TODO import previous code
        pass
    else:
        # this should be a log message
        print "Unknown Version"
        sys.exit()
# This method creates the name of the file which will be downloaded using wget. It is used in the main method
def make_wget_string(version, year):
    string = "wget "
    if version == 2:
        string = string + "http://static.nvd.nist.gov/feeds/xml/cve/nvdcve-2.0-" + str(year)
    elif version == 1:
        string = string + "http://nvd.nist.gov/download/nvdcve-" + str(year)
    else:
        # this should be a log message
        print "Unknown Version"
        sys.exit()
    return string + ".xml"
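# Example (illustrative): make_wget_string(2, 2013) builds
#   wget http://static.nvd.nist.gov/feeds/xml/cve/nvdcve-2.0-2013.xml
# which matches the command visible in the console output further down.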
# This function is called from the parse_xml function. It fills the nvd_cpe_sql file with SQL insert statements
# based on the nvd_cpe table currently present. It will later be modified.
#def fill_cpe_table(variables, curs):
# # create all insert statements and put into the nvd_cpe_sql.sql file
# cpe_file.write\
# ("\n" + "INSERT INTO nvd_cpe(cpe, cpe_part, cpe_vendor, cpe_product, cpe_version, cpe_update, cpe_language)\
# VALUES (\"" + variables[0] + "\", \"" + variables[1] + "\", \
# \"" + variables[2] + "\", \"" + variables[3] + "\", \"" + variables[4] + "\", \
# \"" + variables[5] + "\", \"" + variables[6] + "\");")
#
def make_final_outfile():
    global product_to_release
    global products_to_cve
    global cve_to_score
    global cve_to_date
    global max_score
    global aliases
    output_file_buffer = [
        "OSS,Vendor,Product,Alias,Releases,Release Count, Total CVEs,Average CVE Score, Risk/Time Score, CVE List,Public Score"]
    for key, value in product_to_release.iteritems():
        try:
            cve_list = set(products_to_cve[key])
            running_cve_score = 0
            running_risk_time_score = 0
            cve_array = []
            for cve in cve_list:
                score = float(cve_to_score[cve])
                cve_name = cve
                date = cve_to_date[cve_name].split("-")
                days_since_first_cve = datetime.date(int(date[0]), int(date[1]), int(date[2])) - datetime.date(1987, 12, 31)
                running_cve_score += score
                running_risk_time_score += (days_since_first_cve.days) * (days_since_first_cve.days) * score
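                # Illustrative numbers (assumed, not from a real run): a CVE dated
                # 2013-10-10 with score 7.5 lies 9415 days after 1987-12-31, so it adds
                # 9415 * 9415 * 7.5 (roughly 6.6e8) to this product's risk/time score.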
                if running_risk_time_score > max_score:
                    max_score = running_risk_time_score
                cve_array.append(cve_name)
            joined_cve_array = ":".join(cve_array)
            output_string = "No," + key.split(" ")[0] + "," + key
            if aliases.has_key(key.replace(" ", ":")):
                output_string = output_string + "," + aliases[key.replace(" ", ":")].replace(":", " ") + ","
            else:
                output_string = output_string + ",,"
            output_string = output_string + str(product_to_release[key]).replace(",", " ").replace("'", "").strip(
                "[]") + "," + str(len(cve_list)) + "," + str(running_cve_score) + "," + str(
                running_cve_score / len(cve_list)) + "," + str(running_risk_time_score) + "," + joined_cve_array
            output_file_buffer.append(output_string)
            #print output_string
        except KeyError:
            pass
print ""
print "Writing output file: Risk_Report.csv"
output_file = open("Risk_Report.csv", "w")
sql_output_file = open("Risk_Report_SQL.sql", "w")
sql_output_buffer = []
final_output_buffer = []
first_time_2 = True
for line in output_file_buffer:
if first_time_2:
first_time_2 = False
final_output_buffer.append(line)
else:
temp = line.rsplit(",")
temp_index = len(temp) - 4
final_output_buffer.append(line + "," + str(float(temp[temp_index]) / max_score))
sql_output_buffer.append("INSERT INTO vendors (cpe_vendor_name) VALUES (\"" + temp[1] + "\");")
sql_output_buffer.append(
"INSERT INTO products (cpe_vendor_name, cpe_product_name) VALUES (\"" + temp[1] + "\",\"" +
temp[2].split(" ")[1] + "\");")
for release in temp[4].split(" "):
if release.count(":") > 3:
sql_output_buffer.append(
"INSERT INTO releases (cpe_vendor_name, cpe_product_name, cpe_release_name) VALUES (\"" + temp[
1] + "\",\"" + temp[2].split(" ")[1] + "\",\"" + release.split(":")[4] + "\");")
output_file.write("\n".join(final_output_buffer))
output_file.close()
sql_output_file.write("\n".join(sql_output_buffer))
sql_output_file.close()
print "Prelim File Done."
aliased_file_source = open("Risk_Report.csv", "r")
new_alias_file = open("Aliased_Risk_Report.csv", "w")
first = True
alias_buffer = []
#products_final=open("product_final.txt","r")
#vendors_final=open("vendor_final.txt","r")
releases_final = open("release_final.txt", "r")
groups_final = open("groups.txt", "r")
#products=products_final.readlines()
releases = releases_final.readlines()
groups = groups_final.readlines()
#vendors=vendors_final.readlines()
#prods={}
rels = {}
grps = {}
#vends={}
#for prod in products:
# temp=prod.split("***")
# if len(temp[0])>1:
# prods[temp[0]]=temp[1]
for rel in releases:
temp = rel.split("***")
if len(temp[0]) > 1:
rels[temp[0].strip(":")] = temp[1]
for group in groups:
temp = group.split("***")
if len(temp[0]) > 1:
if grps.has_key(temp[0].strip(":")):
grps[temp[0].strip(":")] = grps[temp[0].strip(":")] + " " + temp[1].strip()
else:
grps[temp[0].strip(":")] = temp[1].strip()
#for vend in vendors:
# temp=vend.split("***")
# if len(temp[0])>1:
# vends[temp[0]]=temp[1]
release = ""
alias_buffer = [
"OSS,Vendor,Product,Notes,Releases,Release Count, Total CVEs,Average CVE Score, Risk/Time Score, CVE List,Public Score,Release Alias,Group"]
for line in aliased_file_source:
temp_alias_buffer = []
if first:
first = False
else:
line_parts = line.split(",")
#1=vendor,2=release,3=release, strip first 7 characters
temp = line_parts[4]
release = ""
group = ""
#temp_rel_1={}
#temp_group_1={}
for cpe in temp.split(" "):
if len(cpe[7:]) > 6:
temp_for_grouping = cpe[7:].split(":")
for_grouping = temp_for_grouping[0] + ":" + temp_for_grouping[1]
if rels.has_key(cpe[7:]):
#temp_rel_1[cpe[7:]]=1
#if temp_rel_1.has_key(cpe[7:]):
#pass
#else:
# release=release+" "+release.strip()+cpe[7:]+"-'"+rels[cpe[7:]].strip()+"'"
release = release.strip() + cpe[7:] + "-'" + rels[cpe[7:]].strip() + "'"
if grps.has_key(for_grouping):
group = group.strip() + cpe[7:] + "-'" + grps[cpe[7:]].strip() + "'"
if grps.has_key(for_grouping):
group = group.strip() + cpe[7:] + "-'" + grps[cpe[7:]].strip() + "'"
temp_alias_buffer.append(line.strip() + ",\"" + release.strip() + "\",\"" + group.strip())
alias_buffer.append("".join(temp_alias_buffer))
alias_buffer.append("\"\n")
new_alias_file.write(" ".join(alias_buffer))
new_alias_file.close()
print "Aliased File Done"
def main(argv):
    global CONFIG_FILE
    global final_output_buffer
    global output_file_buffer
    global cve_to_score
    global releases
    global products_to_cve
    global product_to_release
    global cve_to_date
    global output_file
    output_file_buffer = ["CVE, Score, CPE Count, Rejected, Disputed, Releases, Summary"]
    print "Deleting any partial NVD XML files (nvdcve-2*.xml)"
    os.system("del nvdcve-2*.xml")
    global TIME_STARTED
    if CONFIG_FILE["download_or_local"] == "download":
        for year in CONFIG_FILE["years_to_download"]:
            command_line_string = make_wget_string(CONFIG_FILE["cpe_version"], year)
            os.system(command_line_string)
        print "Making directory " + TIME_STARTED
        os.system("mkdir \"%s\"" % (TIME_STARTED))
        print "Moving XML files to " + TIME_STARTED
        os.system("move nvdcve-2*.xml \"%s\"" % (TIME_STARTED))
        for year in CONFIG_FILE["years_to_download"]:
            parse_xml(TIME_STARTED + "\\" + "nvdcve-2.0-" + str(year) + ".xml", 2)
    elif CONFIG_FILE["download_or_local"] == "local":
        TIME_STARTED = CONFIG_FILE["local_dir"]
        if CONFIG_FILE["cpe_version"] == 1:
            os.system(
                "copy %s %s" % (CONFIG_FILE["xml_location"].rstrip("/").rstrip("\\") + "\\" + "nvdcve-20??.xml", "."))
        elif CONFIG_FILE["cpe_version"] == 2:
            for year in CONFIG_FILE["years_to_download"]:
                parse_xml(TIME_STARTED + "\\" + "nvdcve-2.0-" + str(year) + ".xml", 2)
    else:
        print("Unknown option in configuration file for download_or_local")
        sys.exit()


if __name__ == "__main__":
    parse_alias()
    read_config_file("not done yet")
    main(sys.argv)
    output_file.write("\n".join(output_file_buffer))
    output_file.close()
    make_final_outfile()
    sys.exit()
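To separate the XML parsing from the driver behaviour, a stripped-down reproduction along these lines can be run on its own. This is only a sketch: it assumes the same local esm database, nvd_cpe table and credentials as the script above, and hard-codes a CPE string taken from the console output below.

#!/usr/bin/env python
# minimal standalone reproduction sketch (assumptions noted above)
import oursql

conn = oursql.connect(host='127.0.0.1', user='root', passwd='ernest', db='esm', port=3306)
curs = conn.cursor(oursql.Cursor)

nvd_cpe = 'cpe:/a:microsoft:.net_framework:1.1:sp1'
try:
    # one-element tuple: the single ? placeholder gets the whole string
    curs.execute('INSERT INTO nvd_cpe(cpe) VALUES (?)', (nvd_cpe,))
except Exception as e:
    print e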
# code: did it without the try/except
print 'here is the nvd_cpe var: ' + repr(nvd_cpe)
curs.execute('INSERT INTO nvd_cpe(cpe) VALUES (?)', (nvd_cpe, ))
break
Output:
C:\Documents and Settings\Varun\My Documents\Airius rough ( for testing)>python main-test_linux.py
Deleting any partial NVD XML files (nvdcve-2*.xml)
Could Not Find C:\Documents and Settings\Varun\My Documents\Airius rough ( for testing)\nvdcve-2*.xml
SYSTEM_WGETRC = c:/progra~1/wget/etc/wgetrc
syswgetrc = C:\Program Files\GnuWin32/etc/wgetrc
--2013-10-10 11:28:31--  http://static.nvd.nist.gov/feeds/xml/cve/nvdcve-2.0-2013.xml
Resolving static.nvd.nist.gov... 129.6.13.18
Connecting to static.nvd.nist.gov|129.6.13.18|:80... connected.
HTTP request sent, awaiting response... 200 OK
Length: 28233001 (27M) [text/xml]
Saving to: `nvdcve-2.0-2013.xml'
100%[======================================>] 28,233,001 2.11M/s in 13s
2013-10-10 11:28:44 (2.08 MB/s) - `nvdcve-2.0-2013.xml' saved [28233001/28233001]
Making directory Thu_10_Oct_2013_15_28_31_GMT
Moving XML files to Thu_10_Oct_2013_15_28_31_GMT
C:\Documents and Settings\Varun\My Documents\Airius rough ( for testing)\nvdcve-2.0-2013.xml
here is the nvd_cpe var: 'cpe:/a:microsoft:.net_framework:1.1:sp1'
Traceback (most recent call last):
  File "main-test_linux.py", line 551, in <module>
    main(sys.argv)
  File "main-test_linux.py", line 534, in main
    parse_xml(TIME_STARTED + "\\" + "nvdcve-2.0-" + str(year) + ".xml", 2)
  File "main-test_linux.py", line 271, in parse_xml
    curs.execute('INSERT INTO nvd_cpe(cpe) VALUES (?)', (nvd_cpe, ))
  File "cursor.pyx", line 122, in oursql.Cursor.execute (oursqlx\oursql.c:15747)
  File "statement.pyx", line 409, in oursql._Statement.execute (oursqlx\oursql.c:10273)
  File "util.pyx", line 91, in oursql._do_warnings_query (oursqlx\oursql.c:3577)
oursql.CollatedWarningsError: (None, 'query caused warnings', [(<class 'oursql.Warning'>, (u"Incorrect decimal value: '' for column '' at row -1", 1366L))])
C:\Documents and Settings\Varun\My Documents\Airius rough ( for testing)>
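The CollatedWarningsError above is oursql surfacing a MySQL warning (1366, the "Incorrect decimal value" message) as an exception after running the statement, not a syntax failure in the INSERT text itself. One way to see what the server reports for the last statement (a sketch, reusing the same curs cursor) is:

# sketch: ask MySQL directly for the warnings behind the CollatedWarningsError
curs.execute('SHOW WARNINGS')
for level, code, message in curs.fetchall():
    print level, code, message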
# This works:
try:
    curs.execute("INSERT INTO nvd_cpe(cpe) VALUES (\"" + nvd_cpe + "\");")
except Exception as e:
    print e
# This does not work (using single quotes around the whole thing): error Unknown column 'nvd_cpe' in 'field list'.
# The concatenation never happens in Python here -- the literal text + nvd_cpe + is sent to MySQL,
# which reads nvd_cpe as a column name.
try:
    curs.execute('INSERT INTO nvd_cpe(cpe) VALUES (\"" + nvd_cpe + "\");')
except Exception as e:
    print e
# This does not work (the string is passed bare, not wrapped in a tuple):
#   (None, '1 parameters expected, 35 given', None)
#   (None, '1 parameters expected, 30 given', None)
#   (None, '1 parameters expected, 34 given', None)
#   (None, '1 parameters expected, 31 given', None)
# The parameter argument is treated as a sequence, so a bare string counts as one parameter
# per character -- the "given" number is just the length of each CPE string. Wrapping it in a
# one-element tuple, (nvd_cpe, ), as in the version further up, avoids that.
try:
    curs.execute('INSERT INTO nvd_cpe(cpe) VALUES (?)', nvd_cpe)
except Exception as e:
    print e