Improve Python code

This commit is contained in:
Ayaz Salikhov 2017-06-04 22:51:48 +02:00 committed by Daniel Marjamäki
parent 62c22a8e06
commit 2dd6168258
23 changed files with 456 additions and 370 deletions

View File

@ -1,4 +1,4 @@
#/usr/bin/python
#!/usr/bin/env python
#
# Cert: Some extra CERT checkers
#
@ -43,11 +43,11 @@ def isLocalUnpackedStruct(arg):
def isBitwiseOp(token):
return token and (token.str in ['&', '|', '^'])
return token and (token.str in {'&', '|', '^'})
def isComparisonOp(token):
return token and (token.str in ['==', '!=', '>', '>=', '<', '<='])
return token and (token.str in {'==', '!=', '>', '>=', '<', '<='})
# EXP42-C
@ -66,13 +66,13 @@ def exp42(data):
if token.astOperand1.str == 'memcmp' and (isLocalUnpackedStruct(arg1) or isLocalUnpackedStruct(arg2)):
reportError(
token, 'style', 'EXP42-C Comparison of struct padding data (fix either by packing the struct using \'#pragma pack\' or by rewriting the comparison)')
token, 'style', "EXP42-C Comparison of struct padding data " +
"(fix either by packing the struct using '#pragma pack' or by rewriting the comparison)")
# EXP46-C
# Do not use a bitwise operator with a Boolean-like operand
# int x = (a == b) & c;
def exp46(data):
for token in data.tokenlist:
if isBitwiseOp(token) and (isComparisonOp(token.astOperand1) or isComparisonOp(token.astOperand2)):

View File

@ -37,6 +37,7 @@ class Directive:
self.file = element.get('file')
self.linenr = element.get('linenr')
class ValueType:
"""
ValueType class. Contains (promoted) type information for each node in the AST.
@ -70,10 +71,11 @@ class ValueType:
self.typeScope = IdMap[self.typeScopeId]
def isIntegral(self):
return self.type == 'bool' or self.type == 'char' or self.type == 'short' or self.type == 'int' or self.type == 'long' or self.type == 'long long'
return self.type in {'bool', 'char', 'short', 'int', 'long', 'long long'}
def isFloat(self):
return self.type == 'float' or self.type == 'double' or self.type == 'long double'
return self.type in {'float', 'double', 'long double'}
class Token:
"""
@ -129,7 +131,6 @@ class Token:
@endcode
"""
Id = None
str = None
next = None
@ -302,7 +303,6 @@ class Scope:
self.nestedIn = IdMap[self.nestedInId]
class Function:
"""
Information about a function
@ -391,7 +391,6 @@ class Variable:
self.typeEndToken = IdMap[self.typeEndTokenId]
class ValueFlow:
"""
ValueFlow::Value class
@ -404,7 +403,6 @@ class ValueFlow:
values Possible values
"""
Id = None
values = None
@ -438,7 +436,6 @@ class ValueFlow:
self.values.append(ValueFlow.Value(value))
class Configuration:
"""
Configuration class
@ -619,20 +616,16 @@ class CppcheckData:
tok = Token(node)
tok.file = files[int(node.get('fileIndex'))]
self.rawTokens.append(tok)
for i in range(len(self.rawTokens)):
if i > 0:
self.rawTokens[i].previous = self.rawTokens[i-1]
if i + 1 < len(self.rawTokens):
self.rawTokens[i].next = self.rawTokens[i+1]
for i in range(len(self.rawTokens) - 1):
self.rawTokens[i + 1].previous = self.rawTokens[i]
self.rawTokens[i].next = self.rawTokens[i + 1]
# root is 'dumps' node, each config has its own 'dump' subnode.
for cfgnode in data.getroot():
if cfgnode.tag=='dump':
if cfgnode.tag == 'dump':
self.configurations.append(Configuration(cfgnode))
def parsedump(filename):
"""
parse a cppcheck dump file
@ -640,7 +633,6 @@ def parsedump(filename):
return CppcheckData(filename)
def astIsFloat(token):
"""
Check if type of ast node is float/double
@ -651,9 +643,7 @@ def astIsFloat(token):
if token.str == '.':
return astIsFloat(token.astOperand2)
if '+-*/%'.find(token.str) == 0:
if True == astIsFloat(token.astOperand1):
return True
return astIsFloat(token.astOperand2)
return astIsFloat(token.astOperand1) or astIsFloat(token.astOperand2)
if not token.variable:
# float literal?
if token.str[0].isdigit():
@ -682,6 +672,7 @@ class CppCheckFormatter(argparse.HelpFormatter):
return text[2:].splitlines()
return argparse.HelpFormatter._split_lines(self, text, width)
def ArgumentParser():
"""
Returns an argparse argument parser with an already-added
@ -697,6 +688,7 @@ def ArgumentParser():
"Pre-defined templates: gcc, vs, edit")
return parser
def reportError(template, callstack=[], severity='', message='', id=''):
"""
Format an error message according to the template.
@ -715,7 +707,7 @@ def reportError(template, callstack=[], severity='', message='', id=''):
elif template == 'edit':
template = '{file} +{line}: {severity}: {message}'
# compute 'callstack}, {file} and {line} replacements
stack = ' -> '.join(['[' + f + ':' + str(l) + ']' for (f, l) in callstack])
stack = ' -> '.join('[' + f + ':' + str(l) + ']' for (f, l) in callstack)
file = callstack[-1][0]
line = str(callstack[-1][1])
# format message

View File

@ -1,4 +1,4 @@
#/usr/bin/python
#!/usr/bin/env python
#
# Locate casts in the code
#
@ -6,7 +6,7 @@
import cppcheckdata
import sys
messages = []
messages = set()
for arg in sys.argv[1:]:
print('Checking ' + arg + '...')
@ -35,6 +35,6 @@ for arg in sys.argv[1:]:
msg = '[' + token.file + ':' + str(
token.linenr) + '] (information) findcasts.py: found a cast\n'
if not msg in messages:
messages.append(msg)
if msg not in messages:
messages.add(msg)
sys.stderr.write(msg)

View File

@ -1,4 +1,4 @@
#/usr/bin/python
#!/usr/bin/env python
#
# MISRA C 2012 checkers
#
@ -15,12 +15,13 @@ import cppcheckdata
import sys
import re
ruleTexts={}
ruleTexts = {}
VERIFY = False
VERIFY_EXPECTED = []
VERIFY_ACTUAL = []
def reportError(location, num1, num2):
if VERIFY:
VERIFY_ACTUAL.append(str(location.linenr) + ':' + str(num1) + '.' + str(num2))
@ -28,11 +29,13 @@ def reportError(location, num1, num2):
errmsg = None
num = num1 * 100 + num2
if num in ruleTexts:
errmsg = ruleTexts[num] + ' [misra-c2012-'+str(num1)+'.'+str(num2)+']'
errmsg = ruleTexts[num] + ' [misra-c2012-' + str(num1) + '.' + str(num2) + ']'
else:
errmsg = 'misra rule ' + str(num1) + '.' + str(num2) + ' violation (use --rule-texts=<file> to get proper output)'
errmsg = 'misra rule ' + str(num1) + '.' + str(num2) +\
' violation (use --rule-texts=<file> to get proper output)'
sys.stderr.write('[' + location.file + ':' + str(location.linenr) + '] ' + errmsg + '\n')
def simpleMatch(token, pattern):
for p in pattern.split(' '):
if not token or token.str != p:
@ -48,38 +51,41 @@ LONG_BIT = 0
LONG_LONG_BIT = 0
POINTER_BIT = 0
KEYWORDS = ['auto',
'break',
'case',
'char',
'const',
'continue',
'default',
'do',
'double',
'else',
'enum',
'extern',
'float',
'for',
'goto',
'if',
'int',
'long',
'register',
'return',
'short',
'signed',
'sizeof',
'static',
'struct',
'switch',
'typedef',
'union',
'unsigned',
'void',
'volatile',
'while']
# The C (C90) language keywords, kept as a set for O(1) membership tests.
# Used by the MISRA rule 20.4 check: a macro shall not be defined with the
# same name as a keyword.
KEYWORDS = {
    'auto',
    'break',
    'case',
    'char',
    'const',
    'continue',
    'default',
    'do',
    'double',
    'else',
    'enum',
    'extern',
    'float',
    'for',
    'goto',
    'if',
    'int',
    'long',
    'register',
    'return',
    'short',
    'signed',
    'sizeof',
    'static',
    'struct',
    'switch',
    'typedef',
    'union',
    'unsigned',
    'void',
    'volatile',
    'while'
}
def getEssentialType(expr):
if not expr:
@ -87,11 +93,11 @@ def getEssentialType(expr):
if expr.variable:
typeToken = expr.variable.typeStartToken
while typeToken and typeToken.isName:
if typeToken.str in ['char', 'short', 'int', 'long', 'float', 'double']:
if typeToken.str in {'char', 'short', 'int', 'long', 'float', 'double'}:
return typeToken.str
typeToken = typeToken.next
elif expr.astOperand1 and expr.astOperand2 and expr.str in ['+', '-', '*', '/', '%', '&', '|', '^']:
elif expr.astOperand1 and expr.astOperand2 and expr.str in {'+', '-', '*', '/', '%', '&', '|', '^'}:
e1 = getEssentialType(expr.astOperand1)
e2 = getEssentialType(expr.astOperand2)
if not e1 or not e2:
@ -108,6 +114,7 @@ def getEssentialType(expr):
return None
def bitsOfEssentialType(expr):
type = getEssentialType(expr)
if type is None:
@ -124,13 +131,15 @@ def bitsOfEssentialType(expr):
return LONG_LONG_BIT
return 0
def isCast(expr):
if not expr or expr.str != '(' or not expr.astOperand1 or expr.astOperand2:
return False
if simpleMatch(expr,'( )'):
if simpleMatch(expr, '( )'):
return False
return True
def isFunctionCall(expr):
if not expr:
return False
@ -142,14 +151,16 @@ def isFunctionCall(expr):
return False
return True
def countSideEffects(expr):
if not expr or expr.str in [',', ';']:
if not expr or expr.str in {',', ';'}:
return 0
ret = 0
if expr.str in ['++', '--', '=']:
if expr.str in {'++', '--', '='}:
ret = 1
return ret + countSideEffects(expr.astOperand1) + countSideEffects(expr.astOperand2)
def getForLoopExpressions(forToken):
if not forToken or forToken.str != 'for':
return None
@ -160,7 +171,9 @@ def getForLoopExpressions(forToken):
return None
if not lpar.astOperand2.astOperand2 or lpar.astOperand2.astOperand2.str != ';':
return None
return [lpar.astOperand2.astOperand1, lpar.astOperand2.astOperand2.astOperand1, lpar.astOperand2.astOperand2.astOperand2]
return [lpar.astOperand2.astOperand1,
lpar.astOperand2.astOperand2.astOperand1,
lpar.astOperand2.astOperand2.astOperand2]
def hasFloatComparison(expr):
@ -173,6 +186,7 @@ def hasFloatComparison(expr):
return cppcheckdata.astIsFloat(expr.astOperand1) or cppcheckdata.astIsFloat(expr.astOperand2)
return False
def hasSideEffectsRecursive(expr):
if not expr:
return False
@ -180,13 +194,15 @@ def hasSideEffectsRecursive(expr):
prev = expr.astOperand1.previous
if prev and (prev.str == '{' or prev.str == '{'):
return hasSideEffectsRecursive(expr.astOperand2)
if expr.str in ['++', '--', '=']:
if expr.str in {'++', '--', '='}:
return True
# Todo: Check function calls
return hasSideEffectsRecursive(expr.astOperand1) or hasSideEffectsRecursive(expr.astOperand2)
def isBoolExpression(expr):
return expr and expr.str in ['!', '==', '!=', '<', '<=', '>', '>=', '&&', '||']
return expr and expr.str in {'!', '==', '!=', '<', '<=', '>', '>=', '&&', '||'}
def isConstantExpression(expr):
if expr.isNumber:
@ -201,30 +217,32 @@ def isConstantExpression(expr):
return False
return True
def isUnsignedInt(expr):
# TODO this function is very incomplete. use ValueType?
if not expr:
return False
if expr.isNumber:
return expr.str.find('u')>0 or expr.str.find('U')>0
if expr.str in ['+','-','*','/','%']:
return expr.str.find('u') > 0 or expr.str.find('U') > 0
if expr.str in {'+', '-', '*', '/', '%'}:
return isUnsignedInt(expr.astOperand1) or isUnsignedInt(expr.astOperand2)
return False
def getPrecedence(expr):
if not expr:
return 16
if not expr.astOperand1 or not expr.astOperand2:
return 16
if expr.str in ['*', '/', '%']:
if expr.str in {'*', '/', '%'}:
return 12
if expr.str in ['+', '-']:
if expr.str in {'+', '-'}:
return 11
if expr.str in ['<<', '>>']:
if expr.str in {'<<', '>>'}:
return 10
if expr.str in ['<', '>', '<=', '>=']:
if expr.str in {'<', '>', '<=', '>='}:
return 9
if expr.str in ['==', '!=']:
if expr.str in {'==', '!='}:
return 8
if expr.str == '&':
return 7
@ -236,7 +254,7 @@ def getPrecedence(expr):
return 4
if expr.str == '||':
return 3
if expr.str in ['?',':']:
if expr.str in {'?', ':'}:
return 2
if expr.isAssignmentOp:
return 1
@ -244,6 +262,7 @@ def getPrecedence(expr):
return 0
return -1
def noParentheses(tok1, tok2):
while tok1 and tok1 != tok2:
if tok1.str == '(' or tok1.str == ')':
@ -251,6 +270,7 @@ def noParentheses(tok1, tok2):
tok1 = tok1.next
return tok1 == tok2
def findGotoLabel(gotoToken):
label = gotoToken.next.str
tok = gotoToken.next.next
@ -262,42 +282,46 @@ def findGotoLabel(gotoToken):
tok = tok.next
return None
def findInclude(directives, header):
    """Return the directive '#include <header>' if present, else None.

    *header* must include its own angle brackets or quotes, e.g. '<time.h>'.
    """
    wanted = '#include ' + header
    return next((d for d in directives if d.str == wanted), None)
def misra_3_1(rawTokens):
for token in rawTokens:
if token.str.startswith('/*') or token.str.startswith('//'):
if token.str[2:].find('//')>=0 or token.str[2:].find('/*')>=0:
if '//' in token.str[2:] or '/*' in token.str[2:]:
reportError(token, 3, 1)
def misra_5_1(data):
    """MISRA 5.1: identifiers longer than 31 characters are reported
    (external identifiers shall be distinct within that length)."""
    too_long = (t for t in data.tokenlist if t.isName and len(t.str) > 31)
    for name_token in too_long:
        reportError(name_token, 5, 1)
def misra_5_3(data):
scopeVars = {}
for var in data.variables:
if var.isArgument:
# TODO
continue
if not var.nameToken.scope in scopeVars:
if var.nameToken.scope not in scopeVars:
scopeVars[var.nameToken.scope] = []
scopeVars[var.nameToken.scope].append(var)
for innerScope in data.scopes:
if innerScope.type == 'Global':
continue
if not innerScope in scopeVars:
if innerScope not in scopeVars:
continue
for innerVar in scopeVars[innerScope]:
outerScope = innerScope.nestedIn
while outerScope:
if not outerScope in scopeVars:
if outerScope not in scopeVars:
outerScope = outerScope.nestedIn
continue
found = False
@ -310,11 +334,13 @@ def misra_5_3(data):
break
outerScope = outerScope.nestedIn
def misra_5_4(data):
    """MISRA 5.4: macro identifiers shall be distinct.

    Reports any '#define' whose macro name is 64 or more characters long
    (only the first 63 characters are guaranteed significant).

    Fixes: the loop variable no longer shadows the builtin ``dir``, and
    the regular expression is compiled once instead of on every iteration.
    """
    long_macro = re.compile(r'#define [a-zA-Z0-9_]{64,}')
    for directive in data.directives:
        if long_macro.match(directive.str):
            reportError(directive, 5, 4)
def misra_5_5(data):
macroNames = []
for dir in data.directives:
@ -325,21 +351,25 @@ def misra_5_5(data):
if var.nameToken.str in macroNames:
reportError(var.nameToken, 5, 5)
def misra_7_1(rawTokens):
    """MISRA 7.1: octal constants shall not be used."""
    octal = re.compile(r'^0[0-7]+$')
    for raw in rawTokens:
        if octal.match(raw.str):
            reportError(raw, 7, 1)
def misra_7_3(rawTokens):
    """MISRA 7.3: the lowercase suffix 'l' shall not be used
    (easily confused with the digit '1'); use 'L' instead."""
    lowercase_ell = re.compile(r'^[0-9]+l')
    for raw in rawTokens:
        if lowercase_ell.match(raw.str):
            reportError(raw, 7, 3)
def misra_8_11(data):
    """MISRA 8.11: an extern array declaration should state its size
    explicitly (flags 'extern T name[];' at global scope)."""
    for var in data.variables:
        if not var.isExtern:
            continue
        # unsized array: the name is immediately followed by '[ ]'
        if simpleMatch(var.nameToken.next, '[ ]') and var.nameToken.scope.type == 'Global':
            reportError(var.nameToken, 8, 11)
def misra_8_12(data):
for token in data.tokenlist:
if token.str != '{':
@ -353,26 +383,29 @@ def misra_8_12(data):
break
if etok.str == '=':
rhsValues = etok.astOperand2.values
if rhsValues and len(rhsValues)==1:
if rhsValues and len(rhsValues) == 1:
if rhsValues[0].intvalue in values:
reportError(etok, 8, 12)
break
values.append(rhsValues[0].intvalue)
etok = etok.next
def misra_8_14(rawTokens):
    """MISRA 8.14: the 'restrict' type qualifier shall not be used."""
    for offender in (t for t in rawTokens if t.str == 'restrict'):
        reportError(offender, 8, 14)
def misra_9_5(rawTokens):
    """MISRA 9.5: when designated initializers are used, the array size
    shall be specified explicitly.

    The token pattern '[ ] = { [' corresponds to 'T a[] = { [i] = ... }'.
    """
    for raw in rawTokens:
        if simpleMatch(raw, '[ ] = { ['):
            reportError(raw, 9, 5)
def misra_10_4(data):
for token in data.tokenlist:
if not token.str in ['+','-','*','/','%','&','|','^'] and not token.isComparisonOp:
if token.str not in {'+', '-', '*', '/', '%', '&', '|', '^'} and not token.isComparisonOp:
continue
if not token.astOperand1 or not token.astOperand2:
continue
@ -385,15 +418,16 @@ def misra_10_4(data):
if e1 and e2 and e1 != e2:
reportError(token, 10, 4)
def misra_10_6(data):
for token in data.tokenlist:
if token.str != '=' or not token.astOperand1 or not token.astOperand2:
continue
vt1 = token.astOperand1.valueType
vt2 = token.astOperand2.valueType
if not vt1 or vt1.pointer>0:
if not vt1 or vt1.pointer > 0:
continue
if not vt2 or vt2.pointer>0:
if not vt2 or vt2.pointer > 0:
continue
try:
intTypes = ['char', 'short', 'int', 'long', 'long long']
@ -407,13 +441,14 @@ def misra_10_6(data):
except ValueError:
pass
def misra_10_8(data):
for token in data.tokenlist:
if not isCast(token):
continue
if not token.valueType or token.valueType.pointer>0:
if not token.valueType or token.valueType.pointer > 0:
continue
if not token.astOperand1.valueType or token.astOperand1.valueType.pointer>0:
if not token.astOperand1.valueType or token.astOperand1.valueType.pointer > 0:
continue
if not token.astOperand1.astOperand1:
continue
@ -429,6 +464,7 @@ def misra_10_8(data):
except ValueError:
pass
def misra_11_3(data):
for token in data.tokenlist:
if not isCast(token):
@ -437,9 +473,11 @@ def misra_11_3(data):
vt2 = token.astOperand1.valueType
if not vt1 or not vt2:
continue
if vt1.pointer==vt2.pointer and vt1.pointer>0 and vt1.type != vt2.type and vt1.isIntegral() and vt2.isIntegral() and vt1.type != 'char':
if vt1.pointer == vt2.pointer and vt1.pointer > 0 and vt1.type != vt2.type and\
vt1.isIntegral() and vt2.isIntegral() and vt1.type != 'char':
reportError(token, 11, 3)
def misra_11_4(data):
for token in data.tokenlist:
if not isCast(token):
@ -448,9 +486,10 @@ def misra_11_4(data):
vt2 = token.astOperand1.valueType
if not vt1 or not vt2:
continue
if vt1.pointer==0 and vt2.pointer>0 and vt2.type != 'void':
if vt1.pointer == 0 and vt2.pointer > 0 and vt2.type != 'void':
reportError(token, 11, 4)
def misra_11_5(data):
for token in data.tokenlist:
if not isCast(token):
@ -459,9 +498,10 @@ def misra_11_5(data):
vt2 = token.astOperand1.valueType
if not vt1 or not vt2:
continue
if vt1.pointer>0 and vt1.type != 'void' and vt2.pointer==vt1.pointer and vt2.type == 'void':
if vt1.pointer > 0 and vt1.type != 'void' and vt2.pointer == vt1.pointer and vt2.type == 'void':
reportError(token, 11, 5)
def misra_11_6(data):
for token in data.tokenlist:
if not isCast(token):
@ -470,11 +510,12 @@ def misra_11_6(data):
vt2 = token.astOperand1.valueType
if not vt1 or not vt2:
continue
if vt1.pointer==1 and vt1.type=='void' and vt2.pointer==0:
if vt1.pointer == 1 and vt1.type == 'void' and vt2.pointer == 0:
reportError(token, 11, 6)
elif vt1.pointer==0 and vt2.pointer==1 and vt2.type=='void':
elif vt1.pointer == 0 and vt2.pointer == 1 and vt2.type == 'void':
reportError(token, 11, 6)
def misra_11_7(data):
for token in data.tokenlist:
if not isCast(token):
@ -483,20 +524,23 @@ def misra_11_7(data):
vt2 = token.astOperand1.valueType
if not vt1 or not vt2:
continue
if vt1.pointer>0 and vt1.type=='record' and vt2.pointer>0 and vt2.type=='record' and vt1.typeScopeId != vt2.typeScopeId:
if vt1.pointer > 0 and vt1.type == 'record' and\
vt2.pointer > 0 and vt2.type == 'record' and vt1.typeScopeId != vt2.typeScopeId:
reportError(token, 11, 7)
def misra_11_8(data):
for token in data.tokenlist:
if not isCast(token):
continue
if not token.valueType or not token.astOperand1.valueType:
continue
if token.valueType.pointer==0 or token.valueType.pointer==0:
if token.valueType.pointer == 0 or token.valueType.pointer == 0:
continue
if token.valueType.constness==0 and token.astOperand1.valueType.constness>0:
if token.valueType.constness == 0 and token.astOperand1.valueType.constness > 0:
reportError(token, 11, 8)
def misra_11_9(data):
for directive in data.directives:
res1 = re.match(r'#define ([A-Za-z_][A-Za-z_0-9]*) (.*)', directive.str)
@ -505,27 +549,29 @@ def misra_11_9(data):
name = res1.group(1)
if name == 'NULL':
continue
value = res1.group(2).replace(' ','')
value = res1.group(2).replace(' ', '')
if value == '((void*)0)':
reportError(directive, 11, 9)
def misra_12_1_sizeof(rawTokens):
state = 0
for tok in rawTokens:
if tok.str.startswith('//') or tok.str.startswith('/*'):
continue
if tok.str == 'sizeof':
state = 1
elif state == 1:
if re.match(r'^[a-zA-Z_]',tok.str):
state = 2
else:
state = 0
elif state == 2:
if tok.str in ['+','-','*','/','%']:
reportError(tok, 12, 1)
else:
state = 0
state = 0
for tok in rawTokens:
if tok.str.startswith('//') or tok.str.startswith('/*'):
continue
if tok.str == 'sizeof':
state = 1
elif state == 1:
if re.match(r'^[a-zA-Z_]', tok.str):
state = 2
else:
state = 0
elif state == 2:
if tok.str in {'+', '-', '*', '/', '%'}:
reportError(tok, 12, 1)
else:
state = 0
def misra_12_1(data):
for token in data.tokenlist:
@ -533,7 +579,7 @@ def misra_12_1(data):
if p < 2 or p > 12:
continue
p1 = getPrecedence(token.astOperand1)
if p1 <= 12 and p1 > p and noParentheses(token.astOperand1,token):
if p1 <= 12 and p1 > p and noParentheses(token.astOperand1, token):
reportError(token, 12, 1)
continue
p2 = getPrecedence(token.astOperand2)
@ -541,31 +587,34 @@ def misra_12_1(data):
reportError(token, 12, 1)
continue
def misra_12_2(data):
for token in data.tokenlist:
if not (token.str in ['<<','>>']):
continue
if (not token.astOperand2) or (not token.astOperand2.values):
continue
maxval = 0
for val in token.astOperand2.values:
if val.intvalue > maxval:
maxval = val.intvalue
if maxval == 0:
continue
sz = bitsOfEssentialType(token.astOperand1)
if sz <= 0:
continue
if maxval >= sz:
reportError(token, 12, 2)
for token in data.tokenlist:
if not (token.str in {'<<', '>>'}):
continue
if (not token.astOperand2) or (not token.astOperand2.values):
continue
maxval = 0
for val in token.astOperand2.values:
if val.intvalue > maxval:
maxval = val.intvalue
if maxval == 0:
continue
sz = bitsOfEssentialType(token.astOperand1)
if sz <= 0:
continue
if maxval >= sz:
reportError(token, 12, 2)
def misra_12_3(data):
for token in data.tokenlist:
if token.str != ',' or token.scope.type == 'Enum':
continue
if token.astParent and (token.astParent.str in ['(', ',', '{']):
continue
reportError(token, 12, 3)
for token in data.tokenlist:
if token.str != ',' or token.scope.type == 'Enum':
continue
if token.astParent and token.astParent.str in {'(', ',', '{'}:
continue
reportError(token, 12, 3)
def misra_12_4(data):
max_uint = 0
@ -586,23 +635,26 @@ def misra_12_4(data):
reportError(token, 12, 4)
break
def misra_13_1(data):
for token in data.tokenlist:
if token.str != '=':
continue
init = token.next
if init and init.str == '{' and hasSideEffectsRecursive(init):
reportError(init,13,1)
reportError(init, 13, 1)
def misra_13_3(data):
for token in data.tokenlist:
if not token.str in ['++', '--']:
continue
astTop = token
while astTop.astParent and not astTop.astParent.str in [',', ';']:
astTop = astTop.astParent
if countSideEffects(astTop) >= 2:
reportError(astTop, 13, 3)
if token.str not in {'++', '--'}:
continue
astTop = token
while astTop.astParent and astTop.astParent.str not in {',', ';'}:
astTop = astTop.astParent
if countSideEffects(astTop) >= 2:
reportError(astTop, 13, 3)
def misra_13_4(data):
for token in data.tokenlist:
@ -610,21 +662,24 @@ def misra_13_4(data):
continue
if not token.astParent:
continue
if token.astOperand1.str == '[' and (token.astOperand1.previous.str=='{' or token.astOperand1.previous.str==','):
if token.astOperand1.str == '[' and token.astOperand1.previous.str in {'{', ','}:
continue
if not (token.astParent.str in [',', ';']):
reportError(token, 13, 4)
reportError(token, 13, 4)
def misra_13_5(data):
    """MISRA 13.5: the right-hand operand of && or || shall not contain
    side effects (it may not be evaluated due to short-circuiting)."""
    for op in data.tokenlist:
        if not op.isLogicalOp:
            continue
        if hasSideEffectsRecursive(op.astOperand2):
            reportError(op, 13, 5)
def misra_13_6(data):
    """MISRA 13.6: the operand of sizeof shall not contain side effects."""
    sizeof_tokens = (t for t in data.tokenlist if t.str == 'sizeof')
    for szf in sizeof_tokens:
        if hasSideEffectsRecursive(szf.next):
            reportError(szf, 13, 6)
def misra_14_1(data):
for token in data.tokenlist:
if token.str != 'for':
@ -633,6 +688,7 @@ def misra_14_1(data):
if exprs and hasFloatComparison(exprs[1]):
reportError(token, 14, 1)
def misra_14_2(data):
for token in data.tokenlist:
expressions = getForLoopExpressions(token)
@ -648,16 +704,18 @@ def misra_14_4(data):
for token in data.tokenlist:
if token.str != '(':
continue
if not token.astOperand1 or not (token.astOperand1.str in ['if', 'while']):
if not token.astOperand1 or not (token.astOperand1.str in {'if', 'while'}):
continue
if not isBoolExpression(token.astOperand2):
reportError(token, 14, 4)
def misra_15_1(data):
    """MISRA 15.1 (advisory): the goto statement should not be used."""
    for goto_tok in (t for t in data.tokenlist if t.str == "goto"):
        reportError(goto_tok, 15, 1)
def misra_15_2(data):
for token in data.tokenlist:
if token.str != 'goto':
@ -667,6 +725,7 @@ def misra_15_2(data):
if not findGotoLabel(token):
reportError(token, 15, 2)
def misra_15_3(data):
for token in data.tokenlist:
if token.str != 'goto':
@ -682,17 +741,19 @@ def misra_15_3(data):
if not scope:
reportError(token, 15, 3)
def misra_15_5(data):
    """MISRA 15.5 (advisory): a function should have a single point of exit.

    Flags any 'return' whose immediate scope is not the function body
    itself, i.e. a return nested inside an inner block.
    """
    for tok in data.tokenlist:
        if tok.str != 'return':
            continue
        if tok.scope.type != 'Function':
            reportError(tok, 15, 5)
def misra_15_6(rawTokens):
state = 0
indent = 0
tok1 = None
for token in rawTokens:
if token.str in ['if', 'for', 'while']:
if token.str in {'if', 'for', 'while'}:
if simpleMatch(token.previous, '# if'):
continue
if simpleMatch(token.previous, "} while"):
@ -719,6 +780,7 @@ def misra_15_6(rawTokens):
if token.str != '{':
reportError(tok1, 15, 6)
def misra_15_7(data):
for token in data.tokenlist:
if not simpleMatch(token, 'if ('):
@ -730,11 +792,13 @@ def misra_15_7(data):
# TODO add 16.1 rule
def misra_16_2(data):
    """MISRA 16.2: a case label shall appear only directly within the
    compound statement of a switch."""
    for tok in data.tokenlist:
        if tok.str != 'case':
            continue
        if tok.scope.type != 'Switch':
            reportError(tok, 16, 2)
def misra_16_3(rawTokens):
# state: 0=no, 1=break is seen but not its ';', 2=after 'break;', 'comment', '{'
state = 0
@ -747,13 +811,14 @@ def misra_16_3(rawTokens):
else:
state = 0
elif token.str.startswith('/*') or token.str.startswith('//'):
if token.str.lower().find('fallthrough')>0:
if token.str.lower().find('fallthrough') > 0:
state = 2
elif token.str == '{':
state = 2
elif token.str == 'case' and state != 2:
reportError(token, 16, 3)
def misra_16_4(data):
for token in data.tokenlist:
if token.str != 'switch':
@ -773,6 +838,7 @@ def misra_16_4(data):
if tok and tok.str != 'default':
reportError(token, 16, 4)
def misra_16_5(data):
for token in data.tokenlist:
if token.str != 'default':
@ -781,7 +847,7 @@ def misra_16_5(data):
continue
tok2 = token
while tok2:
if tok2.str in ['}', 'case']:
if tok2.str in {'}', 'case'}:
break
if tok2.str == '{':
tok2 = tok2.link
@ -789,6 +855,7 @@ def misra_16_5(data):
if tok2 and tok2.str == 'case':
reportError(token, 16, 5)
def misra_16_6(data):
for token in data.tokenlist:
if not (simpleMatch(token, 'switch (') and simpleMatch(token.next.link, ') {')):
@ -800,7 +867,7 @@ def misra_16_6(data):
count = count + 1
elif tok.str == '{':
tok = tok.link
if simpleMatch(tok.previous.previous,'break ;'):
if simpleMatch(tok.previous.previous, 'break ;'):
count = count + 1
elif tok.str == '}':
break
@ -808,24 +875,28 @@ def misra_16_6(data):
if count < 2:
reportError(token, 16, 6)
def misra_16_7(data):
    """MISRA 16.7: the switch expression shall not be essentially boolean."""
    for tok in data.tokenlist:
        if not simpleMatch(tok, 'switch ('):
            continue
        if isBoolExpression(tok.next.astOperand2):
            reportError(tok, 16, 7)
def misra_17_1(data):
for token in data.tokenlist:
if isFunctionCall(token) and token.astOperand1.str in ['va_list', 'va_arg', 'va_start', 'va_end' , 'va_copy']:
if isFunctionCall(token) and token.astOperand1.str in {'va_list', 'va_arg', 'va_start', 'va_end', 'va_copy'}:
reportError(token, 17, 1)
def misra_17_6(rawTokens):
    """MISRA 17.6: 'static' shall not appear in an array parameter
    declarator (the '[static n]' form)."""
    for raw in rawTokens:
        if simpleMatch(raw, '[ static'):
            reportError(raw, 17, 6)
def misra_17_8(data):
for token in data.tokenlist:
if not (token.isAssignmentOp or (token.str in ['++','--'])):
if not (token.isAssignmentOp or (token.str in {'++', '--'})):
continue
if not token.astOperand1:
continue
@ -833,6 +904,7 @@ def misra_17_8(data):
if var and var.isArgument:
reportError(token, 17, 8)
def misra_18_5(data):
for var in data.variables:
if not var.isPointer:
@ -848,6 +920,7 @@ def misra_18_5(data):
if count > 2:
reportError(var.nameToken, 18, 5)
def misra_18_8(data):
for var in data.variables:
if not var.isArray or not var.isLocal:
@ -859,11 +932,13 @@ def misra_18_8(data):
if not isConstantExpression(typetok.astOperand2):
reportError(var.nameToken, 18, 8)
def misra_19_2(data):
    """MISRA 19.2 (advisory): the union keyword should not be used."""
    for offender in (t for t in data.tokenlist if t.str == 'union'):
        reportError(offender, 19, 2)
def misra_20_1(data):
for directive in data.directives:
if not directive.str.startswith('#include'):
@ -875,15 +950,17 @@ def misra_20_1(data):
reportError(directive, 20, 1)
break
def misra_20_2(data):
for directive in data.directives:
if not directive.str.startswith('#include '):
continue
for pattern in ['\\', '//', '/*', '\'']:
if directive.str.find(pattern)>0:
for pattern in {'\\', '//', '/*', "'"}:
if directive.str.find(pattern) > 0:
reportError(directive, 20, 2)
break
def misra_20_3(rawTokens):
linenr = -1
for token in rawTokens:
@ -896,57 +973,67 @@ def misra_20_3(rawTokens):
if not headerToken or not (headerToken.str.startswith('<') or headerToken.str.startswith('"')):
reportError(token, 20, 3)
def misra_20_4(data):
    """MISRA 20.4: a macro shall not be defined with the same name as a
    C keyword (only lowercase candidates can collide with keywords)."""
    define_name = re.compile(r'#define ([a-z][a-z0-9_]+)')
    for directive in data.directives:
        match = define_name.search(directive.str)
        if match and match.group(1) in KEYWORDS:
            reportError(directive, 20, 4)
def misra_20_5(data):
    """MISRA 20.5 (advisory): #undef should not be used."""
    for directive in data.directives:
        if not directive.str.startswith('#undef '):
            continue
        reportError(directive, 20, 5)
def misra_21_3(data):
for token in data.tokenlist:
if isFunctionCall(token) and (token.astOperand1.str in ['malloc', 'calloc', 'realloc', 'free']):
if isFunctionCall(token) and (token.astOperand1.str in {'malloc', 'calloc', 'realloc', 'free'}):
reportError(token, 21, 3)
def misra_21_4(data):
    """MISRA 21.4: the standard header <setjmp.h> shall not be used."""
    offending = findInclude(data.directives, '<setjmp.h>')
    if offending is not None:
        reportError(offending, 21, 4)
def misra_21_5(data):
    """MISRA 21.5: the standard header <signal.h> shall not be used."""
    offending = findInclude(data.directives, '<signal.h>')
    if offending is not None:
        reportError(offending, 21, 5)
def misra_21_7(data):
for token in data.tokenlist:
if isFunctionCall(token) and (token.astOperand1.str in ['atof', 'atoi', 'atol', 'atoll']):
if isFunctionCall(token) and (token.astOperand1.str in {'atof', 'atoi', 'atol', 'atoll'}):
reportError(token, 21, 7)
def misra_21_8(data):
for token in data.tokenlist:
if isFunctionCall(token) and (token.astOperand1.str in ['abort', 'getenv', 'system']):
if isFunctionCall(token) and (token.astOperand1.str in {'abort', 'getenv', 'system'}):
reportError(token, 21, 8)
def misra_21_9(data):
for token in data.tokenlist:
if (token.str in ['bsearch', 'qsort']) and token.next and token.next.str == '(':
if (token.str in {'bsearch', 'qsort'}) and token.next and token.next.str == '(':
reportError(token, 21, 9)
def misra_21_11(data):
    """MISRA 21.11: the standard header <tgmath.h> shall not be used."""
    offending = findInclude(data.directives, '<tgmath.h>')
    if offending is not None:
        reportError(offending, 21, 11)
def loadRuleTexts(filename):
num1 = 0
num2 = 0
for line in open(filename,'rt'):
line = line.replace('\r','').replace('\n','')
for line in open(filename, 'rt'):
line = line.replace('\r', '').replace('\n', '')
res = re.match(r'^Rule ([0-9]+).([0-9]+)', line)
if res:
num1 = int(res.group(1))
@ -986,7 +1073,7 @@ for arg in sys.argv[1:]:
VERIFY_ACTUAL = []
VERIFY_EXPECTED = []
for tok in data.rawTokens:
if tok.str.startswith('//') and tok.str.find('TODO')<0:
if tok.str.startswith('//') and 'TODO' not in tok.str:
for word in tok.str[2:].split(' '):
if re.match(r'[0-9]+\.[0-9]+', word):
VERIFY_EXPECTED.append(str(tok.linenr) + ':' + word)
@ -1075,10 +1162,10 @@ for arg in sys.argv[1:]:
if VERIFY:
for expected in VERIFY_EXPECTED:
if not expected in VERIFY_ACTUAL:
if expected not in VERIFY_ACTUAL:
print('Expected but not seen: ' + expected)
sys.exit(1)
for actual in VERIFY_ACTUAL:
if not actual in VERIFY_EXPECTED:
if actual not in VERIFY_EXPECTED:
print('Not expected: ' + actual)
sys.exit(1)

View File

@ -1,4 +1,4 @@
#/usr/bin/python
#!/usr/bin/env python
#
# cppcheck addon for naming conventions
#

View File

@ -1,4 +1,4 @@
#/usr/bin/python
#!/usr/bin/env python
#
# This script analyses Cppcheck dump files to locate threadsafety issues
# - warn about static local objects
@ -15,8 +15,8 @@ def reportError(token, severity, msg):
def checkstatic(data):
for var in data.variables:
if var.isStatic == True and var.isLocal == True and var.isClass == True:
reportError(var.typeStartToken, 'warning', ('Local static object: ' + var.nameToken.str) )
if var.isStatic and var.isLocal and var.isClass:
reportError(var.typeStartToken, 'warning', ('Local static object: ' + var.nameToken.str))
for arg in sys.argv[1:]:
print('Checking ' + arg + '...')

View File

@ -1,4 +1,4 @@
#! /usr/bin/python
#!/usr/bin/env python
#
# cppcheck addon for Y2038 safeness detection
#
@ -18,18 +18,17 @@ import cppcheckdata
import sys
import os
import re
import argparse
#---------------
# --------------
# Error handling
#---------------
# --------------
diagnostics = {}
def reportDiagnostic(template, configuration, file, line, severity, message):
# collect diagnostics by configuration
if not configuration in diagnostics:
if configuration not in diagnostics:
diagnostics[configuration] = []
# add error to this configuration
diagnostics[configuration].append(
@ -58,9 +57,9 @@ def reportTokDiag(template, cfg, token, severity, msg):
token.file, token.linenr,
severity, msg)
#---------------------------------------------
# --------------------------------------------
# #define/#undef detection regular expressions
#---------------------------------------------
# --------------------------------------------
# test for '#define _TIME_BITS 64'
re_define_time_bits_64 = re.compile(r'^\s*#\s*define\s+_TIME_BITS\s+64\s*$')
@ -77,14 +76,14 @@ re_define_use_time_bits64 = re.compile(r'^\s*#\s*define\s+_USE_TIME_BITS64\s*$')
# test for '#undef _USE_TIME_BITS64' (if it ever happens)
re_undef_use_time_bits64 = re.compile(r'^\s*#\s*undef\s+_USE_TIME_BITS64\s*$')
#---------------------------------
# --------------------------------
# List of Y2038-unsafe identifiers
#---------------------------------
# --------------------------------
# This is WIP. Eventually it should contain all identifiers (types
# and functions) which would be affected by the Y2038 bug.
id_Y2038 = [
id_Y2038 = {
# Y2038-unsafe types by definition
'time_t'
# Types using Y2038-unsafe types
@ -184,44 +183,42 @@ id_Y2038 = [
'getutxid',
'getutxline',
'pututxline'
]
}
# return all files ending in .dump among or under the given paths
def find_dump_files(paths):
dumpfiles = []
for path in paths:
if path.endswith('.dump'):
if not path in dumpfiles:
if path not in dumpfiles:
dumpfiles.append(path)
else:
for (top, subdirs, files) in os.walk(path):
for file in files:
if file.endswith('.dump'):
f = top + '/' + file
if not f in dumpfiles:
if f not in dumpfiles:
dumpfiles.append(f)
dumpfiles.sort()
return dumpfiles
#------------------
# -----------------
# Let's get to work
#------------------
# -----------------
# extend cppcheck parser with our own options
parser = cppcheckdata.ArgumentParser()
parser.add_argument('-q', '--quiet', action='store_true',
help='do not print "Checking ..." lines')
parser.add_argument('paths', nargs='+', metavar='path',
help='path to dump file or directory')
# parse command line
# parse command line
args = parser.parse_args()
# now operate on each file in turn
dumpfiles = find_dump_files(args.paths)
for dumpfile in dumpfiles:
@ -258,7 +255,7 @@ for dumpfile in dumpfiles:
if re_define_use_time_bits64.match(directive.str):
safe = int(srclinenr)
# warn about _TIME_BITS not being defined
if time_bits_defined == False:
if not time_bits_defined:
reportDirDiag(args.template,
cfg, srcfile, srclinenr, directive, 'warning',
'_USE_TIME_BITS64 is defined but _TIME_BITS was not')

View File

@ -7,9 +7,9 @@ with open('README.txt') as f:
setup(
name="cppcheck",
description='Python script to parse the XML (version 2) output of '
+ 'cppcheck and generate an HTML report using Pygments for syntax '
+ 'highlighting.',
description='Python script to parse the XML (version 2) output of ' +
'cppcheck and generate an HTML report using Pygments for syntax ' +
'highlighting.',
long_description=readme,
author='Henrik Nilsson',
url='https://github.com/danmar/cppcheck',

View File

@ -1,52 +1,54 @@
#!/usr/bin/env python
import os
import re
def hasresult(filename, result):
if not os.path.isfile(filename):
if not os.path.isfile(filename):
return False
for line in open(filename, 'rt'):
if result in line:
return True
return False
for line in open(filename, 'rt'):
if line.find(result) >= 0:
return True
return False
def parsefile(filename):
ret = []
linenr = 0
functionName = None
for line in open(filename,'rt'):
linenr = linenr + 1
res = re.match('^[a-z]+[ *]+([a-z0-9_]+)[(]', line)
if res:
functionName = res.group(1)
if line.startswith('}'):
functionName = ''
elif line.find('BUG')>0 or line.find('WARN')>0 or filename=='ub.c':
spaces = ''
for i in range(100):
spaces = spaces + ' '
s = filename + spaces
s = s[:15] + str(linenr) + spaces
s = s[:20] + functionName + spaces
s = s[:50]
if hasresult('cppcheck.txt', '[' + filename + ':' + str(linenr) + ']'):
s = s + ' X'
else:
s = s + ' '
if hasresult('clang.txt', filename + ':' + str(linenr)):
s = s + ' X'
else:
s = s + ' '
if hasresult('lint.txt', filename + ' ' + str(linenr)):
s = s + ' X'
else:
s = s + ' '
if hasresult('cov.txt', filename + ':' + str(linenr)):
s = s + ' X'
else:
s = s + ' '
ret.append(s)
return ret
ret = []
linenr = 0
functionName = None
for line in open(filename, 'rt'):
linenr = linenr + 1
res = re.match('^[a-z]+[ *]+([a-z0-9_]+)[(]', line)
if res:
functionName = res.group(1)
if line.startswith('}'):
functionName = ''
elif line.find('BUG') > 0 or line.find('WARN') > 0 or filename == 'ub.c':
spaces = ''
for i in range(100):
spaces = spaces + ' '
s = filename + spaces
s = s[:15] + str(linenr) + spaces
s = s[:20] + functionName + spaces
s = s[:50]
if hasresult('cppcheck.txt', '[' + filename + ':' + str(linenr) + ']'):
s = s + ' X'
else:
s = s + ' '
if hasresult('clang.txt', filename + ':' + str(linenr)):
s = s + ' X'
else:
s = s + ' '
if hasresult('lint.txt', filename + ' ' + str(linenr)):
s = s + ' X'
else:
s = s + ' '
if hasresult('cov.txt', filename + ':' + str(linenr)):
s = s + ' X'
else:
s = s + ' '
ret.append(s)
return ret
bugs = []
bugs.extend(parsefile('controlflow.c'))
@ -54,4 +56,4 @@ bugs.extend(parsefile('data.c'))
bugs.extend(parsefile('functions.c'))
bugs.extend(parsefile('ub.c'))
for bug in bugs:
print(bug)
print(bug)

View File

@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
# continuous integration
# build daily reports (doxygen,coverage,etc)
@ -8,9 +8,7 @@ import time
import subprocess
import pexpect
import glob
import os
import sys
import urllib
# Upload file to sourceforge web server using scp
@ -24,16 +22,11 @@ def upload(file_to_upload, destination):
child.expect('Password:')
child.sendline(password)
child.interact()
except IOError:
pass
except OSError:
pass
except pexpect.TIMEOUT:
except (IOError, OSError, pexpect.TIMEOUT):
pass
# git push
def gitpush():
try:
password = sys.argv[1]
@ -41,11 +34,7 @@ def gitpush():
child.expect("Enter passphrase for key '/home/daniel/.ssh/id_rsa':")
child.sendline(password)
child.interact()
except IOError:
pass
except OSError:
pass
except pexpect.TIMEOUT:
except (IOError, OSError, pexpect.TIMEOUT):
pass
@ -53,7 +42,7 @@ def iconv(filename):
p = subprocess.Popen(['file', '-i', filename],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
comm = p.communicate()
if comm[0].find('charset=iso-8859-1') >= 0:
if 'charset=iso-8859-1' in comm[0]:
subprocess.call(
["iconv", filename, "--from=ISO-8859-1", "--to=UTF-8", "-o", filename])
@ -83,11 +72,7 @@ def gitpull():
child.expect('Already up-to-date.')
child.interact()
except IOError:
pass
except OSError:
pass
except pexpect.TIMEOUT:
except (IOError, OSError, pexpect.TIMEOUT):
pass
except pexpect.EOF:
return True

View File

@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
#
# 1. Create a folder daca2-addons in your HOME folder
# 2. Put cppcheck-O2 in daca2-addons. It should be built with all optimisations.
@ -14,13 +14,13 @@ import os
import datetime
import time
DEBIAN = ['ftp://ftp.se.debian.org/debian/',
'ftp://ftp.debian.org/debian/']
DEBIAN = ('ftp://ftp.se.debian.org/debian/',
'ftp://ftp.debian.org/debian/')
def wget(filepath):
filename = filepath
if filepath.find('/') >= 0:
if '/' in filepath:
filename = filename[filename.rfind('/') + 1:]
for d in DEBIAN:
subprocess.call(
@ -126,8 +126,7 @@ def dumpfiles(path):
if os.path.islink(g):
continue
if os.path.isdir(g):
for df in dumpfiles(path + g + '/'):
ret.append(df)
ret.extend(dumpfiles(path + g + '/'))
elif os.path.isfile(g) and g[-5:] == '.dump':
ret.append(g)
return ret
@ -166,7 +165,8 @@ def scanarchive(filepath, jobs):
# gcc-arm - no ticket. Reproducible timeout in daca2 though as of 1.73/early 2016.
#
if filename[:5] == 'flite' or filename[:5] == 'boost' or filename[:7] == 'insight' or filename[:8] == 'valgrind' or filename[:7] == 'gcc-arm':
if filename[:5] == 'flite' or filename[:5] == 'boost' or filename[:7] == 'insight' or\
filename[:8] == 'valgrind' or filename[:7] == 'gcc-arm':
results = open('results.txt', 'at')
results.write('fixme: skipped package to avoid hang\n')
results.close()

View File

@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
#
# Downloads all daca2 source code packages.
#
@ -11,16 +11,15 @@ import sys
import shutil
import glob
import os
import datetime
import time
DEBIAN = ['ftp://ftp.se.debian.org/debian/',
'ftp://ftp.debian.org/debian/']
DEBIAN = ('ftp://ftp.se.debian.org/debian/',
'ftp://ftp.debian.org/debian/')
def wget(filepath):
filename = filepath
if filepath.find('/') >= 0:
if '/' in filepath:
filename = filename[filename.rfind('/') + 1:]
for d in DEBIAN:
subprocess.call(
@ -121,7 +120,8 @@ def removeLargeFiles(path):
os.remove(g)
# remove non-source files
elif g[-2:] != '.C' and g[-2:] != '.c' and g[-4:] != '.cc' and g[-4:] != '.cpp' and g[-4:] != '.cxx' and g[-2:] != '.h' and g[-2:] != '.H' and g[-4:] != '.c++' and g[-4:] != '.hpp' and g[-4:] != '.tpp' and g[-4:] != '.t++':
elif g[-2:] not in {'.C', '.c', '.H', '.h'} and g[-3:] != '.cc' and\
g[-4:] not in {'.cpp', '.cxx', '.c++', '.hpp', '.tpp', '.t++'}:
os.remove(g)

View File

@ -1,6 +1,7 @@
#!/usr/bin/env python
import os
import sys
import subprocess
def readdate(data):
datepos = -1
@ -54,7 +55,8 @@ mainpage.write('</head>\n')
mainpage.write('<body>\n')
mainpage.write('<h1>DACA2</h1>\n')
mainpage.write('<p>Results when running latest (git head) Cppcheck on Debian.</p>\n')
mainpage.write('<p>For performance reasons the analysis is limited. Files larger than 1mb are skipped. If analysis of a file takes more than 10 minutes it may be stopped.</p>\n')
mainpage.write('<p>For performance reasons the analysis is limited. Files larger than 1mb are skipped. ' +
'If analysis of a file takes more than 10 minutes it may be stopped.</p>\n')
mainpage.write('<table class="sortable">\n')
mainpage.write(
'<tr>' +
@ -72,9 +74,9 @@ lastupdate = None
recent = []
daca2 = daca2folder
for lib in [False, True]:
for lib in (False, True):
for a in "0123456789abcdefghijklmnopqrstuvwxyz":
if lib == True:
if lib:
a = "lib" + a
if not os.path.isfile(daca2 + a + '/results.txt'):
continue
@ -83,7 +85,7 @@ for lib in [False, True]:
data = f.read()
f.close()
if data.find('ftp://')<0:
if 'ftp://' not in data:
continue
datestr = readdate(data)

View File

@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
#
# 1. Create a folder daca2 in your HOME folder
# 2. Put cppcheck-O2 in daca2. It should be built with all optimisations.
@ -16,13 +16,13 @@ import datetime
import time
import logging
DEBIAN = ['ftp://ftp.se.debian.org/debian/',
'ftp://ftp.debian.org/debian/']
DEBIAN = ('ftp://ftp.se.debian.org/debian/',
'ftp://ftp.debian.org/debian/')
def wget(filepath):
filename = filepath
if filepath.find('/') >= 0:
if '/' in filepath:
filename = filename[filename.rfind('/') + 1:]
for d in DEBIAN:
subprocess.call(
@ -76,10 +76,8 @@ def handleRemoveReadonly(func, path, exc):
def removeAllExceptResults():
filenames = []
for g in glob.glob('[A-Za-z0-9]*'):
filenames.append(g)
for g in glob.glob('.[a-z]*'):
filenames.append(g)
filenames.extend(glob.glob('[A-Za-z0-9]*'))
filenames.extend(glob.glob('.[a-z]*'))
for filename in filenames:
count = 5
@ -104,7 +102,7 @@ def removeAllExceptResults():
def removeLargeFiles(path):
for g in glob.glob(path + '*'):
if g == '.' or g == '..':
if g in {'.', '..'}:
continue
if os.path.islink(g):
continue
@ -122,9 +120,11 @@ def removeLargeFiles(path):
except OSError as err:
logging.error('Failed to remove {}: {}'.format(g, err))
def strfCurrTime(fmt):
return datetime.time.strftime(datetime.datetime.now().time(), fmt)
def scanarchive(filepath, jobs, cpulimit):
# remove all files/folders except RESULTS_FILENAME
removeAllExceptResults()
@ -152,7 +152,8 @@ def scanarchive(filepath, jobs, cpulimit):
cmd = 'cpulimit --limit=' + cpulimit
else:
cmd = 'nice --adjustment=1000'
cmd = cmd + ' ../cppcheck-O2 -D__GCC__ --enable=style --inconclusive --error-exitcode=0 --exception-handling=stderr ' + jobs + ' --template=daca2 .'
cmd = cmd + ' ../cppcheck-O2 -D__GCC__ --enable=style --inconclusive --error-exitcode=0 ' +\
'--exception-handling=stderr ' + jobs + ' --template=daca2 .'
cmds = cmd.split()
p = subprocess.Popen(cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@ -160,7 +161,7 @@ def scanarchive(filepath, jobs, cpulimit):
if p.returncode == 0:
logging.info(comm[1] + strfCurrTime('[%H:%M]'))
elif comm[0].find('cppcheck: error: could not find or open any of the paths given.') < 0:
elif 'cppcheck: error: could not find or open any of the paths given.' not in comm[0]:
logging.error(comm[1] + strfCurrTime('[%H:%M]'))
logging.error('Exit code is not zero! Crash?\n')
@ -188,9 +189,9 @@ RESULTS_FILENAME = 'results.txt'
RESULTS_FILE = os.path.join(workdir, RESULTS_FILENAME)
logging.basicConfig(
filename=RESULTS_FILE,
level=logging.INFO,
format='%(message)s')
filename=RESULTS_FILE,
level=logging.INFO,
format='%(message)s')
print(workdir)

View File

@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
#
# Cppcheck - A tool for static C/C++ code analysis
# Copyright (C) 2007-2016 Cppcheck team.
@ -163,8 +163,8 @@ def writeHtmlFile(nodes, functionName, filename, errorsOnly):
if len(sys.argv) <= 1 or '--help' in sys.argv:
print ('Extract test cases from test file')
print (
print('Extract test cases from test file')
print(
'Syntax: extracttests.py [--html=folder] [--xml] [--code=folder] path/testfile.cpp')
sys.exit(0)
@ -183,7 +183,7 @@ for arg in sys.argv[1:]:
elif arg.endswith('.cpp'):
filename = arg
else:
print ('Invalid option: ' + arg)
print('Invalid option: ' + arg)
sys.exit(1)
@ -195,8 +195,8 @@ if filename is not None:
# generate output
if xml:
print ('<?xml version="1.0"?>')
print ('<tree>')
print('<?xml version="1.0"?>')
print('<tree>')
count = 0
for node in e.nodes:
s = ' <node'
@ -204,8 +204,8 @@ if filename is not None:
s += ' code="' + strtoxml(node['code']) + '"'
s += ' expected="' + strtoxml(node['expected']) + '"'
s += '/>'
print (s)
print ('</tree>')
print(s)
print('</tree>')
elif htmldir is not None:
if not htmldir.endswith('/'):
htmldir += '/'
@ -311,4 +311,4 @@ if filename is not None:
errors.close()
else:
for node in e.nodes:
print (node['functionName'])
print(node['functionName'])

View File

@ -1,12 +1,12 @@
#!/usr/bin/python
#!/usr/bin/env python
import argparse
import xml.etree.ElementTree as ET
def main():
parser = argparse.ArgumentParser(description="List all error without a CWE assigned in CSV format")
parser.add_argument("-F", metavar="filename", required=True, help="XML file containing output from: ./cppcheck --errorlist --xml-version=2")
parser.add_argument("-F", metavar="filename", required=True,
help="XML file containing output from: ./cppcheck --errorlist --xml-version=2")
parsed = parser.parse_args()
tree = ET.parse(vars(parsed)["F"])

View File

@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
#
# Cppcheck - A tool for static C/C++ code analysis
# Copyright (C) 2007-2016 Cppcheck team.
@ -150,7 +150,8 @@ class MatchCompiler:
# if varid is provided, check that it's non-zero on first use
if varid and tok.find('%varid%') != -1 and checked_varid is False:
ret += ' if (varid==0U)\n'
ret += ' throw InternalError(tok, "Internal error. Token::Match called with varid 0. Please report this to Cppcheck developers");\n'
ret += ' throw InternalError(tok, "Internal error. Token::Match called with varid 0. ' +\
'Please report this to Cppcheck developers");\n'
checked_varid = True
# [abc]
@ -189,7 +190,7 @@ class MatchCompiler:
gotoNextToken = ' tok = tok ? tok->next() : NULL;\n'
else:
negatedTok ="!" + self._compileCmd(tok)
negatedTok = "!" + self._compileCmd(tok)
# fold !true => false ; !false => true
# this avoids cppcheck warnings about condition always being true/false
if (negatedTok == "!false"):
@ -323,12 +324,14 @@ class MatchCompiler:
# Don't use assert() here, it's disabled for optimized builds.
# We also need to verify builds in 'release' mode
ret += ' if (res_parsed_match != res_compiled_match) {\n'
# ret += ' std::cout << "res_parsed_match' + str(verifyNumber) + ': " << res_parsed_match << ", res_compiled_match: " << res_compiled_match << "\\n";\n'
# ret += ' if (tok)\n'
# ret += ' std::cout << "tok: " << tok->str();\n'
# ret += ' if (tok->next())\n'
# ret += ' std::cout << "tok next: " << tok->next()->str();\n'
ret += ' throw InternalError(tok, "Internal error. compiled match returned different result than parsed match: ' + pattern + '");\n'
# ret += ' std::cout << "res_parsed_match' + str(verifyNumber) +\
# ': " << res_parsed_match << ", res_compiled_match: " << res_compiled_match << "\\n";\n'
# ret += ' if (tok)\n'
# ret += ' std::cout << "tok: " << tok->str();\n'
# ret += ' if (tok->next())\n'
# ret += ' std::cout << "tok next: " << tok->next()->str();\n'
ret += ' throw InternalError(tok, "Internal error.' +\
'compiled match returned different result than parsed match: ' + pattern + '");\n'
ret += ' }\n'
ret += ' return res_compiled_match;\n'
ret += '}\n'
@ -403,7 +406,7 @@ class MatchCompiler:
res = re.match(r'\s*"((?:.|\\")*?)"\s*$', raw_pattern)
if res is None:
if self._showSkipped:
print(filename +":" + str(linenr) +" skipping match pattern:" + raw_pattern)
print(filename + ":" + str(linenr) + " skipping match pattern:" + raw_pattern)
break # Non-const pattern - bailout
pattern = res.group(1)
@ -454,7 +457,8 @@ class MatchCompiler:
# Don't use assert() here, it's disabled for optimized builds.
# We also need to verify builds in 'release' mode
ret += ' if (res_parsed_findmatch != res_compiled_findmatch) {\n'
ret += ' throw InternalError(tok, "Internal error. compiled findmatch returned different result than parsed findmatch: ' + pattern + '");\n'
ret += ' throw InternalError(tok, "Internal error. ' +\
'compiled findmatch returned different result than parsed findmatch: ' + pattern + '");\n'
ret += ' }\n'
ret += ' return res_compiled_findmatch;\n'
ret += '}\n'
@ -524,9 +528,8 @@ class MatchCompiler:
if res is None:
break
# assert that Token::find(simple)match has either 2, 3 or 4 arguments
assert(len(res) >= 3 or len(res) < 6)
# assert that Token::find(simple)match has either 2, 3 or 4
# arguments
g0 = res[0]
tok = res[1]
@ -556,7 +559,7 @@ class MatchCompiler:
res = re.match(r'\s*"((?:.|\\")*?)"\s*$', pattern)
if res is None:
if self._showSkipped:
print(filename +":" + str(linenr) +" skipping findmatch pattern:" + pattern)
print(filename + ":" + str(linenr) + " skipping findmatch pattern:" + pattern)
break # Non-const pattern - bailout
pattern = res.group(1)
@ -588,7 +591,8 @@ class MatchCompiler:
startPos = res[0]
endPos = res[1]
text = line[startPos + 1:endPos - 1]
line = line[:startPos] + 'MatchCompiler::makeConstStringBegin' + text + 'MatchCompiler::makeConstStringEnd' + line[endPos:]
line = line[:startPos] + 'MatchCompiler::makeConstStringBegin' +\
text + 'MatchCompiler::makeConstStringEnd' + line[endPos:]
line = line.replace('MatchCompiler::makeConstStringBegin', 'MatchCompiler::makeConstString("')
line = line.replace('MatchCompiler::makeConstStringEnd', '")')
return line

View File

@ -1,3 +1,4 @@
#!/usr/bin/env python
import glob
import os
@ -44,24 +45,24 @@ def parseheader(cppcheckpath, filename):
posixcfg = f.read()
f.close()
while data.find('/*') >= 0:
while '/*' in data:
pos1 = data.find('/*')
pos2 = data.find('*/', pos1 + 2)
data = data[:pos1] + data[pos2 + 2:]
data = data.replace('\\\n', '')
while data.find('\n#') >= 0:
while '\n#' in data:
pos1 = data.find('\n#')
pos2 = data.find('\n', pos1 + 1)
data = data[:pos1] + data[pos2:]
while data.find('\n__BEGIN') >= 0:
while '\n__BEGIN' in data:
pos1 = data.find('\n__BEGIN')
pos2 = data.find('\n', pos1 + 1)
data = data[:pos1] + data[pos2:]
while data.find('\n__END') >= 0:
while '\n__END' in data:
pos1 = data.find('\n__END')
pos2 = data.find('\n', pos1 + 1)
data = data[:pos1] + data[pos2:]
@ -75,24 +76,24 @@ def parseheader(cppcheckpath, filename):
output = []
for line in data.split('\n'):
if (line[:7] != 'extern ' and line.find(' extern ') < 0) or line[-1] != ';':
if (line[:7] != 'extern ' and ' extern ' not in line) or line[-1] != ';':
continue
functionNameEnd = line.find('(') - 1
if functionNameEnd < 0:
continue
while line[functionNameEnd] == ' ':
functionNameEnd = functionNameEnd - 1
functionNameEnd -= 1
if functionNameEnd < 10:
continue
functionNameStart = functionNameEnd
while line[functionNameStart] == '_' or line[functionNameStart].isalnum():
functionNameStart = functionNameStart - 1
functionNameStart -= 1
if functionNameStart < 10:
continue
if line[functionNameStart] != '*' and line[functionNameStart] != ' ':
continue
functionNameStart = functionNameStart + 1
functionNameStart += 1
if not line[functionNameStart].isalpha():
continue
@ -102,13 +103,13 @@ def parseheader(cppcheckpath, filename):
nonnullStart = line.find('__nonnull')
if nonnullStart > 0:
nonnullStart = nonnullStart + 9
nonnullStart += 9
while nonnullStart < len(line) and line[nonnullStart] == ' ':
nonnullStart = nonnullStart + 1
nonnullStart += 1
if nonnullStart >= len(line) or line[nonnullStart] != '(':
continue
while line[nonnullStart] == '(':
nonnullStart = nonnullStart + 1
nonnullStart += 1
nonnullEnd = line.find(')', nonnullStart)
nonnull = line[nonnullStart:nonnullEnd]

View File

@ -1,6 +1,5 @@
#!/usr/bin/env python
import subprocess
import os
import sys
CMD = None
@ -23,7 +22,7 @@ if CMD is None:
print('Abort: No --cmd')
sys.exit(1)
if SEGFAULT == False and EXPECTED is None:
if not SEGFAULT and EXPECTED is None:
print('Abort: No --expected')
sys.exit(1)
@ -38,6 +37,7 @@ else:
print('EXPECTED=' + EXPECTED)
print('FILE=' + FILE)
def runtool():
p = subprocess.Popen(CMD.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
comm = p.communicate()
@ -46,22 +46,24 @@ def runtool():
return True
elif p.returncode == 0:
out = comm[0] + '\n' + comm[1]
if (out.find('error:') < 0) and (out.find(EXPECTED) > 0):
if ('error:' not in out) and (out.find(EXPECTED) > 0):
return True
return False
def writefile(filename, filedata):
f = open(filename, 'wt')
for line in filedata:
f.write(line)
f.close()
def replaceandrun(what, filedata, i, line):
print(what + ' ' + str(i + 1) + '/' + str(len(filedata)) + '..')
bak = filedata[i]
filedata[i] = line
writefile(FILE, filedata)
if runtool() == True:
if runtool():
print('pass')
writefile(BACKUPFILE, filedata)
return True
@ -69,6 +71,7 @@ def replaceandrun(what, filedata, i, line):
filedata[i] = bak
return False
def replaceandrun2(what, filedata, i, line1, line2):
print(what + ' ' + str(i + 1) + '/' + str(len(filedata)) + '..')
bak1 = filedata[i]
@ -76,7 +79,7 @@ def replaceandrun2(what, filedata, i, line1, line2):
filedata[i] = line1
filedata[i + 1] = line2
writefile(FILE, filedata)
if runtool() == True:
if runtool():
print('pass')
writefile(BACKUPFILE, filedata)
else:
@ -84,6 +87,7 @@ def replaceandrun2(what, filedata, i, line1, line2):
filedata[i] = bak1
filedata[i + 1] = bak2
def clearandrun(what, filedata, i1, i2):
print(what + ' ' + str(i1 + 1) + '/' + str(len(filedata)) + '..')
filedata2 = list(filedata)
@ -92,19 +96,21 @@ def clearandrun(what, filedata, i1, i2):
filedata2[i] = ''
i = i + 1
writefile(FILE, filedata2)
if runtool() == True:
if runtool():
print('pass')
writefile(BACKUPFILE, filedata2)
return filedata2
print('fail')
return filedata
def removecomments(filedata):
for i in range(len(filedata)):
line = filedata[i]
if line.find('//') >= 0:
if '//' in line:
replaceandrun('remove comment', filedata, i, line[:line.find('//')].rstrip())
def checkpar(line):
par = 0
for c in line:
@ -116,6 +122,7 @@ def checkpar(line):
return False
return par == 0
def combinelines(filedata):
if len(filedata) < 3:
return
@ -156,11 +163,13 @@ def combinelines(filedata):
fd2 = filedata[line + 1].lstrip()
replaceandrun2('combine lines', filedata, line, fd1 + fd2, '')
def removedirectives(filedata):
for i in range(len(filedata)):
if filedata[i].lstrip().startswith('#'):
replaceandrun('remove preprocessor directive', filedata, i, '')
def removeblocks(filedata):
if len(filedata) < 3:
return filedata
@ -169,7 +178,7 @@ def removeblocks(filedata):
strippedline = filedata[i].strip()
if len(strippedline) == 0:
continue
if ';{}'.find(strippedline[-1]) < 0:
if strippedline[-1] not in ';{}':
continue
i1 = i + 1
@ -201,6 +210,7 @@ def removeblocks(filedata):
return filedata
def removeline(filedata):
stmt = True
for i in range(len(filedata)):
@ -210,13 +220,13 @@ def removeline(filedata):
if len(strippedline) == 0:
continue
if stmt and strippedline[-1] == ';' and checkpar(line) and line.find('{') < 0 and line.find('}') < 0:
if stmt and strippedline[-1] == ';' and checkpar(line) and '{' not in line and '}' not in line:
replaceandrun('remove line', filedata, i, '')
elif stmt and strippedline.find('{') > 0 and strippedline.find('}') == len(strippedline) - 1:
replaceandrun('remove line', filedata, i, '')
if ';{}'.find(strippedline[-1]) >= 0:
if strippedline[-1] in ';{}':
stmt = True
else:
stmt = False
@ -224,7 +234,7 @@ def removeline(filedata):
# reduce..
print('Make sure error can be reproduced...')
if runtool() == False:
if not runtool():
print("Cannot reproduce")
sys.exit(1)

View File

@ -1,10 +1,8 @@
#!/usr/bin/python
#!/usr/bin/env python
import subprocess
import pexpect
import os
import shutil
import time
import sys
START = 0
@ -18,37 +16,37 @@ for arg in sys.argv[1:]:
PASSWORD = arg
def compilecppcheck(CPPFLAGS):
subprocess.call(['nice', 'make', 'clean'])
subprocess.call(['nice', 'make', 'SRCDIR=build', 'CFGDIR=' + os.path.expanduser('~/cppcheck/cfg'), 'CXXFLAGS=-g -O2', 'CPPFLAGS=' + CPPFLAGS])
subprocess.call(['nice', 'make', 'SRCDIR=build', 'CFGDIR=' +
os.path.expanduser('~/cppcheck/cfg'), 'CXXFLAGS=-g -O2', 'CPPFLAGS=' + CPPFLAGS])
subprocess.call(['cp', 'cppcheck', os.path.expanduser('~/daca2/cppcheck-O2')])
def runcppcheck(rev, folder):
subprocess.call(['rm', '-rf', os.path.expanduser('~/daca2/' + folder)])
subprocess.call(['nice', '--adjustment=19', 'python', os.path.expanduser('~/cppcheck/tools/daca2.py'), folder, '--rev=' + rev])
subprocess.call(['nice', '--adjustment=19', 'python',
os.path.expanduser('~/cppcheck/tools/daca2.py'), folder, '--rev=' + rev])
def daca2report(reportfolder):
subprocess.call(['rm', '-rf', reportfolder])
subprocess.call(['mkdir', reportfolder])
subprocess.call(['python', os.path.expanduser('~/cppcheck/tools/daca2-report.py'), reportfolder])
# Upload file to sourceforge server using scp
def upload(localfolder, webfolder):
if len(PASSWORD)<3:
if len(PASSWORD) < 3:
return
try:
child = pexpect.spawn(
'scp -r ' + localfolder + ' danielmarjamaki,cppcheck@web.sf.net:htdocs/' + webfolder)
#child.expect('upload@trac.cppcheck.net\'s password:')
# child.expect('upload@trac.cppcheck.net\'s password:')
child.expect('Password:')
child.sendline(PASSWORD)
child.interact()
except IOError:
pass
except OSError:
pass
except pexpect.TIMEOUT:
except (IOError, OSError, pexpect.TIMEOUT):
pass

View File

@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python
#
# Cppcheck - A tool for static C/C++ code analysis
# Copyright (C) 2007-2016 Cppcheck team.
@ -31,7 +31,8 @@ class MatchCompilerTest(unittest.TestCase):
self.assertEqual(self.mc.parseMatch(' Token::Match(tok,', 2), None)
# multiline Token::Match is not supported yet
self.assertEqual(self.mc.parseMatch(' Token::Match(Token::findsimplematch(tok,")"), ";")', 2), [
'Token::Match(Token::findsimplematch(tok,")"), ";")', 'Token::findsimplematch(tok,")")', ' ";"']) # inner function call
'Token::Match(Token::findsimplematch(tok,")"), ";")',
'Token::findsimplematch(tok,")")', ' ";"']) # inner function call
def test_replaceTokenMatch(self):
input = 'if (Token::Match(tok, "foobar")) {'
@ -140,12 +141,14 @@ class MatchCompilerTest(unittest.TestCase):
# offset '5' is chosen as an abritary start offset to look for
res = self.mc._parseStringComparison(input, 5)
self.assertEqual(2, len(res))
self.assertEqual('str == MatchCompiler::makeConstString("abc")', input[:res[0]] + "MatchCompiler::makeConstString(" + input[res[0]:res[1]] + ")" + input[res[1]:])
self.assertEqual('str == MatchCompiler::makeConstString("abc")', input[:res[0]] +
"MatchCompiler::makeConstString(" + input[res[0]:res[1]] + ")" + input[res[1]:])
input = 'str == "a\\"b\\"c"'
res = self.mc._parseStringComparison(input, 5)
self.assertEqual(2, len(res))
self.assertEqual('str == MatchCompiler::makeConstString("a\\"b\\"c")', input[:res[0]] + "MatchCompiler::makeConstString(" + input[res[0]:res[1]] + ")" + input[res[1]:])
self.assertEqual('str == MatchCompiler::makeConstString("a\\"b\\"c")', input[:res[0]] +
"MatchCompiler::makeConstString(" + input[res[0]:res[1]] + ")" + input[res[1]:])
def test_replaceCStrings(self):
# str() ==
@ -161,7 +164,9 @@ class MatchCompilerTest(unittest.TestCase):
# strAt()
input = 'if (match16(parent->tokAt(-3)) && tok->strAt(1) == ")")'
output = self.mc._replaceCStrings(input)
self.assertEqual('if (match16(parent->tokAt(-3)) && tok->strAt(1) == MatchCompiler::makeConstString(")"))', output)
self.assertEqual(
'if (match16(parent->tokAt(-3)) && tok->strAt(1) == MatchCompiler::makeConstString(")"))',
output)
if __name__ == '__main__':
unittest.main()

View File

@ -1,4 +1,4 @@
#!/usr/bin/env python3
# Times script using Visual Studio compiler in Windows
#
# This script assumes that you have:
@ -49,7 +49,8 @@ for rev in range(rev1, rev2):
subprocess.call([r'c:\cygwin64\bin\sed.exe', '-i', 's/140/120/', vcxproj])
subprocess.call('msbuild cppcheck.sln /t:build /p:configuration=Release,platform=x64'.split())
print('Revision:' + str(rev))
p = subprocess.Popen(r'bin\cppcheck.exe src -q --showtime=summary'.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p = subprocess.Popen(r'bin\cppcheck.exe src -q --showtime=summary'.split(),
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
comm = p.communicate()
f = open('results.txt', 'at')
f.write('\nREV ' + str(rev) + '\n')

View File

@ -1,5 +1,4 @@
import glob
#!/usr/bin/env python
import sys
import re
@ -22,11 +21,12 @@ fin = open(resultfile, 'rt')
results = fin.read()
fin.close()
out = {}
out['untriaged'] = ''
out['fn'] = ''
out['fp'] = ''
out['tp'] = ''
out = {
'untriaged': '',
'fn': '',
'fp': '',
'tp': ''
}
numberOfFalsePositives = 0
numberOfTruePositives = 0
@ -71,7 +71,7 @@ for result in results.split('\n'):
f = open(project + '/true-positives.txt', 'rt')
for line in f.readlines():
line = line.strip()
if line.find('] -> [') > 0 or line.find('(error)') < 0:
if line.find('] -> [') > 0 or '(error)' not in line:
continue
res = re.match('\\[(' + project + '.+):([0-9]+)\\]:\s+[(][a-z]+[)] (.+)', line)
@ -107,7 +107,8 @@ else:
project2 = project
fout = open('report.html', 'wt')
fout.write('<html><head><title>Cppcheck results for ' + project + '</title><link rel="stylesheet" type="text/css" href="theme1.css"></head><body>\n')
fout.write('<html><head><title>Cppcheck results for ' + project +
'</title><link rel="stylesheet" type="text/css" href="theme1.css"></head><body>\n')
fout.write('<h1>Cppcheck results for ' + project + '</h1>\n')
fout.write('<p>Number of false negatives: ' + str(numberOfFalseNegatives) + '</p>\n')
fout.write('<p>Number of true positives: ' + str(numberOfTruePositives) + '</p>\n')