Improve Python code
parent 62c22a8e06
commit 2dd6168258
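The recurring cleanups in this commit are: membership tests moved from list literals to set literals, "not x in y" rewritten as "x not in y", PEP 8 spacing, and "#!/usr/bin/env python" shebangs. A minimal illustration of the set-membership pattern, with names invented for this note rather than taken from the patch:

# Illustration only: a set literal gives average O(1) membership tests
# and signals that element order is irrelevant.
COMPARISON_OPS = {'==', '!=', '<', '<=', '>', '>='}

def is_comparison_op(op):
    # 'op' is a plain string here; the addons test Token.str the same way.
    return op in COMPARISON_OPS

print(is_comparison_op('=='))  # True
print(is_comparison_op('+'))   # False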
@@ -1,4 +1,4 @@
-#/usr/bin/python
+#!/usr/bin/env python
 #
 # Cert: Some extra CERT checkers
 #
@@ -43,11 +43,11 @@ def isLocalUnpackedStruct(arg):


 def isBitwiseOp(token):
-    return token and (token.str in ['&', '|', '^'])
+    return token and (token.str in {'&', '|', '^'})


 def isComparisonOp(token):
-    return token and (token.str in ['==', '!=', '>', '>=', '<', '<='])
+    return token and (token.str in {'==', '!=', '>', '>=', '<', '<='})


 # EXP42-C
@@ -66,13 +66,13 @@ def exp42(data):

         if token.astOperand1.str == 'memcmp' and (isLocalUnpackedStruct(arg1) or isLocalUnpackedStruct(arg2)):
             reportError(
-                token, 'style', 'EXP42-C Comparison of struct padding data (fix either by packing the struct using \'#pragma pack\' or by rewriting the comparison)')
+                token, 'style', "EXP42-C Comparison of struct padding data " +
+                "(fix either by packing the struct using '#pragma pack' or by rewriting the comparison)")


 # EXP46-C
 # Do not use a bitwise operator with a Boolean-like operand
 # int x = (a == b) & c;


 def exp46(data):
     for token in data.tokenlist:
         if isBitwiseOp(token) and (isComparisonOp(token.astOperand1) or isComparisonOp(token.astOperand2)):
@@ -37,6 +37,7 @@ class Directive:
         self.file = element.get('file')
         self.linenr = element.get('linenr')

+
 class ValueType:
     """
     ValueType class. Contains (promoted) type information for each node in the AST.
@@ -70,10 +71,11 @@ class ValueType:
         self.typeScope = IdMap[self.typeScopeId]

     def isIntegral(self):
-        return self.type == 'bool' or self.type == 'char' or self.type == 'short' or self.type == 'int' or self.type == 'long' or self.type == 'long long'
+        return self.type in {'bool', 'char', 'short', 'int', 'long', 'long long'}

     def isFloat(self):
-        return self.type == 'float' or self.type == 'double' or self.type == 'long double'
+        return self.type in {'float', 'double', 'long double'}

+
 class Token:
     """
@@ -129,7 +131,6 @@ class Token:
     @endcode
     """

-
     Id = None
     str = None
     next = None
@@ -302,7 +303,6 @@ class Scope:
         self.nestedIn = IdMap[self.nestedInId]


-
 class Function:
     """
     Information about a function
@@ -391,7 +391,6 @@ class Variable:
         self.typeEndToken = IdMap[self.typeEndTokenId]


-
 class ValueFlow:
     """
     ValueFlow::Value class
@@ -404,7 +403,6 @@ class ValueFlow:
         values  Possible values
     """

-
     Id = None
     values = None

@@ -438,7 +436,6 @@ class ValueFlow:
             self.values.append(ValueFlow.Value(value))


-
 class Configuration:
     """
     Configuration class
@@ -619,20 +616,16 @@ class CppcheckData:
                     tok = Token(node)
                     tok.file = files[int(node.get('fileIndex'))]
                     self.rawTokens.append(tok)
-            for i in range(len(self.rawTokens)):
-                if i > 0:
-                    self.rawTokens[i].previous = self.rawTokens[i-1]
-                if i + 1 < len(self.rawTokens):
-                    self.rawTokens[i].next = self.rawTokens[i+1]
-
+            for i in range(len(self.rawTokens) - 1):
+                self.rawTokens[i + 1].previous = self.rawTokens[i]
+                self.rawTokens[i].next = self.rawTokens[i + 1]

         # root is 'dumps' node, each config has its own 'dump' subnode.
         for cfgnode in data.getroot():
-            if cfgnode.tag=='dump':
+            if cfgnode.tag == 'dump':
                 self.configurations.append(Configuration(cfgnode))


-
 def parsedump(filename):
     """
     parse a cppcheck dump file
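Both versions of the loop above thread the raw tokens into a doubly linked list; the rewritten form walks indices 0..len-2 and sets next and previous in one pass, with no bounds checks in the body. A standalone sketch of that linking logic, using a made-up stand-in for the Token class:

class Tok:
    # Minimal stand-in for cppcheckdata.Token, only for this sketch.
    def __init__(self, s):
        self.str = s
        self.next = None
        self.previous = None

raw_tokens = [Tok(s) for s in ('int', 'x', ';')]

# Same shape as the rewritten loop: link each token to its successor.
for i in range(len(raw_tokens) - 1):
    raw_tokens[i].next = raw_tokens[i + 1]
    raw_tokens[i + 1].previous = raw_tokens[i]

assert raw_tokens[1].previous.str == 'int'
assert raw_tokens[1].next.str == ';'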
@@ -640,7 +633,6 @@ def parsedump(filename):
     return CppcheckData(filename)


-
 def astIsFloat(token):
     """
     Check if type of ast node is float/double
@@ -651,9 +643,7 @@ def astIsFloat(token):
     if token.str == '.':
         return astIsFloat(token.astOperand2)
     if '+-*/%'.find(token.str) == 0:
-        if True == astIsFloat(token.astOperand1):
-            return True
-        return astIsFloat(token.astOperand2)
+        return astIsFloat(token.astOperand1) or astIsFloat(token.astOperand2)
     if not token.variable:
         # float literal?
         if token.str[0].isdigit():
@@ -682,6 +672,7 @@ class CppCheckFormatter(argparse.HelpFormatter):
             return text[2:].splitlines()
         return argparse.HelpFormatter._split_lines(self, text, width)

+
 def ArgumentParser():
     """
     Returns an argparse argument parser with an already-added
@@ -697,6 +688,7 @@ def ArgumentParser():
                         "Pre-defined templates: gcc, vs, edit")
     return parser

+
 def reportError(template, callstack=[], severity='', message='', id=''):
     """
     Format an error message according to the template.
@@ -715,7 +707,7 @@ def reportError(template, callstack=[], severity='', message='', id=''):
     elif template == 'edit':
         template = '{file} +{line}: {severity}: {message}'
     # compute 'callstack}, {file} and {line} replacements
-    stack = ' -> '.join(['[' + f + ':' + str(l) + ']' for (f, l) in callstack])
+    stack = ' -> '.join('[' + f + ':' + str(l) + ']' for (f, l) in callstack)
     file = callstack[-1][0]
     line = str(callstack[-1][1])
     # format message
@@ -1,4 +1,4 @@
-#/usr/bin/python
+#!/usr/bin/env python
 #
 # Locate casts in the code
 #
@@ -6,7 +6,7 @@
 import cppcheckdata
 import sys

-messages = []
+messages = set()

 for arg in sys.argv[1:]:
     print('Checking ' + arg + '...')
@@ -35,6 +35,6 @@ for arg in sys.argv[1:]:

             msg = '[' + token.file + ':' + str(
                 token.linenr) + '] (information) findcasts.py: found a cast\n'
-            if not msg in messages:
-                messages.append(msg)
+            if msg not in messages:
+                messages.add(msg)
                 sys.stderr.write(msg)
addons/misra.py | 363
@@ -1,4 +1,4 @@
-#/usr/bin/python
+#!/usr/bin/env python
 #
 # MISRA C 2012 checkers
 #
@@ -15,12 +15,13 @@ import cppcheckdata
 import sys
 import re

-ruleTexts={}
+ruleTexts = {}

 VERIFY = False
 VERIFY_EXPECTED = []
 VERIFY_ACTUAL = []

+
 def reportError(location, num1, num2):
     if VERIFY:
         VERIFY_ACTUAL.append(str(location.linenr) + ':' + str(num1) + '.' + str(num2))
@@ -28,11 +29,13 @@ def reportError(location, num1, num2):
         errmsg = None
         num = num1 * 100 + num2
         if num in ruleTexts:
-            errmsg = ruleTexts[num] + ' [misra-c2012-'+str(num1)+'.'+str(num2)+']'
+            errmsg = ruleTexts[num] + ' [misra-c2012-' + str(num1) + '.' + str(num2) + ']'
         else:
-            errmsg = 'misra rule ' + str(num1) + '.' + str(num2) + ' violation (use --rule-texts=<file> to get proper output)'
+            errmsg = 'misra rule ' + str(num1) + '.' + str(num2) +\
+                ' violation (use --rule-texts=<file> to get proper output)'
         sys.stderr.write('[' + location.file + ':' + str(location.linenr) + '] ' + errmsg + '\n')

+
 def simpleMatch(token, pattern):
     for p in pattern.split(' '):
         if not token or token.str != p:
@@ -48,38 +51,41 @@ LONG_BIT = 0
 LONG_LONG_BIT = 0
 POINTER_BIT = 0

-KEYWORDS = ['auto',
-            'break',
-            'case',
-            'char',
-            'const',
-            'continue',
-            'default',
-            'do',
-            'double',
-            'else',
-            'enum',
-            'extern',
-            'float',
-            'for',
-            'goto',
-            'if',
-            'int',
-            'long',
-            'register',
-            'return',
-            'short',
-            'signed',
-            'sizeof',
-            'static',
-            'struct',
-            'switch',
-            'typedef',
-            'union',
-            'unsigned',
-            'void',
-            'volatile',
-            'while']
+KEYWORDS = {
+    'auto',
+    'break',
+    'case',
+    'char',
+    'const',
+    'continue',
+    'default',
+    'do',
+    'double',
+    'else',
+    'enum',
+    'extern',
+    'float',
+    'for',
+    'goto',
+    'if',
+    'int',
+    'long',
+    'register',
+    'return',
+    'short',
+    'signed',
+    'sizeof',
+    'static',
+    'struct',
+    'switch',
+    'typedef',
+    'union',
+    'unsigned',
+    'void',
+    'volatile',
+    'while'
+}

+
 def getEssentialType(expr):
     if not expr:
@ -87,11 +93,11 @@ def getEssentialType(expr):
|
||||||
if expr.variable:
|
if expr.variable:
|
||||||
typeToken = expr.variable.typeStartToken
|
typeToken = expr.variable.typeStartToken
|
||||||
while typeToken and typeToken.isName:
|
while typeToken and typeToken.isName:
|
||||||
if typeToken.str in ['char', 'short', 'int', 'long', 'float', 'double']:
|
if typeToken.str in {'char', 'short', 'int', 'long', 'float', 'double'}:
|
||||||
return typeToken.str
|
return typeToken.str
|
||||||
typeToken = typeToken.next
|
typeToken = typeToken.next
|
||||||
|
|
||||||
elif expr.astOperand1 and expr.astOperand2 and expr.str in ['+', '-', '*', '/', '%', '&', '|', '^']:
|
elif expr.astOperand1 and expr.astOperand2 and expr.str in {'+', '-', '*', '/', '%', '&', '|', '^'}:
|
||||||
e1 = getEssentialType(expr.astOperand1)
|
e1 = getEssentialType(expr.astOperand1)
|
||||||
e2 = getEssentialType(expr.astOperand2)
|
e2 = getEssentialType(expr.astOperand2)
|
||||||
if not e1 or not e2:
|
if not e1 or not e2:
|
||||||
|
@ -108,6 +114,7 @@ def getEssentialType(expr):
|
||||||
|
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
def bitsOfEssentialType(expr):
|
def bitsOfEssentialType(expr):
|
||||||
type = getEssentialType(expr)
|
type = getEssentialType(expr)
|
||||||
if type is None:
|
if type is None:
|
||||||
|
@ -124,13 +131,15 @@ def bitsOfEssentialType(expr):
|
||||||
return LONG_LONG_BIT
|
return LONG_LONG_BIT
|
||||||
return 0
|
return 0
|
||||||
|
|
||||||
|
|
||||||
def isCast(expr):
|
def isCast(expr):
|
||||||
if not expr or expr.str != '(' or not expr.astOperand1 or expr.astOperand2:
|
if not expr or expr.str != '(' or not expr.astOperand1 or expr.astOperand2:
|
||||||
return False
|
return False
|
||||||
if simpleMatch(expr,'( )'):
|
if simpleMatch(expr, '( )'):
|
||||||
return False
|
return False
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
|
||||||
def isFunctionCall(expr):
|
def isFunctionCall(expr):
|
||||||
if not expr:
|
if not expr:
|
||||||
return False
|
return False
|
||||||
|
@ -142,14 +151,16 @@ def isFunctionCall(expr):
|
||||||
return False
|
return False
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
|
||||||
def countSideEffects(expr):
|
def countSideEffects(expr):
|
||||||
if not expr or expr.str in [',', ';']:
|
if not expr or expr.str in {',', ';'}:
|
||||||
return 0
|
return 0
|
||||||
ret = 0
|
ret = 0
|
||||||
if expr.str in ['++', '--', '=']:
|
if expr.str in {'++', '--', '='}:
|
||||||
ret = 1
|
ret = 1
|
||||||
return ret + countSideEffects(expr.astOperand1) + countSideEffects(expr.astOperand2)
|
return ret + countSideEffects(expr.astOperand1) + countSideEffects(expr.astOperand2)
|
||||||
|
|
||||||
|
|
||||||
def getForLoopExpressions(forToken):
|
def getForLoopExpressions(forToken):
|
||||||
if not forToken or forToken.str != 'for':
|
if not forToken or forToken.str != 'for':
|
||||||
return None
|
return None
|
||||||
|
@ -160,7 +171,9 @@ def getForLoopExpressions(forToken):
|
||||||
return None
|
return None
|
||||||
if not lpar.astOperand2.astOperand2 or lpar.astOperand2.astOperand2.str != ';':
|
if not lpar.astOperand2.astOperand2 or lpar.astOperand2.astOperand2.str != ';':
|
||||||
return None
|
return None
|
||||||
return [lpar.astOperand2.astOperand1, lpar.astOperand2.astOperand2.astOperand1, lpar.astOperand2.astOperand2.astOperand2]
|
return [lpar.astOperand2.astOperand1,
|
||||||
|
lpar.astOperand2.astOperand2.astOperand1,
|
||||||
|
lpar.astOperand2.astOperand2.astOperand2]
|
||||||
|
|
||||||
|
|
||||||
def hasFloatComparison(expr):
|
def hasFloatComparison(expr):
|
||||||
|
@ -173,6 +186,7 @@ def hasFloatComparison(expr):
|
||||||
return cppcheckdata.astIsFloat(expr.astOperand1) or cppcheckdata.astIsFloat(expr.astOperand2)
|
return cppcheckdata.astIsFloat(expr.astOperand1) or cppcheckdata.astIsFloat(expr.astOperand2)
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
def hasSideEffectsRecursive(expr):
|
def hasSideEffectsRecursive(expr):
|
||||||
if not expr:
|
if not expr:
|
||||||
return False
|
return False
|
||||||
|
@ -180,13 +194,15 @@ def hasSideEffectsRecursive(expr):
|
||||||
prev = expr.astOperand1.previous
|
prev = expr.astOperand1.previous
|
||||||
if prev and (prev.str == '{' or prev.str == '{'):
|
if prev and (prev.str == '{' or prev.str == '{'):
|
||||||
return hasSideEffectsRecursive(expr.astOperand2)
|
return hasSideEffectsRecursive(expr.astOperand2)
|
||||||
if expr.str in ['++', '--', '=']:
|
if expr.str in {'++', '--', '='}:
|
||||||
return True
|
return True
|
||||||
# Todo: Check function calls
|
# Todo: Check function calls
|
||||||
return hasSideEffectsRecursive(expr.astOperand1) or hasSideEffectsRecursive(expr.astOperand2)
|
return hasSideEffectsRecursive(expr.astOperand1) or hasSideEffectsRecursive(expr.astOperand2)
|
||||||
|
|
||||||
|
|
||||||
def isBoolExpression(expr):
|
def isBoolExpression(expr):
|
||||||
return expr and expr.str in ['!', '==', '!=', '<', '<=', '>', '>=', '&&', '||']
|
return expr and expr.str in {'!', '==', '!=', '<', '<=', '>', '>=', '&&', '||'}
|
||||||
|
|
||||||
|
|
||||||
def isConstantExpression(expr):
|
def isConstantExpression(expr):
|
||||||
if expr.isNumber:
|
if expr.isNumber:
|
||||||
|
@ -201,30 +217,32 @@ def isConstantExpression(expr):
|
||||||
return False
|
return False
|
||||||
return True
|
return True
|
||||||
|
|
||||||
|
|
||||||
def isUnsignedInt(expr):
|
def isUnsignedInt(expr):
|
||||||
# TODO this function is very incomplete. use ValueType?
|
# TODO this function is very incomplete. use ValueType?
|
||||||
if not expr:
|
if not expr:
|
||||||
return False
|
return False
|
||||||
if expr.isNumber:
|
if expr.isNumber:
|
||||||
return expr.str.find('u')>0 or expr.str.find('U')>0
|
return expr.str.find('u') > 0 or expr.str.find('U') > 0
|
||||||
if expr.str in ['+','-','*','/','%']:
|
if expr.str in {'+', '-', '*', '/', '%'}:
|
||||||
return isUnsignedInt(expr.astOperand1) or isUnsignedInt(expr.astOperand2)
|
return isUnsignedInt(expr.astOperand1) or isUnsignedInt(expr.astOperand2)
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
def getPrecedence(expr):
|
def getPrecedence(expr):
|
||||||
if not expr:
|
if not expr:
|
||||||
return 16
|
return 16
|
||||||
if not expr.astOperand1 or not expr.astOperand2:
|
if not expr.astOperand1 or not expr.astOperand2:
|
||||||
return 16
|
return 16
|
||||||
if expr.str in ['*', '/', '%']:
|
if expr.str in {'*', '/', '%'}:
|
||||||
return 12
|
return 12
|
||||||
if expr.str in ['+', '-']:
|
if expr.str in {'+', '-'}:
|
||||||
return 11
|
return 11
|
||||||
if expr.str in ['<<', '>>']:
|
if expr.str in {'<<', '>>'}:
|
||||||
return 10
|
return 10
|
||||||
if expr.str in ['<', '>', '<=', '>=']:
|
if expr.str in {'<', '>', '<=', '>='}:
|
||||||
return 9
|
return 9
|
||||||
if expr.str in ['==', '!=']:
|
if expr.str in {'==', '!='}:
|
||||||
return 8
|
return 8
|
||||||
if expr.str == '&':
|
if expr.str == '&':
|
||||||
return 7
|
return 7
|
||||||
|
@ -236,7 +254,7 @@ def getPrecedence(expr):
|
||||||
return 4
|
return 4
|
||||||
if expr.str == '||':
|
if expr.str == '||':
|
||||||
return 3
|
return 3
|
||||||
if expr.str in ['?',':']:
|
if expr.str in {'?', ':'}:
|
||||||
return 2
|
return 2
|
||||||
if expr.isAssignmentOp:
|
if expr.isAssignmentOp:
|
||||||
return 1
|
return 1
|
||||||
|
@ -244,6 +262,7 @@ def getPrecedence(expr):
|
||||||
return 0
|
return 0
|
||||||
return -1
|
return -1
|
||||||
|
|
||||||
|
|
||||||
def noParentheses(tok1, tok2):
|
def noParentheses(tok1, tok2):
|
||||||
while tok1 and tok1 != tok2:
|
while tok1 and tok1 != tok2:
|
||||||
if tok1.str == '(' or tok1.str == ')':
|
if tok1.str == '(' or tok1.str == ')':
|
||||||
|
@ -251,6 +270,7 @@ def noParentheses(tok1, tok2):
|
||||||
tok1 = tok1.next
|
tok1 = tok1.next
|
||||||
return tok1 == tok2
|
return tok1 == tok2
|
||||||
|
|
||||||
|
|
||||||
def findGotoLabel(gotoToken):
|
def findGotoLabel(gotoToken):
|
||||||
label = gotoToken.next.str
|
label = gotoToken.next.str
|
||||||
tok = gotoToken.next.next
|
tok = gotoToken.next.next
|
||||||
|
@ -262,42 +282,46 @@ def findGotoLabel(gotoToken):
|
||||||
tok = tok.next
|
tok = tok.next
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
def findInclude(directives, header):
|
def findInclude(directives, header):
|
||||||
for directive in directives:
|
for directive in directives:
|
||||||
if directive.str == '#include ' + header:
|
if directive.str == '#include ' + header:
|
||||||
return directive
|
return directive
|
||||||
return None
|
return None
|
||||||
|
|
||||||
|
|
||||||
def misra_3_1(rawTokens):
|
def misra_3_1(rawTokens):
|
||||||
for token in rawTokens:
|
for token in rawTokens:
|
||||||
if token.str.startswith('/*') or token.str.startswith('//'):
|
if token.str.startswith('/*') or token.str.startswith('//'):
|
||||||
if token.str[2:].find('//')>=0 or token.str[2:].find('/*')>=0:
|
if '//' in token.str[2:] or '/*' in token.str[2:]:
|
||||||
reportError(token, 3, 1)
|
reportError(token, 3, 1)
|
||||||
|
|
||||||
|
|
||||||
def misra_5_1(data):
|
def misra_5_1(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if token.isName and len(token.str) > 31:
|
if token.isName and len(token.str) > 31:
|
||||||
reportError(token, 5, 1)
|
reportError(token, 5, 1)
|
||||||
|
|
||||||
|
|
||||||
def misra_5_3(data):
|
def misra_5_3(data):
|
||||||
scopeVars = {}
|
scopeVars = {}
|
||||||
for var in data.variables:
|
for var in data.variables:
|
||||||
if var.isArgument:
|
if var.isArgument:
|
||||||
# TODO
|
# TODO
|
||||||
continue
|
continue
|
||||||
if not var.nameToken.scope in scopeVars:
|
if var.nameToken.scope not in scopeVars:
|
||||||
scopeVars[var.nameToken.scope] = []
|
scopeVars[var.nameToken.scope] = []
|
||||||
scopeVars[var.nameToken.scope].append(var)
|
scopeVars[var.nameToken.scope].append(var)
|
||||||
|
|
||||||
for innerScope in data.scopes:
|
for innerScope in data.scopes:
|
||||||
if innerScope.type == 'Global':
|
if innerScope.type == 'Global':
|
||||||
continue
|
continue
|
||||||
if not innerScope in scopeVars:
|
if innerScope not in scopeVars:
|
||||||
continue
|
continue
|
||||||
for innerVar in scopeVars[innerScope]:
|
for innerVar in scopeVars[innerScope]:
|
||||||
outerScope = innerScope.nestedIn
|
outerScope = innerScope.nestedIn
|
||||||
while outerScope:
|
while outerScope:
|
||||||
if not outerScope in scopeVars:
|
if outerScope not in scopeVars:
|
||||||
outerScope = outerScope.nestedIn
|
outerScope = outerScope.nestedIn
|
||||||
continue
|
continue
|
||||||
found = False
|
found = False
|
||||||
|
@ -310,11 +334,13 @@ def misra_5_3(data):
|
||||||
break
|
break
|
||||||
outerScope = outerScope.nestedIn
|
outerScope = outerScope.nestedIn
|
||||||
|
|
||||||
|
|
||||||
def misra_5_4(data):
|
def misra_5_4(data):
|
||||||
for dir in data.directives:
|
for dir in data.directives:
|
||||||
if re.match(r'#define [a-zA-Z0-9_]{64,}', dir.str):
|
if re.match(r'#define [a-zA-Z0-9_]{64,}', dir.str):
|
||||||
reportError(dir, 5, 4)
|
reportError(dir, 5, 4)
|
||||||
|
|
||||||
|
|
||||||
def misra_5_5(data):
|
def misra_5_5(data):
|
||||||
macroNames = []
|
macroNames = []
|
||||||
for dir in data.directives:
|
for dir in data.directives:
|
||||||
|
@ -325,21 +351,25 @@ def misra_5_5(data):
|
||||||
if var.nameToken.str in macroNames:
|
if var.nameToken.str in macroNames:
|
||||||
reportError(var.nameToken, 5, 5)
|
reportError(var.nameToken, 5, 5)
|
||||||
|
|
||||||
|
|
||||||
def misra_7_1(rawTokens):
|
def misra_7_1(rawTokens):
|
||||||
for tok in rawTokens:
|
for tok in rawTokens:
|
||||||
if re.match(r'^0[0-7]+$', tok.str):
|
if re.match(r'^0[0-7]+$', tok.str):
|
||||||
reportError(tok, 7, 1)
|
reportError(tok, 7, 1)
|
||||||
|
|
||||||
|
|
||||||
def misra_7_3(rawTokens):
|
def misra_7_3(rawTokens):
|
||||||
for tok in rawTokens:
|
for tok in rawTokens:
|
||||||
if re.match(r'^[0-9]+l', tok.str):
|
if re.match(r'^[0-9]+l', tok.str):
|
||||||
reportError(tok, 7, 3)
|
reportError(tok, 7, 3)
|
||||||
|
|
||||||
|
|
||||||
def misra_8_11(data):
|
def misra_8_11(data):
|
||||||
for var in data.variables:
|
for var in data.variables:
|
||||||
if var.isExtern and simpleMatch(var.nameToken.next, '[ ]') and var.nameToken.scope.type == 'Global':
|
if var.isExtern and simpleMatch(var.nameToken.next, '[ ]') and var.nameToken.scope.type == 'Global':
|
||||||
reportError(var.nameToken, 8, 11)
|
reportError(var.nameToken, 8, 11)
|
||||||
|
|
||||||
|
|
||||||
def misra_8_12(data):
|
def misra_8_12(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if token.str != '{':
|
if token.str != '{':
|
||||||
|
@ -353,26 +383,29 @@ def misra_8_12(data):
|
||||||
break
|
break
|
||||||
if etok.str == '=':
|
if etok.str == '=':
|
||||||
rhsValues = etok.astOperand2.values
|
rhsValues = etok.astOperand2.values
|
||||||
if rhsValues and len(rhsValues)==1:
|
if rhsValues and len(rhsValues) == 1:
|
||||||
if rhsValues[0].intvalue in values:
|
if rhsValues[0].intvalue in values:
|
||||||
reportError(etok, 8, 12)
|
reportError(etok, 8, 12)
|
||||||
break
|
break
|
||||||
values.append(rhsValues[0].intvalue)
|
values.append(rhsValues[0].intvalue)
|
||||||
etok = etok.next
|
etok = etok.next
|
||||||
|
|
||||||
|
|
||||||
def misra_8_14(rawTokens):
|
def misra_8_14(rawTokens):
|
||||||
for token in rawTokens:
|
for token in rawTokens:
|
||||||
if token.str == 'restrict':
|
if token.str == 'restrict':
|
||||||
reportError(token, 8, 14)
|
reportError(token, 8, 14)
|
||||||
|
|
||||||
|
|
||||||
def misra_9_5(rawTokens):
|
def misra_9_5(rawTokens):
|
||||||
for token in rawTokens:
|
for token in rawTokens:
|
||||||
if simpleMatch(token, '[ ] = { ['):
|
if simpleMatch(token, '[ ] = { ['):
|
||||||
reportError(token, 9, 5)
|
reportError(token, 9, 5)
|
||||||
|
|
||||||
|
|
||||||
def misra_10_4(data):
|
def misra_10_4(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if not token.str in ['+','-','*','/','%','&','|','^'] and not token.isComparisonOp:
|
if token.str not in {'+', '-', '*', '/', '%', '&', '|', '^'} and not token.isComparisonOp:
|
||||||
continue
|
continue
|
||||||
if not token.astOperand1 or not token.astOperand2:
|
if not token.astOperand1 or not token.astOperand2:
|
||||||
continue
|
continue
|
||||||
|
@ -385,15 +418,16 @@ def misra_10_4(data):
|
||||||
if e1 and e2 and e1 != e2:
|
if e1 and e2 and e1 != e2:
|
||||||
reportError(token, 10, 4)
|
reportError(token, 10, 4)
|
||||||
|
|
||||||
|
|
||||||
def misra_10_6(data):
|
def misra_10_6(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if token.str != '=' or not token.astOperand1 or not token.astOperand2:
|
if token.str != '=' or not token.astOperand1 or not token.astOperand2:
|
||||||
continue
|
continue
|
||||||
vt1 = token.astOperand1.valueType
|
vt1 = token.astOperand1.valueType
|
||||||
vt2 = token.astOperand2.valueType
|
vt2 = token.astOperand2.valueType
|
||||||
if not vt1 or vt1.pointer>0:
|
if not vt1 or vt1.pointer > 0:
|
||||||
continue
|
continue
|
||||||
if not vt2 or vt2.pointer>0:
|
if not vt2 or vt2.pointer > 0:
|
||||||
continue
|
continue
|
||||||
try:
|
try:
|
||||||
intTypes = ['char', 'short', 'int', 'long', 'long long']
|
intTypes = ['char', 'short', 'int', 'long', 'long long']
|
||||||
|
@ -407,13 +441,14 @@ def misra_10_6(data):
|
||||||
except ValueError:
|
except ValueError:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
def misra_10_8(data):
|
def misra_10_8(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if not isCast(token):
|
if not isCast(token):
|
||||||
continue
|
continue
|
||||||
if not token.valueType or token.valueType.pointer>0:
|
if not token.valueType or token.valueType.pointer > 0:
|
||||||
continue
|
continue
|
||||||
if not token.astOperand1.valueType or token.astOperand1.valueType.pointer>0:
|
if not token.astOperand1.valueType or token.astOperand1.valueType.pointer > 0:
|
||||||
continue
|
continue
|
||||||
if not token.astOperand1.astOperand1:
|
if not token.astOperand1.astOperand1:
|
||||||
continue
|
continue
|
||||||
|
@ -429,6 +464,7 @@ def misra_10_8(data):
|
||||||
except ValueError:
|
except ValueError:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
def misra_11_3(data):
|
def misra_11_3(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if not isCast(token):
|
if not isCast(token):
|
||||||
|
@ -437,9 +473,11 @@ def misra_11_3(data):
|
||||||
vt2 = token.astOperand1.valueType
|
vt2 = token.astOperand1.valueType
|
||||||
if not vt1 or not vt2:
|
if not vt1 or not vt2:
|
||||||
continue
|
continue
|
||||||
if vt1.pointer==vt2.pointer and vt1.pointer>0 and vt1.type != vt2.type and vt1.isIntegral() and vt2.isIntegral() and vt1.type != 'char':
|
if vt1.pointer == vt2.pointer and vt1.pointer > 0 and vt1.type != vt2.type and\
|
||||||
|
vt1.isIntegral() and vt2.isIntegral() and vt1.type != 'char':
|
||||||
reportError(token, 11, 3)
|
reportError(token, 11, 3)
|
||||||
|
|
||||||
|
|
||||||
def misra_11_4(data):
|
def misra_11_4(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if not isCast(token):
|
if not isCast(token):
|
||||||
|
@ -448,9 +486,10 @@ def misra_11_4(data):
|
||||||
vt2 = token.astOperand1.valueType
|
vt2 = token.astOperand1.valueType
|
||||||
if not vt1 or not vt2:
|
if not vt1 or not vt2:
|
||||||
continue
|
continue
|
||||||
if vt1.pointer==0 and vt2.pointer>0 and vt2.type != 'void':
|
if vt1.pointer == 0 and vt2.pointer > 0 and vt2.type != 'void':
|
||||||
reportError(token, 11, 4)
|
reportError(token, 11, 4)
|
||||||
|
|
||||||
|
|
||||||
def misra_11_5(data):
|
def misra_11_5(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if not isCast(token):
|
if not isCast(token):
|
||||||
|
@ -459,9 +498,10 @@ def misra_11_5(data):
|
||||||
vt2 = token.astOperand1.valueType
|
vt2 = token.astOperand1.valueType
|
||||||
if not vt1 or not vt2:
|
if not vt1 or not vt2:
|
||||||
continue
|
continue
|
||||||
if vt1.pointer>0 and vt1.type != 'void' and vt2.pointer==vt1.pointer and vt2.type == 'void':
|
if vt1.pointer > 0 and vt1.type != 'void' and vt2.pointer == vt1.pointer and vt2.type == 'void':
|
||||||
reportError(token, 11, 5)
|
reportError(token, 11, 5)
|
||||||
|
|
||||||
|
|
||||||
def misra_11_6(data):
|
def misra_11_6(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if not isCast(token):
|
if not isCast(token):
|
||||||
|
@ -470,11 +510,12 @@ def misra_11_6(data):
|
||||||
vt2 = token.astOperand1.valueType
|
vt2 = token.astOperand1.valueType
|
||||||
if not vt1 or not vt2:
|
if not vt1 or not vt2:
|
||||||
continue
|
continue
|
||||||
if vt1.pointer==1 and vt1.type=='void' and vt2.pointer==0:
|
if vt1.pointer == 1 and vt1.type == 'void' and vt2.pointer == 0:
|
||||||
reportError(token, 11, 6)
|
reportError(token, 11, 6)
|
||||||
elif vt1.pointer==0 and vt2.pointer==1 and vt2.type=='void':
|
elif vt1.pointer == 0 and vt2.pointer == 1 and vt2.type == 'void':
|
||||||
reportError(token, 11, 6)
|
reportError(token, 11, 6)
|
||||||
|
|
||||||
|
|
||||||
def misra_11_7(data):
|
def misra_11_7(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if not isCast(token):
|
if not isCast(token):
|
||||||
|
@ -483,20 +524,23 @@ def misra_11_7(data):
|
||||||
vt2 = token.astOperand1.valueType
|
vt2 = token.astOperand1.valueType
|
||||||
if not vt1 or not vt2:
|
if not vt1 or not vt2:
|
||||||
continue
|
continue
|
||||||
if vt1.pointer>0 and vt1.type=='record' and vt2.pointer>0 and vt2.type=='record' and vt1.typeScopeId != vt2.typeScopeId:
|
if vt1.pointer > 0 and vt1.type == 'record' and\
|
||||||
|
vt2.pointer > 0 and vt2.type == 'record' and vt1.typeScopeId != vt2.typeScopeId:
|
||||||
reportError(token, 11, 7)
|
reportError(token, 11, 7)
|
||||||
|
|
||||||
|
|
||||||
def misra_11_8(data):
|
def misra_11_8(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if not isCast(token):
|
if not isCast(token):
|
||||||
continue
|
continue
|
||||||
if not token.valueType or not token.astOperand1.valueType:
|
if not token.valueType or not token.astOperand1.valueType:
|
||||||
continue
|
continue
|
||||||
if token.valueType.pointer==0 or token.valueType.pointer==0:
|
if token.valueType.pointer == 0 or token.valueType.pointer == 0:
|
||||||
continue
|
continue
|
||||||
if token.valueType.constness==0 and token.astOperand1.valueType.constness>0:
|
if token.valueType.constness == 0 and token.astOperand1.valueType.constness > 0:
|
||||||
reportError(token, 11, 8)
|
reportError(token, 11, 8)
|
||||||
|
|
||||||
|
|
||||||
def misra_11_9(data):
|
def misra_11_9(data):
|
||||||
for directive in data.directives:
|
for directive in data.directives:
|
||||||
res1 = re.match(r'#define ([A-Za-z_][A-Za-z_0-9]*) (.*)', directive.str)
|
res1 = re.match(r'#define ([A-Za-z_][A-Za-z_0-9]*) (.*)', directive.str)
|
||||||
|
@ -505,27 +549,29 @@ def misra_11_9(data):
|
||||||
name = res1.group(1)
|
name = res1.group(1)
|
||||||
if name == 'NULL':
|
if name == 'NULL':
|
||||||
continue
|
continue
|
||||||
value = res1.group(2).replace(' ','')
|
value = res1.group(2).replace(' ', '')
|
||||||
if value == '((void*)0)':
|
if value == '((void*)0)':
|
||||||
reportError(directive, 11, 9)
|
reportError(directive, 11, 9)
|
||||||
|
|
||||||
|
|
||||||
def misra_12_1_sizeof(rawTokens):
|
def misra_12_1_sizeof(rawTokens):
|
||||||
state = 0
|
state = 0
|
||||||
for tok in rawTokens:
|
for tok in rawTokens:
|
||||||
if tok.str.startswith('//') or tok.str.startswith('/*'):
|
if tok.str.startswith('//') or tok.str.startswith('/*'):
|
||||||
continue
|
continue
|
||||||
if tok.str == 'sizeof':
|
if tok.str == 'sizeof':
|
||||||
state = 1
|
state = 1
|
||||||
elif state == 1:
|
elif state == 1:
|
||||||
if re.match(r'^[a-zA-Z_]',tok.str):
|
if re.match(r'^[a-zA-Z_]', tok.str):
|
||||||
state = 2
|
state = 2
|
||||||
else:
|
else:
|
||||||
state = 0
|
state = 0
|
||||||
elif state == 2:
|
elif state == 2:
|
||||||
if tok.str in ['+','-','*','/','%']:
|
if tok.str in {'+', '-', '*', '/', '%'}:
|
||||||
reportError(tok, 12, 1)
|
reportError(tok, 12, 1)
|
||||||
else:
|
else:
|
||||||
state = 0
|
state = 0
|
||||||
|
|
||||||
|
|
||||||
def misra_12_1(data):
|
def misra_12_1(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
|
@ -533,7 +579,7 @@ def misra_12_1(data):
|
||||||
if p < 2 or p > 12:
|
if p < 2 or p > 12:
|
||||||
continue
|
continue
|
||||||
p1 = getPrecedence(token.astOperand1)
|
p1 = getPrecedence(token.astOperand1)
|
||||||
if p1 <= 12 and p1 > p and noParentheses(token.astOperand1,token):
|
if p1 <= 12 and p1 > p and noParentheses(token.astOperand1, token):
|
||||||
reportError(token, 12, 1)
|
reportError(token, 12, 1)
|
||||||
continue
|
continue
|
||||||
p2 = getPrecedence(token.astOperand2)
|
p2 = getPrecedence(token.astOperand2)
|
||||||
|
@ -541,31 +587,34 @@ def misra_12_1(data):
|
||||||
reportError(token, 12, 1)
|
reportError(token, 12, 1)
|
||||||
continue
|
continue
|
||||||
|
|
||||||
|
|
||||||
def misra_12_2(data):
|
def misra_12_2(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if not (token.str in ['<<','>>']):
|
if not (token.str in {'<<', '>>'}):
|
||||||
continue
|
continue
|
||||||
if (not token.astOperand2) or (not token.astOperand2.values):
|
if (not token.astOperand2) or (not token.astOperand2.values):
|
||||||
continue
|
continue
|
||||||
maxval = 0
|
maxval = 0
|
||||||
for val in token.astOperand2.values:
|
for val in token.astOperand2.values:
|
||||||
if val.intvalue > maxval:
|
if val.intvalue > maxval:
|
||||||
maxval = val.intvalue
|
maxval = val.intvalue
|
||||||
if maxval == 0:
|
if maxval == 0:
|
||||||
continue
|
continue
|
||||||
sz = bitsOfEssentialType(token.astOperand1)
|
sz = bitsOfEssentialType(token.astOperand1)
|
||||||
if sz <= 0:
|
if sz <= 0:
|
||||||
continue
|
continue
|
||||||
if maxval >= sz:
|
if maxval >= sz:
|
||||||
reportError(token, 12, 2)
|
reportError(token, 12, 2)
|
||||||
|
|
||||||
|
|
||||||
def misra_12_3(data):
|
def misra_12_3(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if token.str != ',' or token.scope.type == 'Enum':
|
if token.str != ',' or token.scope.type == 'Enum':
|
||||||
continue
|
continue
|
||||||
if token.astParent and (token.astParent.str in ['(', ',', '{']):
|
if token.astParent and token.astParent.str in {'(', ',', '{'}:
|
||||||
continue
|
continue
|
||||||
reportError(token, 12, 3)
|
reportError(token, 12, 3)
|
||||||
|
|
||||||
|
|
||||||
def misra_12_4(data):
|
def misra_12_4(data):
|
||||||
max_uint = 0
|
max_uint = 0
|
||||||
|
@ -586,23 +635,26 @@ def misra_12_4(data):
|
||||||
reportError(token, 12, 4)
|
reportError(token, 12, 4)
|
||||||
break
|
break
|
||||||
|
|
||||||
|
|
||||||
def misra_13_1(data):
|
def misra_13_1(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if token.str != '=':
|
if token.str != '=':
|
||||||
continue
|
continue
|
||||||
init = token.next
|
init = token.next
|
||||||
if init and init.str == '{' and hasSideEffectsRecursive(init):
|
if init and init.str == '{' and hasSideEffectsRecursive(init):
|
||||||
reportError(init,13,1)
|
reportError(init, 13, 1)
|
||||||
|
|
||||||
|
|
||||||
def misra_13_3(data):
|
def misra_13_3(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if not token.str in ['++', '--']:
|
if token.str not in {'++', '--'}:
|
||||||
continue
|
continue
|
||||||
astTop = token
|
astTop = token
|
||||||
while astTop.astParent and not astTop.astParent.str in [',', ';']:
|
while astTop.astParent and astTop.astParent.str not in {',', ';'}:
|
||||||
astTop = astTop.astParent
|
astTop = astTop.astParent
|
||||||
if countSideEffects(astTop) >= 2:
|
if countSideEffects(astTop) >= 2:
|
||||||
reportError(astTop, 13, 3)
|
reportError(astTop, 13, 3)
|
||||||
|
|
||||||
|
|
||||||
def misra_13_4(data):
|
def misra_13_4(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
|
@ -610,21 +662,24 @@ def misra_13_4(data):
|
||||||
continue
|
continue
|
||||||
if not token.astParent:
|
if not token.astParent:
|
||||||
continue
|
continue
|
||||||
if token.astOperand1.str == '[' and (token.astOperand1.previous.str=='{' or token.astOperand1.previous.str==','):
|
if token.astOperand1.str == '[' and token.astOperand1.previous.str in {'{', ','}:
|
||||||
continue
|
continue
|
||||||
if not (token.astParent.str in [',', ';']):
|
if not (token.astParent.str in [',', ';']):
|
||||||
reportError(token, 13, 4)
|
reportError(token, 13, 4)
|
||||||
|
|
||||||
|
|
||||||
def misra_13_5(data):
|
def misra_13_5(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if token.isLogicalOp and hasSideEffectsRecursive(token.astOperand2):
|
if token.isLogicalOp and hasSideEffectsRecursive(token.astOperand2):
|
||||||
reportError(token, 13, 5)
|
reportError(token, 13, 5)
|
||||||
|
|
||||||
|
|
||||||
def misra_13_6(data):
|
def misra_13_6(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if token.str == 'sizeof' and hasSideEffectsRecursive(token.next):
|
if token.str == 'sizeof' and hasSideEffectsRecursive(token.next):
|
||||||
reportError(token, 13, 6)
|
reportError(token, 13, 6)
|
||||||
|
|
||||||
|
|
||||||
def misra_14_1(data):
|
def misra_14_1(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if token.str != 'for':
|
if token.str != 'for':
|
||||||
|
@ -633,6 +688,7 @@ def misra_14_1(data):
|
||||||
if exprs and hasFloatComparison(exprs[1]):
|
if exprs and hasFloatComparison(exprs[1]):
|
||||||
reportError(token, 14, 1)
|
reportError(token, 14, 1)
|
||||||
|
|
||||||
|
|
||||||
def misra_14_2(data):
|
def misra_14_2(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
expressions = getForLoopExpressions(token)
|
expressions = getForLoopExpressions(token)
|
||||||
|
@ -648,16 +704,18 @@ def misra_14_4(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if token.str != '(':
|
if token.str != '(':
|
||||||
continue
|
continue
|
||||||
if not token.astOperand1 or not (token.astOperand1.str in ['if', 'while']):
|
if not token.astOperand1 or not (token.astOperand1.str in {'if', 'while'}):
|
||||||
continue
|
continue
|
||||||
if not isBoolExpression(token.astOperand2):
|
if not isBoolExpression(token.astOperand2):
|
||||||
reportError(token, 14, 4)
|
reportError(token, 14, 4)
|
||||||
|
|
||||||
|
|
||||||
def misra_15_1(data):
|
def misra_15_1(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if token.str == "goto":
|
if token.str == "goto":
|
||||||
reportError(token, 15, 1)
|
reportError(token, 15, 1)
|
||||||
|
|
||||||
|
|
||||||
def misra_15_2(data):
|
def misra_15_2(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if token.str != 'goto':
|
if token.str != 'goto':
|
||||||
|
@ -667,6 +725,7 @@ def misra_15_2(data):
|
||||||
if not findGotoLabel(token):
|
if not findGotoLabel(token):
|
||||||
reportError(token, 15, 2)
|
reportError(token, 15, 2)
|
||||||
|
|
||||||
|
|
||||||
def misra_15_3(data):
|
def misra_15_3(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if token.str != 'goto':
|
if token.str != 'goto':
|
||||||
|
@ -682,17 +741,19 @@ def misra_15_3(data):
|
||||||
if not scope:
|
if not scope:
|
||||||
reportError(token, 15, 3)
|
reportError(token, 15, 3)
|
||||||
|
|
||||||
|
|
||||||
def misra_15_5(data):
|
def misra_15_5(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if token.str == 'return' and token.scope.type != 'Function':
|
if token.str == 'return' and token.scope.type != 'Function':
|
||||||
reportError(token, 15, 5)
|
reportError(token, 15, 5)
|
||||||
|
|
||||||
|
|
||||||
def misra_15_6(rawTokens):
|
def misra_15_6(rawTokens):
|
||||||
state = 0
|
state = 0
|
||||||
indent = 0
|
indent = 0
|
||||||
tok1 = None
|
tok1 = None
|
||||||
for token in rawTokens:
|
for token in rawTokens:
|
||||||
if token.str in ['if', 'for', 'while']:
|
if token.str in {'if', 'for', 'while'}:
|
||||||
if simpleMatch(token.previous, '# if'):
|
if simpleMatch(token.previous, '# if'):
|
||||||
continue
|
continue
|
||||||
if simpleMatch(token.previous, "} while"):
|
if simpleMatch(token.previous, "} while"):
|
||||||
|
@ -719,6 +780,7 @@ def misra_15_6(rawTokens):
|
||||||
if token.str != '{':
|
if token.str != '{':
|
||||||
reportError(tok1, 15, 6)
|
reportError(tok1, 15, 6)
|
||||||
|
|
||||||
|
|
||||||
def misra_15_7(data):
|
def misra_15_7(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if not simpleMatch(token, 'if ('):
|
if not simpleMatch(token, 'if ('):
|
||||||
|
@ -730,11 +792,13 @@ def misra_15_7(data):
|
||||||
|
|
||||||
# TODO add 16.1 rule
|
# TODO add 16.1 rule
|
||||||
|
|
||||||
|
|
||||||
def misra_16_2(data):
|
def misra_16_2(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if token.str == 'case' and token.scope.type != 'Switch':
|
if token.str == 'case' and token.scope.type != 'Switch':
|
||||||
reportError(token, 16, 2)
|
reportError(token, 16, 2)
|
||||||
|
|
||||||
|
|
||||||
def misra_16_3(rawTokens):
|
def misra_16_3(rawTokens):
|
||||||
# state: 0=no, 1=break is seen but not its ';', 2=after 'break;', 'comment', '{'
|
# state: 0=no, 1=break is seen but not its ';', 2=after 'break;', 'comment', '{'
|
||||||
state = 0
|
state = 0
|
||||||
|
@ -747,13 +811,14 @@ def misra_16_3(rawTokens):
|
||||||
else:
|
else:
|
||||||
state = 0
|
state = 0
|
||||||
elif token.str.startswith('/*') or token.str.startswith('//'):
|
elif token.str.startswith('/*') or token.str.startswith('//'):
|
||||||
if token.str.lower().find('fallthrough')>0:
|
if token.str.lower().find('fallthrough') > 0:
|
||||||
state = 2
|
state = 2
|
||||||
elif token.str == '{':
|
elif token.str == '{':
|
||||||
state = 2
|
state = 2
|
||||||
elif token.str == 'case' and state != 2:
|
elif token.str == 'case' and state != 2:
|
||||||
reportError(token, 16, 3)
|
reportError(token, 16, 3)
|
||||||
|
|
||||||
|
|
||||||
def misra_16_4(data):
|
def misra_16_4(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if token.str != 'switch':
|
if token.str != 'switch':
|
||||||
|
@ -773,6 +838,7 @@ def misra_16_4(data):
|
||||||
if tok and tok.str != 'default':
|
if tok and tok.str != 'default':
|
||||||
reportError(token, 16, 4)
|
reportError(token, 16, 4)
|
||||||
|
|
||||||
|
|
||||||
def misra_16_5(data):
|
def misra_16_5(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if token.str != 'default':
|
if token.str != 'default':
|
||||||
|
@ -781,7 +847,7 @@ def misra_16_5(data):
|
||||||
continue
|
continue
|
||||||
tok2 = token
|
tok2 = token
|
||||||
while tok2:
|
while tok2:
|
||||||
if tok2.str in ['}', 'case']:
|
if tok2.str in {'}', 'case'}:
|
||||||
break
|
break
|
||||||
if tok2.str == '{':
|
if tok2.str == '{':
|
||||||
tok2 = tok2.link
|
tok2 = tok2.link
|
||||||
|
@ -789,6 +855,7 @@ def misra_16_5(data):
|
||||||
if tok2 and tok2.str == 'case':
|
if tok2 and tok2.str == 'case':
|
||||||
reportError(token, 16, 5)
|
reportError(token, 16, 5)
|
||||||
|
|
||||||
|
|
||||||
def misra_16_6(data):
|
def misra_16_6(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if not (simpleMatch(token, 'switch (') and simpleMatch(token.next.link, ') {')):
|
if not (simpleMatch(token, 'switch (') and simpleMatch(token.next.link, ') {')):
|
||||||
|
@ -800,7 +867,7 @@ def misra_16_6(data):
|
||||||
count = count + 1
|
count = count + 1
|
||||||
elif tok.str == '{':
|
elif tok.str == '{':
|
||||||
tok = tok.link
|
tok = tok.link
|
||||||
if simpleMatch(tok.previous.previous,'break ;'):
|
if simpleMatch(tok.previous.previous, 'break ;'):
|
||||||
count = count + 1
|
count = count + 1
|
||||||
elif tok.str == '}':
|
elif tok.str == '}':
|
||||||
break
|
break
|
||||||
|
@ -808,24 +875,28 @@ def misra_16_6(data):
|
||||||
if count < 2:
|
if count < 2:
|
||||||
reportError(token, 16, 6)
|
reportError(token, 16, 6)
|
||||||
|
|
||||||
|
|
||||||
def misra_16_7(data):
|
def misra_16_7(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if simpleMatch(token, 'switch (') and isBoolExpression(token.next.astOperand2):
|
if simpleMatch(token, 'switch (') and isBoolExpression(token.next.astOperand2):
|
||||||
reportError(token, 16, 7)
|
reportError(token, 16, 7)
|
||||||
|
|
||||||
|
|
||||||
def misra_17_1(data):
|
def misra_17_1(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if isFunctionCall(token) and token.astOperand1.str in ['va_list', 'va_arg', 'va_start', 'va_end' , 'va_copy']:
|
if isFunctionCall(token) and token.astOperand1.str in {'va_list', 'va_arg', 'va_start', 'va_end', 'va_copy'}:
|
||||||
reportError(token, 17, 1)
|
reportError(token, 17, 1)
|
||||||
|
|
||||||
|
|
||||||
def misra_17_6(rawTokens):
|
def misra_17_6(rawTokens):
|
||||||
for token in rawTokens:
|
for token in rawTokens:
|
||||||
if simpleMatch(token, '[ static'):
|
if simpleMatch(token, '[ static'):
|
||||||
reportError(token, 17, 6)
|
reportError(token, 17, 6)
|
||||||
|
|
||||||
|
|
||||||
def misra_17_8(data):
|
def misra_17_8(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if not (token.isAssignmentOp or (token.str in ['++','--'])):
|
if not (token.isAssignmentOp or (token.str in {'++', '--'})):
|
||||||
continue
|
continue
|
||||||
if not token.astOperand1:
|
if not token.astOperand1:
|
||||||
continue
|
continue
|
||||||
|
@ -833,6 +904,7 @@ def misra_17_8(data):
|
||||||
if var and var.isArgument:
|
if var and var.isArgument:
|
||||||
reportError(token, 17, 8)
|
reportError(token, 17, 8)
|
||||||
|
|
||||||
|
|
||||||
def misra_18_5(data):
|
def misra_18_5(data):
|
||||||
for var in data.variables:
|
for var in data.variables:
|
||||||
if not var.isPointer:
|
if not var.isPointer:
|
||||||
|
@ -848,6 +920,7 @@ def misra_18_5(data):
|
||||||
if count > 2:
|
if count > 2:
|
||||||
reportError(var.nameToken, 18, 5)
|
reportError(var.nameToken, 18, 5)
|
||||||
|
|
||||||
|
|
||||||
def misra_18_8(data):
|
def misra_18_8(data):
|
||||||
for var in data.variables:
|
for var in data.variables:
|
||||||
if not var.isArray or not var.isLocal:
|
if not var.isArray or not var.isLocal:
|
||||||
|
@ -859,11 +932,13 @@ def misra_18_8(data):
|
||||||
if not isConstantExpression(typetok.astOperand2):
|
if not isConstantExpression(typetok.astOperand2):
|
||||||
reportError(var.nameToken, 18, 8)
|
reportError(var.nameToken, 18, 8)
|
||||||
|
|
||||||
|
|
||||||
def misra_19_2(data):
|
def misra_19_2(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if token.str == 'union':
|
if token.str == 'union':
|
||||||
reportError(token, 19, 2)
|
reportError(token, 19, 2)
|
||||||
|
|
||||||
|
|
||||||
def misra_20_1(data):
|
def misra_20_1(data):
|
||||||
for directive in data.directives:
|
for directive in data.directives:
|
||||||
if not directive.str.startswith('#include'):
|
if not directive.str.startswith('#include'):
|
||||||
|
@ -875,15 +950,17 @@ def misra_20_1(data):
|
||||||
reportError(directive, 20, 1)
|
reportError(directive, 20, 1)
|
||||||
break
|
break
|
||||||
|
|
||||||
|
|
||||||
def misra_20_2(data):
|
def misra_20_2(data):
|
||||||
for directive in data.directives:
|
for directive in data.directives:
|
||||||
if not directive.str.startswith('#include '):
|
if not directive.str.startswith('#include '):
|
||||||
continue
|
continue
|
||||||
for pattern in ['\\', '//', '/*', '\'']:
|
for pattern in {'\\', '//', '/*', "'"}:
|
||||||
if directive.str.find(pattern)>0:
|
if directive.str.find(pattern) > 0:
|
||||||
reportError(directive, 20, 2)
|
reportError(directive, 20, 2)
|
||||||
break
|
break
|
||||||
|
|
||||||
|
|
||||||
def misra_20_3(rawTokens):
|
def misra_20_3(rawTokens):
|
||||||
linenr = -1
|
linenr = -1
|
||||||
for token in rawTokens:
|
for token in rawTokens:
|
||||||
|
@ -896,57 +973,67 @@ def misra_20_3(rawTokens):
|
||||||
if not headerToken or not (headerToken.str.startswith('<') or headerToken.str.startswith('"')):
|
if not headerToken or not (headerToken.str.startswith('<') or headerToken.str.startswith('"')):
|
||||||
reportError(token, 20, 3)
|
reportError(token, 20, 3)
|
||||||
|
|
||||||
|
|
||||||
def misra_20_4(data):
|
def misra_20_4(data):
|
||||||
for directive in data.directives:
|
for directive in data.directives:
|
||||||
res = re.search(r'#define ([a-z][a-z0-9_]+)', directive.str)
|
res = re.search(r'#define ([a-z][a-z0-9_]+)', directive.str)
|
||||||
if res and (res.group(1) in KEYWORDS):
|
if res and (res.group(1) in KEYWORDS):
|
||||||
reportError(directive, 20, 4)
|
reportError(directive, 20, 4)
|
||||||
|
|
||||||
|
|
||||||
def misra_20_5(data):
|
def misra_20_5(data):
|
||||||
for directive in data.directives:
|
for directive in data.directives:
|
||||||
if directive.str.startswith('#undef '):
|
if directive.str.startswith('#undef '):
|
||||||
reportError(directive, 20, 5)
|
reportError(directive, 20, 5)
|
||||||
|
|
||||||
|
|
||||||
def misra_21_3(data):
|
def misra_21_3(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if isFunctionCall(token) and (token.astOperand1.str in ['malloc', 'calloc', 'realloc', 'free']):
|
if isFunctionCall(token) and (token.astOperand1.str in {'malloc', 'calloc', 'realloc', 'free'}):
|
||||||
reportError(token, 21, 3)
|
reportError(token, 21, 3)
|
||||||
|
|
||||||
|
|
||||||
def misra_21_4(data):
|
def misra_21_4(data):
|
||||||
directive = findInclude(data.directives, '<setjmp.h>')
|
directive = findInclude(data.directives, '<setjmp.h>')
|
||||||
if directive:
|
if directive:
|
||||||
reportError(directive, 21, 4)
|
reportError(directive, 21, 4)
|
||||||
|
|
||||||
|
|
||||||
def misra_21_5(data):
|
def misra_21_5(data):
|
||||||
directive = findInclude(data.directives, '<signal.h>')
|
directive = findInclude(data.directives, '<signal.h>')
|
||||||
if directive:
|
if directive:
|
||||||
reportError(directive, 21, 5)
|
reportError(directive, 21, 5)
|
||||||
|
|
||||||
|
|
||||||
def misra_21_7(data):
|
def misra_21_7(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if isFunctionCall(token) and (token.astOperand1.str in ['atof', 'atoi', 'atol', 'atoll']):
|
if isFunctionCall(token) and (token.astOperand1.str in {'atof', 'atoi', 'atol', 'atoll'}):
|
||||||
reportError(token, 21, 7)
|
reportError(token, 21, 7)
|
||||||
|
|
||||||
|
|
||||||
def misra_21_8(data):
|
def misra_21_8(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if isFunctionCall(token) and (token.astOperand1.str in ['abort', 'getenv', 'system']):
|
if isFunctionCall(token) and (token.astOperand1.str in {'abort', 'getenv', 'system'}):
|
||||||
reportError(token, 21, 8)
|
reportError(token, 21, 8)
|
||||||
|
|
||||||
|
|
||||||
def misra_21_9(data):
|
def misra_21_9(data):
|
||||||
for token in data.tokenlist:
|
for token in data.tokenlist:
|
||||||
if (token.str in ['bsearch', 'qsort']) and token.next and token.next.str == '(':
|
if (token.str in {'bsearch', 'qsort'}) and token.next and token.next.str == '(':
|
||||||
reportError(token, 21, 9)
|
reportError(token, 21, 9)
|
||||||
|
|
||||||
|
|
||||||
def misra_21_11(data):
|
def misra_21_11(data):
|
||||||
directive = findInclude(data.directives, '<tgmath.h>')
|
directive = findInclude(data.directives, '<tgmath.h>')
|
||||||
if directive:
|
if directive:
|
||||||
reportError(directive, 21, 11)
|
reportError(directive, 21, 11)
|
||||||
|
|
||||||
|
|
||||||
def loadRuleTexts(filename):
|
def loadRuleTexts(filename):
|
||||||
num1 = 0
|
num1 = 0
|
||||||
num2 = 0
|
num2 = 0
|
||||||
for line in open(filename,'rt'):
|
for line in open(filename, 'rt'):
|
||||||
line = line.replace('\r','').replace('\n','')
|
line = line.replace('\r', '').replace('\n', '')
|
||||||
res = re.match(r'^Rule ([0-9]+).([0-9]+)', line)
|
res = re.match(r'^Rule ([0-9]+).([0-9]+)', line)
|
||||||
if res:
|
if res:
|
||||||
num1 = int(res.group(1))
|
num1 = int(res.group(1))
|
||||||
|
@@ -986,7 +1073,7 @@ for arg in sys.argv[1:]:
         VERIFY_ACTUAL = []
         VERIFY_EXPECTED = []
         for tok in data.rawTokens:
-            if tok.str.startswith('//') and tok.str.find('TODO')<0:
+            if tok.str.startswith('//') and 'TODO' not in tok.str:
                 for word in tok.str[2:].split(' '):
                     if re.match(r'[0-9]+\.[0-9]+', word):
                         VERIFY_EXPECTED.append(str(tok.linenr) + ':' + word)
@@ -1075,10 +1162,10 @@ for arg in sys.argv[1:]:

     if VERIFY:
         for expected in VERIFY_EXPECTED:
-            if not expected in VERIFY_ACTUAL:
+            if expected not in VERIFY_ACTUAL:
                 print('Expected but not seen: ' + expected)
                 sys.exit(1)
         for actual in VERIFY_ACTUAL:
-            if not actual in VERIFY_EXPECTED:
+            if actual not in VERIFY_EXPECTED:
                 print('Not expected: ' + actual)
                 sys.exit(1)
@@ -1,4 +1,4 @@
-#/usr/bin/python
+#!/usr/bin/env python
 #
 # cppcheck addon for naming conventions
 #
@@ -1,4 +1,4 @@
-#/usr/bin/python
+#!/usr/bin/env python
 #
 # This script analyses Cppcheck dump files to locate threadsafety issues
 # - warn about static local objects
@ -15,8 +15,8 @@ def reportError(token, severity, msg):
|
||||||
|
|
||||||
def checkstatic(data):
|
def checkstatic(data):
|
||||||
for var in data.variables:
|
for var in data.variables:
|
||||||
if var.isStatic == True and var.isLocal == True and var.isClass == True:
|
if var.isStatic and var.isLocal and var.isClass:
|
||||||
reportError(var.typeStartToken, 'warning', ('Local static object: ' + var.nameToken.str) )
|
reportError(var.typeStartToken, 'warning', ('Local static object: ' + var.nameToken.str))
|
||||||
|
|
||||||
for arg in sys.argv[1:]:
|
for arg in sys.argv[1:]:
|
||||||
print('Checking ' + arg + '...')
|
print('Checking ' + arg + '...')
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
#! /usr/bin/python
|
#!/usr/bin/env python
|
||||||
#
|
#
|
||||||
# cppcheck addon for Y2038 safeness detection
|
# cppcheck addon for Y2038 safeness detection
|
||||||
#
|
#
|
||||||
|
@ -18,18 +18,17 @@ import cppcheckdata
|
||||||
import sys
|
import sys
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
import argparse
|
|
||||||
|
|
||||||
#---------------
|
# --------------
|
||||||
# Error handling
|
# Error handling
|
||||||
#---------------
|
# --------------
|
||||||
|
|
||||||
diagnostics = {}
|
diagnostics = {}
|
||||||
|
|
||||||
|
|
||||||
def reportDiagnostic(template, configuration, file, line, severity, message):
|
def reportDiagnostic(template, configuration, file, line, severity, message):
|
||||||
# collect diagnostics by configuration
|
# collect diagnostics by configuration
|
||||||
if not configuration in diagnostics:
|
if configuration not in diagnostics:
|
||||||
diagnostics[configuration] = []
|
diagnostics[configuration] = []
|
||||||
# add error to this configuration
|
# add error to this configuration
|
||||||
diagnostics[configuration].append(
|
diagnostics[configuration].append(
|
||||||
|
@ -58,9 +57,9 @@ def reportTokDiag(template, cfg, token, severity, msg):
|
||||||
token.file, token.linenr,
|
token.file, token.linenr,
|
||||||
severity, msg)
|
severity, msg)
|
||||||
|
|
||||||
#---------------------------------------------
|
# --------------------------------------------
|
||||||
# #define/#undef detection regular expressions
|
# #define/#undef detection regular expressions
|
||||||
#---------------------------------------------
|
# --------------------------------------------
|
||||||
|
|
||||||
# test for '#define _TIME_BITS 64'
|
# test for '#define _TIME_BITS 64'
|
||||||
re_define_time_bits_64 = re.compile(r'^\s*#\s*define\s+_TIME_BITS\s+64\s*$')
|
re_define_time_bits_64 = re.compile(r'^\s*#\s*define\s+_TIME_BITS\s+64\s*$')
|
||||||
|
@ -77,14 +76,14 @@ re_define_use_time_bits64 = re.compile(r'^\s*#\s*define\s+_USE_TIME_BITS64\s*$')
|
||||||
# test for '#undef _USE_TIME_BITS64' (if it ever happens)
|
# test for '#undef _USE_TIME_BITS64' (if it ever happens)
|
||||||
re_undef_use_time_bits64 = re.compile(r'^\s*#\s*undef\s+_USE_TIME_BITS64\s*$')
|
re_undef_use_time_bits64 = re.compile(r'^\s*#\s*undef\s+_USE_TIME_BITS64\s*$')
|
||||||
|
|
||||||
#---------------------------------
|
# --------------------------------
|
||||||
# List of Y2038-unsafe identifiers
|
# List of Y2038-unsafe identifiers
|
||||||
#---------------------------------
|
# --------------------------------
|
||||||
|
|
||||||
# This is WIP. Eventually it should contain all identifiers (types
|
# This is WIP. Eventually it should contain all identifiers (types
|
||||||
# and functions) which would be affected by the Y2038 bug.
|
# and functions) which would be affected by the Y2038 bug.
|
||||||
|
|
||||||
id_Y2038 = [
|
id_Y2038 = {
|
||||||
# Y2038-unsafe types by definition
|
# Y2038-unsafe types by definition
|
||||||
'time_t',
|
'time_t',
|
||||||
# Types using Y2038-unsafe types
|
# Types using Y2038-unsafe types
|
||||||
|
@ -184,44 +183,42 @@ id_Y2038 = [
|
||||||
'getutxid',
|
'getutxid',
|
||||||
'getutxline',
|
'getutxline',
|
||||||
'pututxline'
|
'pututxline'
|
||||||
]
|
}
|
||||||
|
|
||||||
|
|
||||||
# return all files ending in .dump among or under the given paths
|
# return all files ending in .dump among or under the given paths
|
||||||
|
|
||||||
def find_dump_files(paths):
|
def find_dump_files(paths):
|
||||||
dumpfiles = []
|
dumpfiles = []
|
||||||
for path in paths:
|
for path in paths:
|
||||||
if path.endswith('.dump'):
|
if path.endswith('.dump'):
|
||||||
if not path in dumpfiles:
|
if path not in dumpfiles:
|
||||||
dumpfiles.append(path)
|
dumpfiles.append(path)
|
||||||
else:
|
else:
|
||||||
for (top, subdirs, files) in os.walk(path):
|
for (top, subdirs, files) in os.walk(path):
|
||||||
for file in files:
|
for file in files:
|
||||||
if file.endswith('.dump'):
|
if file.endswith('.dump'):
|
||||||
f = top + '/' + file
|
f = top + '/' + file
|
||||||
if not f in dumpfiles:
|
if f not in dumpfiles:
|
||||||
dumpfiles.append(f)
|
dumpfiles.append(f)
|
||||||
dumpfiles.sort()
|
dumpfiles.sort()
|
||||||
return dumpfiles
|
return dumpfiles
|
||||||
|
|
||||||
#------------------
|
# -----------------
|
||||||
# Let's get to work
|
# Let's get to work
|
||||||
#------------------
|
# -----------------
|
||||||
|
|
||||||
# extend cppcheck parser with our own options
|
# extend cppcheck parser with our own options
|
||||||
|
|
||||||
parser = cppcheckdata.ArgumentParser()
|
parser = cppcheckdata.ArgumentParser()
|
||||||
parser.add_argument('-q', '--quiet', action='store_true',
|
parser.add_argument('-q', '--quiet', action='store_true',
|
||||||
help='do not print "Checking ..." lines')
|
help='do not print "Checking ..." lines')
|
||||||
parser.add_argument('paths', nargs='+', metavar='path',
|
parser.add_argument('paths', nargs='+', metavar='path',
|
||||||
help='path to dump file or directory')
|
help='path to dump file or directory')
|
||||||
|
|
||||||
# parse command line
|
|
||||||
|
|
||||||
|
# parse command line
|
||||||
args = parser.parse_args()
|
args = parser.parse_args()
|
||||||
|
|
||||||
# now operate on each file in turn
|
# now operate on each file in turn
|
||||||
|
|
||||||
dumpfiles = find_dump_files(args.paths)
|
dumpfiles = find_dump_files(args.paths)
|
||||||
|
|
||||||
for dumpfile in dumpfiles:
|
for dumpfile in dumpfiles:
|
||||||
|
@ -258,7 +255,7 @@ for dumpfile in dumpfiles:
|
||||||
if re_define_use_time_bits64.match(directive.str):
|
if re_define_use_time_bits64.match(directive.str):
|
||||||
safe = int(srclinenr)
|
safe = int(srclinenr)
|
||||||
# warn about _TIME_BITS not being defined
|
# warn about _TIME_BITS not being defined
|
||||||
if time_bits_defined == False:
|
if not time_bits_defined:
|
||||||
reportDirDiag(args.template,
|
reportDirDiag(args.template,
|
||||||
cfg, srcfile, srclinenr, directive, 'warning',
|
cfg, srcfile, srclinenr, directive, 'warning',
|
||||||
'_USE_TIME_BITS64 is defined but _TIME_BITS was not')
|
'_USE_TIME_BITS64 is defined but _TIME_BITS was not')
|
||||||
|
|
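As a usage sketch based on the options added above (the directory name is an assumption):

    cppcheck --dump src/
    python y2038.py --quiet src/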
|
@ -7,9 +7,9 @@ with open('README.txt') as f:
|
||||||
|
|
||||||
setup(
|
setup(
|
||||||
name="cppcheck",
|
name="cppcheck",
|
||||||
description='Python script to parse the XML (version 2) output of '
|
description='Python script to parse the XML (version 2) output of ' +
|
||||||
+ 'cppcheck and generate an HTML report using Pygments for syntax '
|
'cppcheck and generate an HTML report using Pygments for syntax ' +
|
||||||
+ 'highlighting.',
|
'highlighting.',
|
||||||
long_description=readme,
|
long_description=readme,
|
||||||
author='Henrik Nilsson',
|
author='Henrik Nilsson',
|
||||||
url='https://github.com/danmar/cppcheck',
|
url='https://github.com/danmar/cppcheck',
|
||||||
|
|
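A side note on the description string above: adjacent string literals inside parentheses are concatenated by the Python parser, so an equivalent form (a sketch, not what the commit does) could drop the explicit '+' operators:

    description=('Python script to parse the XML (version 2) output of '
                 'cppcheck and generate an HTML report using Pygments for syntax '
                 'highlighting.'),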
|
@ -1,52 +1,54 @@
|
||||||
|
#!/usr/bin/env python
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
|
|
||||||
|
|
||||||
def hasresult(filename, result):
|
def hasresult(filename, result):
|
||||||
if not os.path.isfile(filename):
|
if not os.path.isfile(filename):
|
||||||
|
return False
|
||||||
|
for line in open(filename, 'rt'):
|
||||||
|
if result in line:
|
||||||
|
return True
|
||||||
return False
|
return False
|
||||||
for line in open(filename, 'rt'):
|
|
||||||
if line.find(result) >= 0:
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
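The rewritten hasresult() above still leaves the opened file to be closed by the garbage collector; a with-statement variant would be the more idiomatic sketch (same behaviour, relying on the os import already present in this script):

    def hasresult(filename, result):
        if not os.path.isfile(filename):
            return False
        with open(filename, 'rt') as f:   # closes the file deterministically
            for line in f:
                if result in line:
                    return True
        return False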
|
||||||
def parsefile(filename):
|
def parsefile(filename):
|
||||||
ret = []
|
ret = []
|
||||||
linenr = 0
|
linenr = 0
|
||||||
functionName = None
|
functionName = None
|
||||||
for line in open(filename,'rt'):
|
for line in open(filename, 'rt'):
|
||||||
linenr = linenr + 1
|
linenr = linenr + 1
|
||||||
res = re.match('^[a-z]+[ *]+([a-z0-9_]+)[(]', line)
|
res = re.match('^[a-z]+[ *]+([a-z0-9_]+)[(]', line)
|
||||||
if res:
|
if res:
|
||||||
functionName = res.group(1)
|
functionName = res.group(1)
|
||||||
if line.startswith('}'):
|
if line.startswith('}'):
|
||||||
functionName = ''
|
functionName = ''
|
||||||
elif line.find('BUG')>0 or line.find('WARN')>0 or filename=='ub.c':
|
elif line.find('BUG') > 0 or line.find('WARN') > 0 or filename == 'ub.c':
|
||||||
spaces = ''
|
spaces = ''
|
||||||
for i in range(100):
|
for i in range(100):
|
||||||
spaces = spaces + ' '
|
spaces = spaces + ' '
|
||||||
s = filename + spaces
|
s = filename + spaces
|
||||||
s = s[:15] + str(linenr) + spaces
|
s = s[:15] + str(linenr) + spaces
|
||||||
s = s[:20] + functionName + spaces
|
s = s[:20] + functionName + spaces
|
||||||
s = s[:50]
|
s = s[:50]
|
||||||
if hasresult('cppcheck.txt', '[' + filename + ':' + str(linenr) + ']'):
|
if hasresult('cppcheck.txt', '[' + filename + ':' + str(linenr) + ']'):
|
||||||
s = s + ' X'
|
s = s + ' X'
|
||||||
else:
|
else:
|
||||||
s = s + ' '
|
s = s + ' '
|
||||||
if hasresult('clang.txt', filename + ':' + str(linenr)):
|
if hasresult('clang.txt', filename + ':' + str(linenr)):
|
||||||
s = s + ' X'
|
s = s + ' X'
|
||||||
else:
|
else:
|
||||||
s = s + ' '
|
s = s + ' '
|
||||||
if hasresult('lint.txt', filename + ' ' + str(linenr)):
|
if hasresult('lint.txt', filename + ' ' + str(linenr)):
|
||||||
s = s + ' X'
|
s = s + ' X'
|
||||||
else:
|
else:
|
||||||
s = s + ' '
|
s = s + ' '
|
||||||
if hasresult('cov.txt', filename + ':' + str(linenr)):
|
if hasresult('cov.txt', filename + ':' + str(linenr)):
|
||||||
s = s + ' X'
|
s = s + ' X'
|
||||||
else:
|
else:
|
||||||
s = s + ' '
|
s = s + ' '
|
||||||
ret.append(s)
|
ret.append(s)
|
||||||
return ret
|
return ret
|
||||||
|
|
||||||
bugs = []
|
bugs = []
|
||||||
bugs.extend(parsefile('controlflow.c'))
|
bugs.extend(parsefile('controlflow.c'))
|
||||||
|
@ -54,4 +56,4 @@ bugs.extend(parsefile('data.c'))
|
||||||
bugs.extend(parsefile('functions.c'))
|
bugs.extend(parsefile('functions.c'))
|
||||||
bugs.extend(parsefile('ub.c'))
|
bugs.extend(parsefile('ub.c'))
|
||||||
for bug in bugs:
|
for bug in bugs:
|
||||||
print(bug)
|
print(bug)
|
||||||
|
|
tools/ci.py
|
@ -1,4 +1,4 @@
|
||||||
#!/usr/bin/python
|
#!/usr/bin/env python
|
||||||
|
|
||||||
# continuous integration
|
# continuous integration
|
||||||
# build daily reports (doxygen,coverage,etc)
|
# build daily reports (doxygen,coverage,etc)
|
||||||
|
@ -8,9 +8,7 @@ import time
|
||||||
import subprocess
|
import subprocess
|
||||||
import pexpect
|
import pexpect
|
||||||
import glob
|
import glob
|
||||||
import os
|
|
||||||
import sys
|
import sys
|
||||||
import urllib
|
|
||||||
|
|
||||||
|
|
||||||
# Upload file to sourceforge web server using scp
|
# Upload file to sourceforge web server using scp
|
||||||
|
@ -24,16 +22,11 @@ def upload(file_to_upload, destination):
|
||||||
child.expect('Password:')
|
child.expect('Password:')
|
||||||
child.sendline(password)
|
child.sendline(password)
|
||||||
child.interact()
|
child.interact()
|
||||||
except IOError:
|
except (IOError, OSError, pexpect.TIMEOUT):
|
||||||
pass
|
|
||||||
except OSError:
|
|
||||||
pass
|
|
||||||
except pexpect.TIMEOUT:
|
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
# git push
|
# git push
|
||||||
|
|
||||||
|
|
||||||
def gitpush():
|
def gitpush():
|
||||||
try:
|
try:
|
||||||
password = sys.argv[1]
|
password = sys.argv[1]
|
||||||
|
@ -41,11 +34,7 @@ def gitpush():
|
||||||
child.expect("Enter passphrase for key '/home/daniel/.ssh/id_rsa':")
|
child.expect("Enter passphrase for key '/home/daniel/.ssh/id_rsa':")
|
||||||
child.sendline(password)
|
child.sendline(password)
|
||||||
child.interact()
|
child.interact()
|
||||||
except IOError:
|
except (IOError, OSError, pexpect.TIMEOUT):
|
||||||
pass
|
|
||||||
except OSError:
|
|
||||||
pass
|
|
||||||
except pexpect.TIMEOUT:
|
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
@ -53,7 +42,7 @@ def iconv(filename):
|
||||||
p = subprocess.Popen(['file', '-i', filename],
|
p = subprocess.Popen(['file', '-i', filename],
|
||||||
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
|
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
|
||||||
comm = p.communicate()
|
comm = p.communicate()
|
||||||
if comm[0].find('charset=iso-8859-1') >= 0:
|
if 'charset=iso-8859-1' in comm[0]:
|
||||||
subprocess.call(
|
subprocess.call(
|
||||||
["iconv", filename, "--from=ISO-8859-1", "--to=UTF-8", "-o", filename])
|
["iconv", filename, "--from=ISO-8859-1", "--to=UTF-8", "-o", filename])
|
||||||
|
|
||||||
|
@ -83,11 +72,7 @@ def gitpull():
|
||||||
child.expect('Already up-to-date.')
|
child.expect('Already up-to-date.')
|
||||||
child.interact()
|
child.interact()
|
||||||
|
|
||||||
except IOError:
|
except (IOError, OSError, pexpect.TIMEOUT):
|
||||||
pass
|
|
||||||
except OSError:
|
|
||||||
pass
|
|
||||||
except pexpect.TIMEOUT:
|
|
||||||
pass
|
pass
|
||||||
except pexpect.EOF:
|
except pexpect.EOF:
|
||||||
return True
|
return True
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
#!/usr/bin/python
|
#!/usr/bin/env python
|
||||||
#
|
#
|
||||||
# 1. Create a folder daca2-addons in your HOME folder
|
# 1. Create a folder daca2-addons in your HOME folder
|
||||||
# 2. Put cppcheck-O2 in daca2-addons. It should be built with all optimisations.
|
# 2. Put cppcheck-O2 in daca2-addons. It should be built with all optimisations.
|
||||||
|
@ -14,13 +14,13 @@ import os
|
||||||
import datetime
|
import datetime
|
||||||
import time
|
import time
|
||||||
|
|
||||||
DEBIAN = ['ftp://ftp.se.debian.org/debian/',
|
DEBIAN = ('ftp://ftp.se.debian.org/debian/',
|
||||||
'ftp://ftp.debian.org/debian/']
|
'ftp://ftp.debian.org/debian/')
|
||||||
|
|
||||||
|
|
||||||
def wget(filepath):
|
def wget(filepath):
|
||||||
filename = filepath
|
filename = filepath
|
||||||
if filepath.find('/') >= 0:
|
if '/' in filepath:
|
||||||
filename = filename[filename.rfind('/') + 1:]
|
filename = filename[filename.rfind('/') + 1:]
|
||||||
for d in DEBIAN:
|
for d in DEBIAN:
|
||||||
subprocess.call(
|
subprocess.call(
|
||||||
|
@ -126,8 +126,7 @@ def dumpfiles(path):
|
||||||
if os.path.islink(g):
|
if os.path.islink(g):
|
||||||
continue
|
continue
|
||||||
if os.path.isdir(g):
|
if os.path.isdir(g):
|
||||||
for df in dumpfiles(path + g + '/'):
|
ret.extend(dumpfiles(path + g + '/'))
|
||||||
ret.append(df)
|
|
||||||
elif os.path.isfile(g) and g[-5:] == '.dump':
|
elif os.path.isfile(g) and g[-5:] == '.dump':
|
||||||
ret.append(g)
|
ret.append(g)
|
||||||
return ret
|
return ret
|
||||||
|
@ -166,7 +165,8 @@ def scanarchive(filepath, jobs):
|
||||||
# gcc-arm - no ticket. Reproducible timeout in daca2 though as of 1.73/early 2016.
|
# gcc-arm - no ticket. Reproducible timeout in daca2 though as of 1.73/early 2016.
|
||||||
#
|
#
|
||||||
|
|
||||||
if filename[:5] == 'flite' or filename[:5] == 'boost' or filename[:7] == 'insight' or filename[:8] == 'valgrind' or filename[:7] == 'gcc-arm':
|
if filename[:5] == 'flite' or filename[:5] == 'boost' or filename[:7] == 'insight' or\
|
||||||
|
filename[:8] == 'valgrind' or filename[:7] == 'gcc-arm':
|
||||||
results = open('results.txt', 'at')
|
results = open('results.txt', 'at')
|
||||||
results.write('fixme: skipped package to avoid hang\n')
|
results.write('fixme: skipped package to avoid hang\n')
|
||||||
results.close()
|
results.close()
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
#!/usr/bin/python
|
#!/usr/bin/env python
|
||||||
#
|
#
|
||||||
# Downloads all daca2 source code packages.
|
# Downloads all daca2 source code packages.
|
||||||
#
|
#
|
||||||
|
@ -11,16 +11,15 @@ import sys
|
||||||
import shutil
|
import shutil
|
||||||
import glob
|
import glob
|
||||||
import os
|
import os
|
||||||
import datetime
|
|
||||||
import time
|
import time
|
||||||
|
|
||||||
DEBIAN = ['ftp://ftp.se.debian.org/debian/',
|
DEBIAN = ('ftp://ftp.se.debian.org/debian/',
|
||||||
'ftp://ftp.debian.org/debian/']
|
'ftp://ftp.debian.org/debian/')
|
||||||
|
|
||||||
|
|
||||||
def wget(filepath):
|
def wget(filepath):
|
||||||
filename = filepath
|
filename = filepath
|
||||||
if filepath.find('/') >= 0:
|
if '/' in filepath:
|
||||||
filename = filename[filename.rfind('/') + 1:]
|
filename = filename[filename.rfind('/') + 1:]
|
||||||
for d in DEBIAN:
|
for d in DEBIAN:
|
||||||
subprocess.call(
|
subprocess.call(
|
||||||
|
@ -121,7 +120,8 @@ def removeLargeFiles(path):
|
||||||
os.remove(g)
|
os.remove(g)
|
||||||
|
|
||||||
# remove non-source files
|
# remove non-source files
|
||||||
elif g[-2:] != '.C' and g[-2:] != '.c' and g[-4:] != '.cc' and g[-4:] != '.cpp' and g[-4:] != '.cxx' and g[-2:] != '.h' and g[-2:] != '.H' and g[-4:] != '.c++' and g[-4:] != '.hpp' and g[-4:] != '.tpp' and g[-4:] != '.t++':
|
elif g[-2:] not in {'.C', '.c', '.H', '.h'} and g[-3:] != '.cc' and\
|
||||||
|
g[-4:] not in {'.cpp', '.cxx', '.c++', '.hpp', '.tpp', '.t++'}:
|
||||||
os.remove(g)
|
os.remove(g)
|
||||||
|
|
||||||
|
|
||||||
|
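A possible alternative to the chained suffix checks above (a sketch only, not what this commit does) is to take the extension once with os.path.splitext and test set membership:

    _, ext = os.path.splitext(g)   # os is already imported at the top of the script
    if ext not in {'.C', '.c', '.cc', '.cpp', '.cxx', '.c++', '.h', '.H', '.hpp', '.tpp', '.t++'}:
        os.remove(g)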
|
|
@ -1,6 +1,7 @@
|
||||||
|
#!/usr/bin/env python
|
||||||
import os
|
import os
|
||||||
import sys
|
import sys
|
||||||
import subprocess
|
|
||||||
|
|
||||||
def readdate(data):
|
def readdate(data):
|
||||||
datepos = -1
|
datepos = -1
|
||||||
|
@ -54,7 +55,8 @@ mainpage.write('</head>\n')
|
||||||
mainpage.write('<body>\n')
|
mainpage.write('<body>\n')
|
||||||
mainpage.write('<h1>DACA2</h1>\n')
|
mainpage.write('<h1>DACA2</h1>\n')
|
||||||
mainpage.write('<p>Results when running latest (git head) Cppcheck on Debian.</p>\n')
|
mainpage.write('<p>Results when running latest (git head) Cppcheck on Debian.</p>\n')
|
||||||
mainpage.write('<p>For performance reasons the analysis is limited. Files larger than 1mb are skipped. If analysis of a file takes more than 10 minutes it may be stopped.</p>\n')
|
mainpage.write('<p>For performance reasons the analysis is limited. Files larger than 1mb are skipped. ' +
|
||||||
|
'If analysis of a file takes more than 10 minutes it may be stopped.</p>\n')
|
||||||
mainpage.write('<table class="sortable">\n')
|
mainpage.write('<table class="sortable">\n')
|
||||||
mainpage.write(
|
mainpage.write(
|
||||||
'<tr>' +
|
'<tr>' +
|
||||||
|
@ -72,9 +74,9 @@ lastupdate = None
|
||||||
recent = []
|
recent = []
|
||||||
|
|
||||||
daca2 = daca2folder
|
daca2 = daca2folder
|
||||||
for lib in [False, True]:
|
for lib in (False, True):
|
||||||
for a in "0123456789abcdefghijklmnopqrstuvwxyz":
|
for a in "0123456789abcdefghijklmnopqrstuvwxyz":
|
||||||
if lib == True:
|
if lib:
|
||||||
a = "lib" + a
|
a = "lib" + a
|
||||||
if not os.path.isfile(daca2 + a + '/results.txt'):
|
if not os.path.isfile(daca2 + a + '/results.txt'):
|
||||||
continue
|
continue
|
||||||
|
@ -83,7 +85,7 @@ for lib in [False, True]:
|
||||||
data = f.read()
|
data = f.read()
|
||||||
f.close()
|
f.close()
|
||||||
|
|
||||||
if data.find('ftp://')<0:
|
if 'ftp://' not in data:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
datestr = readdate(data)
|
datestr = readdate(data)
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
#!/usr/bin/python
|
#!/usr/bin/env python
|
||||||
#
|
#
|
||||||
# 1. Create a folder daca2 in your HOME folder
|
# 1. Create a folder daca2 in your HOME folder
|
||||||
# 2. Put cppcheck-O2 in daca2. It should be built with all optimisations.
|
# 2. Put cppcheck-O2 in daca2. It should be built with all optimisations.
|
||||||
|
@ -16,13 +16,13 @@ import datetime
|
||||||
import time
|
import time
|
||||||
import logging
|
import logging
|
||||||
|
|
||||||
DEBIAN = ['ftp://ftp.se.debian.org/debian/',
|
DEBIAN = ('ftp://ftp.se.debian.org/debian/',
|
||||||
'ftp://ftp.debian.org/debian/']
|
'ftp://ftp.debian.org/debian/')
|
||||||
|
|
||||||
|
|
||||||
def wget(filepath):
|
def wget(filepath):
|
||||||
filename = filepath
|
filename = filepath
|
||||||
if filepath.find('/') >= 0:
|
if '/' in filepath:
|
||||||
filename = filename[filename.rfind('/') + 1:]
|
filename = filename[filename.rfind('/') + 1:]
|
||||||
for d in DEBIAN:
|
for d in DEBIAN:
|
||||||
subprocess.call(
|
subprocess.call(
|
||||||
|
@ -76,10 +76,8 @@ def handleRemoveReadonly(func, path, exc):
|
||||||
|
|
||||||
def removeAllExceptResults():
|
def removeAllExceptResults():
|
||||||
filenames = []
|
filenames = []
|
||||||
for g in glob.glob('[A-Za-z0-9]*'):
|
filenames.extend(glob.glob('[A-Za-z0-9]*'))
|
||||||
filenames.append(g)
|
filenames.extend(glob.glob('.[a-z]*'))
|
||||||
for g in glob.glob('.[a-z]*'):
|
|
||||||
filenames.append(g)
|
|
||||||
|
|
||||||
for filename in filenames:
|
for filename in filenames:
|
||||||
count = 5
|
count = 5
|
||||||
|
@ -104,7 +102,7 @@ def removeAllExceptResults():
|
||||||
|
|
||||||
def removeLargeFiles(path):
|
def removeLargeFiles(path):
|
||||||
for g in glob.glob(path + '*'):
|
for g in glob.glob(path + '*'):
|
||||||
if g == '.' or g == '..':
|
if g in {'.', '..'}:
|
||||||
continue
|
continue
|
||||||
if os.path.islink(g):
|
if os.path.islink(g):
|
||||||
continue
|
continue
|
||||||
|
@ -122,9 +120,11 @@ def removeLargeFiles(path):
|
||||||
except OSError as err:
|
except OSError as err:
|
||||||
logging.error('Failed to remove {}: {}'.format(g, err))
|
logging.error('Failed to remove {}: {}'.format(g, err))
|
||||||
|
|
||||||
|
|
||||||
def strfCurrTime(fmt):
|
def strfCurrTime(fmt):
|
||||||
return datetime.time.strftime(datetime.datetime.now().time(), fmt)
|
return datetime.time.strftime(datetime.datetime.now().time(), fmt)
|
||||||
|
|
||||||
|
|
||||||
def scanarchive(filepath, jobs, cpulimit):
|
def scanarchive(filepath, jobs, cpulimit):
|
||||||
# remove all files/folders except RESULTS_FILENAME
|
# remove all files/folders except RESULTS_FILENAME
|
||||||
removeAllExceptResults()
|
removeAllExceptResults()
|
||||||
|
@ -152,7 +152,8 @@ def scanarchive(filepath, jobs, cpulimit):
|
||||||
cmd = 'cpulimit --limit=' + cpulimit
|
cmd = 'cpulimit --limit=' + cpulimit
|
||||||
else:
|
else:
|
||||||
cmd = 'nice --adjustment=1000'
|
cmd = 'nice --adjustment=1000'
|
||||||
cmd = cmd + ' ../cppcheck-O2 -D__GCC__ --enable=style --inconclusive --error-exitcode=0 --exception-handling=stderr ' + jobs + ' --template=daca2 .'
|
cmd = cmd + ' ../cppcheck-O2 -D__GCC__ --enable=style --inconclusive --error-exitcode=0 ' +\
|
||||||
|
'--exception-handling=stderr ' + jobs + ' --template=daca2 .'
|
||||||
cmds = cmd.split()
|
cmds = cmd.split()
|
||||||
|
|
||||||
p = subprocess.Popen(cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
p = subprocess.Popen(cmds, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||||
|
@ -160,7 +161,7 @@ def scanarchive(filepath, jobs, cpulimit):
|
||||||
|
|
||||||
if p.returncode == 0:
|
if p.returncode == 0:
|
||||||
logging.info(comm[1] + strfCurrTime('[%H:%M]'))
|
logging.info(comm[1] + strfCurrTime('[%H:%M]'))
|
||||||
elif comm[0].find('cppcheck: error: could not find or open any of the paths given.') < 0:
|
elif 'cppcheck: error: could not find or open any of the paths given.' not in comm[0]:
|
||||||
logging.error(comm[1] + strfCurrTime('[%H:%M]'))
|
logging.error(comm[1] + strfCurrTime('[%H:%M]'))
|
||||||
logging.error('Exit code is not zero! Crash?\n')
|
logging.error('Exit code is not zero! Crash?\n')
|
||||||
|
|
||||||
|
@ -188,9 +189,9 @@ RESULTS_FILENAME = 'results.txt'
|
||||||
RESULTS_FILE = os.path.join(workdir, RESULTS_FILENAME)
|
RESULTS_FILE = os.path.join(workdir, RESULTS_FILENAME)
|
||||||
|
|
||||||
logging.basicConfig(
|
logging.basicConfig(
|
||||||
filename=RESULTS_FILE,
|
filename=RESULTS_FILE,
|
||||||
level=logging.INFO,
|
level=logging.INFO,
|
||||||
format='%(message)s')
|
format='%(message)s')
|
||||||
|
|
||||||
print(workdir)
|
print(workdir)
|
||||||
|
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
#!/usr/bin/python
|
#!/usr/bin/env python
|
||||||
#
|
#
|
||||||
# Cppcheck - A tool for static C/C++ code analysis
|
# Cppcheck - A tool for static C/C++ code analysis
|
||||||
# Copyright (C) 2007-2016 Cppcheck team.
|
# Copyright (C) 2007-2016 Cppcheck team.
|
||||||
|
@ -163,8 +163,8 @@ def writeHtmlFile(nodes, functionName, filename, errorsOnly):
|
||||||
|
|
||||||
|
|
||||||
if len(sys.argv) <= 1 or '--help' in sys.argv:
|
if len(sys.argv) <= 1 or '--help' in sys.argv:
|
||||||
print ('Extract test cases from test file')
|
print('Extract test cases from test file')
|
||||||
print (
|
print(
|
||||||
'Syntax: extracttests.py [--html=folder] [--xml] [--code=folder] path/testfile.cpp')
|
'Syntax: extracttests.py [--html=folder] [--xml] [--code=folder] path/testfile.cpp')
|
||||||
sys.exit(0)
|
sys.exit(0)
|
||||||
|
|
||||||
|
@ -183,7 +183,7 @@ for arg in sys.argv[1:]:
|
||||||
elif arg.endswith('.cpp'):
|
elif arg.endswith('.cpp'):
|
||||||
filename = arg
|
filename = arg
|
||||||
else:
|
else:
|
||||||
print ('Invalid option: ' + arg)
|
print('Invalid option: ' + arg)
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
@ -195,8 +195,8 @@ if filename is not None:
|
||||||
|
|
||||||
# generate output
|
# generate output
|
||||||
if xml:
|
if xml:
|
||||||
print ('<?xml version="1.0"?>')
|
print('<?xml version="1.0"?>')
|
||||||
print ('<tree>')
|
print('<tree>')
|
||||||
count = 0
|
count = 0
|
||||||
for node in e.nodes:
|
for node in e.nodes:
|
||||||
s = ' <node'
|
s = ' <node'
|
||||||
|
@ -204,8 +204,8 @@ if filename is not None:
|
||||||
s += ' code="' + strtoxml(node['code']) + '"'
|
s += ' code="' + strtoxml(node['code']) + '"'
|
||||||
s += ' expected="' + strtoxml(node['expected']) + '"'
|
s += ' expected="' + strtoxml(node['expected']) + '"'
|
||||||
s += '/>'
|
s += '/>'
|
||||||
print (s)
|
print(s)
|
||||||
print ('</tree>')
|
print('</tree>')
|
||||||
elif htmldir is not None:
|
elif htmldir is not None:
|
||||||
if not htmldir.endswith('/'):
|
if not htmldir.endswith('/'):
|
||||||
htmldir += '/'
|
htmldir += '/'
|
||||||
|
@ -311,4 +311,4 @@ if filename is not None:
|
||||||
errors.close()
|
errors.close()
|
||||||
else:
|
else:
|
||||||
for node in e.nodes:
|
for node in e.nodes:
|
||||||
print (node['functionName'])
|
print(node['functionName'])
|
||||||
|
|
|
@ -1,12 +1,12 @@
|
||||||
#!/usr/bin/python
|
#!/usr/bin/env python
|
||||||
import argparse
|
import argparse
|
||||||
import xml.etree.ElementTree as ET
|
import xml.etree.ElementTree as ET
|
||||||
|
|
||||||
|
|
||||||
def main():
|
def main():
|
||||||
|
|
||||||
parser = argparse.ArgumentParser(description="List all errors without a CWE assigned in CSV format")
|
parser = argparse.ArgumentParser(description="List all errors without a CWE assigned in CSV format")
|
||||||
parser.add_argument("-F", metavar="filename", required=True, help="XML file containing output from: ./cppcheck --errorlist --xml-version=2")
|
parser.add_argument("-F", metavar="filename", required=True,
|
||||||
|
help="XML file containing output from: ./cppcheck --errorlist --xml-version=2")
|
||||||
parsed = parser.parse_args()
|
parsed = parser.parse_args()
|
||||||
|
|
||||||
tree = ET.parse(vars(parsed)["F"])
|
tree = ET.parse(vars(parsed)["F"])
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
#!/usr/bin/python
|
#!/usr/bin/env python
|
||||||
#
|
#
|
||||||
# Cppcheck - A tool for static C/C++ code analysis
|
# Cppcheck - A tool for static C/C++ code analysis
|
||||||
# Copyright (C) 2007-2016 Cppcheck team.
|
# Copyright (C) 2007-2016 Cppcheck team.
|
||||||
|
@ -150,7 +150,8 @@ class MatchCompiler:
|
||||||
# if varid is provided, check that it's non-zero on first use
|
# if varid is provided, check that it's non-zero on first use
|
||||||
if varid and tok.find('%varid%') != -1 and checked_varid is False:
|
if varid and tok.find('%varid%') != -1 and checked_varid is False:
|
||||||
ret += ' if (varid==0U)\n'
|
ret += ' if (varid==0U)\n'
|
||||||
ret += ' throw InternalError(tok, "Internal error. Token::Match called with varid 0. Please report this to Cppcheck developers");\n'
|
ret += ' throw InternalError(tok, "Internal error. Token::Match called with varid 0. ' +\
|
||||||
|
'Please report this to Cppcheck developers");\n'
|
||||||
checked_varid = True
|
checked_varid = True
|
||||||
|
|
||||||
# [abc]
|
# [abc]
|
||||||
|
@ -189,7 +190,7 @@ class MatchCompiler:
|
||||||
gotoNextToken = ' tok = tok ? tok->next() : NULL;\n'
|
gotoNextToken = ' tok = tok ? tok->next() : NULL;\n'
|
||||||
|
|
||||||
else:
|
else:
|
||||||
negatedTok ="!" + self._compileCmd(tok)
|
negatedTok = "!" + self._compileCmd(tok)
|
||||||
# fold !true => false ; !false => true
|
# fold !true => false ; !false => true
|
||||||
# this avoids cppcheck warnings about condition always being true/false
|
# this avoids cppcheck warnings about condition always being true/false
|
||||||
if (negatedTok == "!false"):
|
if (negatedTok == "!false"):
|
||||||
|
@ -323,12 +324,14 @@ class MatchCompiler:
|
||||||
# Don't use assert() here, it's disabled for optimized builds.
|
# Don't use assert() here, it's disabled for optimized builds.
|
||||||
# We also need to verify builds in 'release' mode
|
# We also need to verify builds in 'release' mode
|
||||||
ret += ' if (res_parsed_match != res_compiled_match) {\n'
|
ret += ' if (res_parsed_match != res_compiled_match) {\n'
|
||||||
# ret += ' std::cout << "res_parsed_match' + str(verifyNumber) + ': " << res_parsed_match << ", res_compiled_match: " << res_compiled_match << "\\n";\n'
|
# ret += ' std::cout << "res_parsed_match' + str(verifyNumber) +\
|
||||||
# ret += ' if (tok)\n'
|
# ': " << res_parsed_match << ", res_compiled_match: " << res_compiled_match << "\\n";\n'
|
||||||
# ret += ' std::cout << "tok: " << tok->str();\n'
|
# ret += ' if (tok)\n'
|
||||||
# ret += ' if (tok->next())\n'
|
# ret += ' std::cout << "tok: " << tok->str();\n'
|
||||||
# ret += ' std::cout << "tok next: " << tok->next()->str();\n'
|
# ret += ' if (tok->next())\n'
|
||||||
ret += ' throw InternalError(tok, "Internal error. compiled match returned different result than parsed match: ' + pattern + '");\n'
|
# ret += ' std::cout << "tok next: " << tok->next()->str();\n'
|
||||||
|
ret += ' throw InternalError(tok, "Internal error. ' +\
|
||||||
|
'compiled match returned different result than parsed match: ' + pattern + '");\n'
|
||||||
ret += ' }\n'
|
ret += ' }\n'
|
||||||
ret += ' return res_compiled_match;\n'
|
ret += ' return res_compiled_match;\n'
|
||||||
ret += '}\n'
|
ret += '}\n'
|
||||||
|
@ -403,7 +406,7 @@ class MatchCompiler:
|
||||||
res = re.match(r'\s*"((?:.|\\")*?)"\s*$', raw_pattern)
|
res = re.match(r'\s*"((?:.|\\")*?)"\s*$', raw_pattern)
|
||||||
if res is None:
|
if res is None:
|
||||||
if self._showSkipped:
|
if self._showSkipped:
|
||||||
print(filename +":" + str(linenr) +" skipping match pattern:" + raw_pattern)
|
print(filename + ":" + str(linenr) + " skipping match pattern:" + raw_pattern)
|
||||||
break # Non-const pattern - bailout
|
break # Non-const pattern - bailout
|
||||||
|
|
||||||
pattern = res.group(1)
|
pattern = res.group(1)
|
||||||
|
@ -454,7 +457,8 @@ class MatchCompiler:
|
||||||
# Don't use assert() here, it's disabled for optimized builds.
|
# Don't use assert() here, it's disabled for optimized builds.
|
||||||
# We also need to verify builds in 'release' mode
|
# We also need to verify builds in 'release' mode
|
||||||
ret += ' if (res_parsed_findmatch != res_compiled_findmatch) {\n'
|
ret += ' if (res_parsed_findmatch != res_compiled_findmatch) {\n'
|
||||||
ret += ' throw InternalError(tok, "Internal error. compiled findmatch returned different result than parsed findmatch: ' + pattern + '");\n'
|
ret += ' throw InternalError(tok, "Internal error. ' +\
|
||||||
|
'compiled findmatch returned different result than parsed findmatch: ' + pattern + '");\n'
|
||||||
ret += ' }\n'
|
ret += ' }\n'
|
||||||
ret += ' return res_compiled_findmatch;\n'
|
ret += ' return res_compiled_findmatch;\n'
|
||||||
ret += '}\n'
|
ret += '}\n'
|
||||||
|
@ -524,9 +528,8 @@ class MatchCompiler:
|
||||||
if res is None:
|
if res is None:
|
||||||
break
|
break
|
||||||
|
|
||||||
|
# assert that Token::find(simple)match has either 2, 3 or 4 arguments
|
||||||
assert(len(res) >= 3 or len(res) < 6)
|
assert(len(res) >= 3 or len(res) < 6)
|
||||||
# assert that Token::find(simple)match has either 2, 3 or 4
|
|
||||||
# arguments
|
|
||||||
|
|
||||||
g0 = res[0]
|
g0 = res[0]
|
||||||
tok = res[1]
|
tok = res[1]
|
||||||
|
@ -556,7 +559,7 @@ class MatchCompiler:
|
||||||
res = re.match(r'\s*"((?:.|\\")*?)"\s*$', pattern)
|
res = re.match(r'\s*"((?:.|\\")*?)"\s*$', pattern)
|
||||||
if res is None:
|
if res is None:
|
||||||
if self._showSkipped:
|
if self._showSkipped:
|
||||||
print(filename +":" + str(linenr) +" skipping findmatch pattern:" + pattern)
|
print(filename + ":" + str(linenr) + " skipping findmatch pattern:" + pattern)
|
||||||
break # Non-const pattern - bailout
|
break # Non-const pattern - bailout
|
||||||
|
|
||||||
pattern = res.group(1)
|
pattern = res.group(1)
|
||||||
|
@ -588,7 +591,8 @@ class MatchCompiler:
|
||||||
startPos = res[0]
|
startPos = res[0]
|
||||||
endPos = res[1]
|
endPos = res[1]
|
||||||
text = line[startPos + 1:endPos - 1]
|
text = line[startPos + 1:endPos - 1]
|
||||||
line = line[:startPos] + 'MatchCompiler::makeConstStringBegin' + text + 'MatchCompiler::makeConstStringEnd' + line[endPos:]
|
line = line[:startPos] + 'MatchCompiler::makeConstStringBegin' +\
|
||||||
|
text + 'MatchCompiler::makeConstStringEnd' + line[endPos:]
|
||||||
line = line.replace('MatchCompiler::makeConstStringBegin', 'MatchCompiler::makeConstString("')
|
line = line.replace('MatchCompiler::makeConstStringBegin', 'MatchCompiler::makeConstString("')
|
||||||
line = line.replace('MatchCompiler::makeConstStringEnd', '")')
|
line = line.replace('MatchCompiler::makeConstStringEnd', '")')
|
||||||
return line
|
return line
|
||||||
|
|
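To illustrate the replacement performed above (a constructed example that mirrors the expectations in the unit tests further down): a source line such as

    if (tok->str() == ")")

is rewritten so that the string literal is wrapped, giving

    if (tok->str() == MatchCompiler::makeConstString(")"))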
|
@ -1,3 +1,4 @@
|
||||||
|
#!/usr/bin/env python
|
||||||
|
|
||||||
import glob
|
import glob
|
||||||
import os
|
import os
|
||||||
|
@ -44,24 +45,24 @@ def parseheader(cppcheckpath, filename):
|
||||||
posixcfg = f.read()
|
posixcfg = f.read()
|
||||||
f.close()
|
f.close()
|
||||||
|
|
||||||
while data.find('/*') >= 0:
|
while '/*' in data:
|
||||||
pos1 = data.find('/*')
|
pos1 = data.find('/*')
|
||||||
pos2 = data.find('*/', pos1 + 2)
|
pos2 = data.find('*/', pos1 + 2)
|
||||||
data = data[:pos1] + data[pos2 + 2:]
|
data = data[:pos1] + data[pos2 + 2:]
|
||||||
|
|
||||||
data = data.replace('\\\n', '')
|
data = data.replace('\\\n', '')
|
||||||
|
|
||||||
while data.find('\n#') >= 0:
|
while '\n#' in data:
|
||||||
pos1 = data.find('\n#')
|
pos1 = data.find('\n#')
|
||||||
pos2 = data.find('\n', pos1 + 1)
|
pos2 = data.find('\n', pos1 + 1)
|
||||||
data = data[:pos1] + data[pos2:]
|
data = data[:pos1] + data[pos2:]
|
||||||
|
|
||||||
while data.find('\n__BEGIN') >= 0:
|
while '\n__BEGIN' in data:
|
||||||
pos1 = data.find('\n__BEGIN')
|
pos1 = data.find('\n__BEGIN')
|
||||||
pos2 = data.find('\n', pos1 + 1)
|
pos2 = data.find('\n', pos1 + 1)
|
||||||
data = data[:pos1] + data[pos2:]
|
data = data[:pos1] + data[pos2:]
|
||||||
|
|
||||||
while data.find('\n__END') >= 0:
|
while '\n__END' in data:
|
||||||
pos1 = data.find('\n__END')
|
pos1 = data.find('\n__END')
|
||||||
pos2 = data.find('\n', pos1 + 1)
|
pos2 = data.find('\n', pos1 + 1)
|
||||||
data = data[:pos1] + data[pos2:]
|
data = data[:pos1] + data[pos2:]
|
||||||
|
@ -75,24 +76,24 @@ def parseheader(cppcheckpath, filename):
|
||||||
output = []
|
output = []
|
||||||
|
|
||||||
for line in data.split('\n'):
|
for line in data.split('\n'):
|
||||||
if (line[:7] != 'extern ' and line.find(' extern ') < 0) or line[-1] != ';':
|
if (line[:7] != 'extern ' and ' extern ' not in line) or line[-1] != ';':
|
||||||
continue
|
continue
|
||||||
|
|
||||||
functionNameEnd = line.find('(') - 1
|
functionNameEnd = line.find('(') - 1
|
||||||
if functionNameEnd < 0:
|
if functionNameEnd < 0:
|
||||||
continue
|
continue
|
||||||
while line[functionNameEnd] == ' ':
|
while line[functionNameEnd] == ' ':
|
||||||
functionNameEnd = functionNameEnd - 1
|
functionNameEnd -= 1
|
||||||
if functionNameEnd < 10:
|
if functionNameEnd < 10:
|
||||||
continue
|
continue
|
||||||
functionNameStart = functionNameEnd
|
functionNameStart = functionNameEnd
|
||||||
while line[functionNameStart] == '_' or line[functionNameStart].isalnum():
|
while line[functionNameStart] == '_' or line[functionNameStart].isalnum():
|
||||||
functionNameStart = functionNameStart - 1
|
functionNameStart -= 1
|
||||||
if functionNameStart < 10:
|
if functionNameStart < 10:
|
||||||
continue
|
continue
|
||||||
if line[functionNameStart] != '*' and line[functionNameStart] != ' ':
|
if line[functionNameStart] != '*' and line[functionNameStart] != ' ':
|
||||||
continue
|
continue
|
||||||
functionNameStart = functionNameStart + 1
|
functionNameStart += 1
|
||||||
if not line[functionNameStart].isalpha():
|
if not line[functionNameStart].isalpha():
|
||||||
continue
|
continue
|
||||||
|
|
||||||
|
@ -102,13 +103,13 @@ def parseheader(cppcheckpath, filename):
|
||||||
|
|
||||||
nonnullStart = line.find('__nonnull')
|
nonnullStart = line.find('__nonnull')
|
||||||
if nonnullStart > 0:
|
if nonnullStart > 0:
|
||||||
nonnullStart = nonnullStart + 9
|
nonnullStart += 9
|
||||||
while nonnullStart < len(line) and line[nonnullStart] == ' ':
|
while nonnullStart < len(line) and line[nonnullStart] == ' ':
|
||||||
nonnullStart = nonnullStart + 1
|
nonnullStart += 1
|
||||||
if nonnullStart >= len(line) or line[nonnullStart] != '(':
|
if nonnullStart >= len(line) or line[nonnullStart] != '(':
|
||||||
continue
|
continue
|
||||||
while line[nonnullStart] == '(':
|
while line[nonnullStart] == '(':
|
||||||
nonnullStart = nonnullStart + 1
|
nonnullStart += 1
|
||||||
nonnullEnd = line.find(')', nonnullStart)
|
nonnullEnd = line.find(')', nonnullStart)
|
||||||
nonnull = line[nonnullStart:nonnullEnd]
|
nonnull = line[nonnullStart:nonnullEnd]
|
||||||
|
|
||||||
|
|
|
@ -1,6 +1,5 @@
|
||||||
|
#!/usr/bin/env python
|
||||||
import subprocess
|
import subprocess
|
||||||
import os
|
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
CMD = None
|
CMD = None
|
||||||
|
@ -23,7 +22,7 @@ if CMD is None:
|
||||||
print('Abort: No --cmd')
|
print('Abort: No --cmd')
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
if SEGFAULT == False and EXPECTED is None:
|
if not SEGFAULT and EXPECTED is None:
|
||||||
print('Abort: No --expected')
|
print('Abort: No --expected')
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
|
@ -38,6 +37,7 @@ else:
|
||||||
print('EXPECTED=' + EXPECTED)
|
print('EXPECTED=' + EXPECTED)
|
||||||
print('FILE=' + FILE)
|
print('FILE=' + FILE)
|
||||||
|
|
||||||
|
|
||||||
def runtool():
|
def runtool():
|
||||||
p = subprocess.Popen(CMD.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
p = subprocess.Popen(CMD.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||||
comm = p.communicate()
|
comm = p.communicate()
|
||||||
|
@ -46,22 +46,24 @@ def runtool():
|
||||||
return True
|
return True
|
||||||
elif p.returncode == 0:
|
elif p.returncode == 0:
|
||||||
out = comm[0] + '\n' + comm[1]
|
out = comm[0] + '\n' + comm[1]
|
||||||
if (out.find('error:') < 0) and (out.find(EXPECTED) > 0):
|
if ('error:' not in out) and (out.find(EXPECTED) > 0):
|
||||||
return True
|
return True
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
def writefile(filename, filedata):
|
def writefile(filename, filedata):
|
||||||
f = open(filename, 'wt')
|
f = open(filename, 'wt')
|
||||||
for line in filedata:
|
for line in filedata:
|
||||||
f.write(line)
|
f.write(line)
|
||||||
f.close()
|
f.close()
|
||||||
|
|
||||||
|
|
||||||
def replaceandrun(what, filedata, i, line):
|
def replaceandrun(what, filedata, i, line):
|
||||||
print(what + ' ' + str(i + 1) + '/' + str(len(filedata)) + '..')
|
print(what + ' ' + str(i + 1) + '/' + str(len(filedata)) + '..')
|
||||||
bak = filedata[i]
|
bak = filedata[i]
|
||||||
filedata[i] = line
|
filedata[i] = line
|
||||||
writefile(FILE, filedata)
|
writefile(FILE, filedata)
|
||||||
if runtool() == True:
|
if runtool():
|
||||||
print('pass')
|
print('pass')
|
||||||
writefile(BACKUPFILE, filedata)
|
writefile(BACKUPFILE, filedata)
|
||||||
return True
|
return True
|
||||||
|
@ -69,6 +71,7 @@ def replaceandrun(what, filedata, i, line):
|
||||||
filedata[i] = bak
|
filedata[i] = bak
|
||||||
return False
|
return False
|
||||||
|
|
||||||
|
|
||||||
def replaceandrun2(what, filedata, i, line1, line2):
|
def replaceandrun2(what, filedata, i, line1, line2):
|
||||||
print(what + ' ' + str(i + 1) + '/' + str(len(filedata)) + '..')
|
print(what + ' ' + str(i + 1) + '/' + str(len(filedata)) + '..')
|
||||||
bak1 = filedata[i]
|
bak1 = filedata[i]
|
||||||
|
@ -76,7 +79,7 @@ def replaceandrun2(what, filedata, i, line1, line2):
|
||||||
filedata[i] = line1
|
filedata[i] = line1
|
||||||
filedata[i + 1] = line2
|
filedata[i + 1] = line2
|
||||||
writefile(FILE, filedata)
|
writefile(FILE, filedata)
|
||||||
if runtool() == True:
|
if runtool():
|
||||||
print('pass')
|
print('pass')
|
||||||
writefile(BACKUPFILE, filedata)
|
writefile(BACKUPFILE, filedata)
|
||||||
else:
|
else:
|
||||||
|
@ -84,6 +87,7 @@ def replaceandrun2(what, filedata, i, line1, line2):
|
||||||
filedata[i] = bak1
|
filedata[i] = bak1
|
||||||
filedata[i + 1] = bak2
|
filedata[i + 1] = bak2
|
||||||
|
|
||||||
|
|
||||||
def clearandrun(what, filedata, i1, i2):
|
def clearandrun(what, filedata, i1, i2):
|
||||||
print(what + ' ' + str(i1 + 1) + '/' + str(len(filedata)) + '..')
|
print(what + ' ' + str(i1 + 1) + '/' + str(len(filedata)) + '..')
|
||||||
filedata2 = list(filedata)
|
filedata2 = list(filedata)
|
||||||
|
@ -92,19 +96,21 @@ def clearandrun(what, filedata, i1, i2):
|
||||||
filedata2[i] = ''
|
filedata2[i] = ''
|
||||||
i = i + 1
|
i = i + 1
|
||||||
writefile(FILE, filedata2)
|
writefile(FILE, filedata2)
|
||||||
if runtool() == True:
|
if runtool():
|
||||||
print('pass')
|
print('pass')
|
||||||
writefile(BACKUPFILE, filedata2)
|
writefile(BACKUPFILE, filedata2)
|
||||||
return filedata2
|
return filedata2
|
||||||
print('fail')
|
print('fail')
|
||||||
return filedata
|
return filedata
|
||||||
|
|
||||||
|
|
||||||
def removecomments(filedata):
|
def removecomments(filedata):
|
||||||
for i in range(len(filedata)):
|
for i in range(len(filedata)):
|
||||||
line = filedata[i]
|
line = filedata[i]
|
||||||
if line.find('//') >= 0:
|
if '//' in line:
|
||||||
replaceandrun('remove comment', filedata, i, line[:line.find('//')].rstrip())
|
replaceandrun('remove comment', filedata, i, line[:line.find('//')].rstrip())
|
||||||
|
|
||||||
|
|
||||||
def checkpar(line):
|
def checkpar(line):
|
||||||
par = 0
|
par = 0
|
||||||
for c in line:
|
for c in line:
|
||||||
|
@ -116,6 +122,7 @@ def checkpar(line):
|
||||||
return False
|
return False
|
||||||
return par == 0
|
return par == 0
|
||||||
|
|
||||||
|
|
||||||
def combinelines(filedata):
|
def combinelines(filedata):
|
||||||
if len(filedata) < 3:
|
if len(filedata) < 3:
|
||||||
return
|
return
|
||||||
|
@ -156,11 +163,13 @@ def combinelines(filedata):
|
||||||
fd2 = filedata[line + 1].lstrip()
|
fd2 = filedata[line + 1].lstrip()
|
||||||
replaceandrun2('combine lines', filedata, line, fd1 + fd2, '')
|
replaceandrun2('combine lines', filedata, line, fd1 + fd2, '')
|
||||||
|
|
||||||
|
|
||||||
def removedirectives(filedata):
|
def removedirectives(filedata):
|
||||||
for i in range(len(filedata)):
|
for i in range(len(filedata)):
|
||||||
if filedata[i].lstrip().startswith('#'):
|
if filedata[i].lstrip().startswith('#'):
|
||||||
replaceandrun('remove preprocessor directive', filedata, i, '')
|
replaceandrun('remove preprocessor directive', filedata, i, '')
|
||||||
|
|
||||||
|
|
||||||
def removeblocks(filedata):
|
def removeblocks(filedata):
|
||||||
if len(filedata) < 3:
|
if len(filedata) < 3:
|
||||||
return filedata
|
return filedata
|
||||||
|
@ -169,7 +178,7 @@ def removeblocks(filedata):
|
||||||
strippedline = filedata[i].strip()
|
strippedline = filedata[i].strip()
|
||||||
if len(strippedline) == 0:
|
if len(strippedline) == 0:
|
||||||
continue
|
continue
|
||||||
if ';{}'.find(strippedline[-1]) < 0:
|
if strippedline[-1] not in ';{}':
|
||||||
continue
|
continue
|
||||||
|
|
||||||
i1 = i + 1
|
i1 = i + 1
|
||||||
|
@ -201,6 +210,7 @@ def removeblocks(filedata):
|
||||||
|
|
||||||
return filedata
|
return filedata
|
||||||
|
|
||||||
|
|
||||||
def removeline(filedata):
|
def removeline(filedata):
|
||||||
stmt = True
|
stmt = True
|
||||||
for i in range(len(filedata)):
|
for i in range(len(filedata)):
|
||||||
|
@ -210,13 +220,13 @@ def removeline(filedata):
|
||||||
if len(strippedline) == 0:
|
if len(strippedline) == 0:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
if stmt and strippedline[-1] == ';' and checkpar(line) and line.find('{') < 0 and line.find('}') < 0:
|
if stmt and strippedline[-1] == ';' and checkpar(line) and '{' not in line and '}' not in line:
|
||||||
replaceandrun('remove line', filedata, i, '')
|
replaceandrun('remove line', filedata, i, '')
|
||||||
|
|
||||||
elif stmt and strippedline.find('{') > 0 and strippedline.find('}') == len(strippedline) - 1:
|
elif stmt and strippedline.find('{') > 0 and strippedline.find('}') == len(strippedline) - 1:
|
||||||
replaceandrun('remove line', filedata, i, '')
|
replaceandrun('remove line', filedata, i, '')
|
||||||
|
|
||||||
if ';{}'.find(strippedline[-1]) >= 0:
|
if strippedline[-1] in ';{}':
|
||||||
stmt = True
|
stmt = True
|
||||||
else:
|
else:
|
||||||
stmt = False
|
stmt = False
|
||||||
|
@ -224,7 +234,7 @@ def removeline(filedata):
|
||||||
|
|
||||||
# reduce..
|
# reduce..
|
||||||
print('Make sure error can be reproduced...')
|
print('Make sure error can be reproduced...')
|
||||||
if runtool() == False:
|
if not runtool():
|
||||||
print("Cannot reproduce")
|
print("Cannot reproduce")
|
||||||
sys.exit(1)
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
|
@ -1,10 +1,8 @@
|
||||||
#!/usr/bin/python
|
#!/usr/bin/env python
|
||||||
|
|
||||||
import subprocess
|
import subprocess
|
||||||
import pexpect
|
import pexpect
|
||||||
import os
|
import os
|
||||||
import shutil
|
|
||||||
import time
|
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
START = 0
|
START = 0
|
||||||
|
@ -18,37 +16,37 @@ for arg in sys.argv[1:]:
|
||||||
PASSWORD = arg
|
PASSWORD = arg
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
def compilecppcheck(CPPFLAGS):
|
def compilecppcheck(CPPFLAGS):
|
||||||
subprocess.call(['nice', 'make', 'clean'])
|
subprocess.call(['nice', 'make', 'clean'])
|
||||||
subprocess.call(['nice', 'make', 'SRCDIR=build', 'CFGDIR=' + os.path.expanduser('~/cppcheck/cfg'), 'CXXFLAGS=-g -O2', 'CPPFLAGS=' + CPPFLAGS])
|
subprocess.call(['nice', 'make', 'SRCDIR=build', 'CFGDIR=' +
|
||||||
|
os.path.expanduser('~/cppcheck/cfg'), 'CXXFLAGS=-g -O2', 'CPPFLAGS=' + CPPFLAGS])
|
||||||
subprocess.call(['cp', 'cppcheck', os.path.expanduser('~/daca2/cppcheck-O2')])
|
subprocess.call(['cp', 'cppcheck', os.path.expanduser('~/daca2/cppcheck-O2')])
|
||||||
|
|
||||||
|
|
||||||
def runcppcheck(rev, folder):
|
def runcppcheck(rev, folder):
|
||||||
subprocess.call(['rm', '-rf', os.path.expanduser('~/daca2/' + folder)])
|
subprocess.call(['rm', '-rf', os.path.expanduser('~/daca2/' + folder)])
|
||||||
subprocess.call(['nice', '--adjustment=19', 'python', os.path.expanduser('~/cppcheck/tools/daca2.py'), folder, '--rev=' + rev])
|
subprocess.call(['nice', '--adjustment=19', 'python',
|
||||||
|
os.path.expanduser('~/cppcheck/tools/daca2.py'), folder, '--rev=' + rev])
|
||||||
|
|
||||||
|
|
||||||
def daca2report(reportfolder):
|
def daca2report(reportfolder):
|
||||||
subprocess.call(['rm', '-rf', reportfolder])
|
subprocess.call(['rm', '-rf', reportfolder])
|
||||||
subprocess.call(['mkdir', reportfolder])
|
subprocess.call(['mkdir', reportfolder])
|
||||||
subprocess.call(['python', os.path.expanduser('~/cppcheck/tools/daca2-report.py'), reportfolder])
|
subprocess.call(['python', os.path.expanduser('~/cppcheck/tools/daca2-report.py'), reportfolder])
|
||||||
|
|
||||||
|
|
||||||
# Upload file to sourceforge server using scp
|
# Upload file to sourceforge server using scp
|
||||||
def upload(localfolder, webfolder):
|
def upload(localfolder, webfolder):
|
||||||
if len(PASSWORD)<3:
|
if len(PASSWORD) < 3:
|
||||||
return
|
return
|
||||||
try:
|
try:
|
||||||
child = pexpect.spawn(
|
child = pexpect.spawn(
|
||||||
'scp -r ' + localfolder + ' danielmarjamaki,cppcheck@web.sf.net:htdocs/' + webfolder)
|
'scp -r ' + localfolder + ' danielmarjamaki,cppcheck@web.sf.net:htdocs/' + webfolder)
|
||||||
#child.expect('upload@trac.cppcheck.net\'s password:')
|
# child.expect('upload@trac.cppcheck.net\'s password:')
|
||||||
child.expect('Password:')
|
child.expect('Password:')
|
||||||
child.sendline(PASSWORD)
|
child.sendline(PASSWORD)
|
||||||
child.interact()
|
child.interact()
|
||||||
except IOError:
|
except (IOError, OSError, pexpect.TIMEOUT):
|
||||||
pass
|
|
||||||
except OSError:
|
|
||||||
pass
|
|
||||||
except pexpect.TIMEOUT:
|
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
#!/usr/bin/python
|
#!/usr/bin/env python
|
||||||
#
|
#
|
||||||
# Cppcheck - A tool for static C/C++ code analysis
|
# Cppcheck - A tool for static C/C++ code analysis
|
||||||
# Copyright (C) 2007-2016 Cppcheck team.
|
# Copyright (C) 2007-2016 Cppcheck team.
|
||||||
|
@ -31,7 +31,8 @@ class MatchCompilerTest(unittest.TestCase):
|
||||||
self.assertEqual(self.mc.parseMatch(' Token::Match(tok,', 2), None)
|
self.assertEqual(self.mc.parseMatch(' Token::Match(tok,', 2), None)
|
||||||
# multiline Token::Match is not supported yet
|
# multiline Token::Match is not supported yet
|
||||||
self.assertEqual(self.mc.parseMatch(' Token::Match(Token::findsimplematch(tok,")"), ";")', 2), [
|
self.assertEqual(self.mc.parseMatch(' Token::Match(Token::findsimplematch(tok,")"), ";")', 2), [
|
||||||
'Token::Match(Token::findsimplematch(tok,")"), ";")', 'Token::findsimplematch(tok,")")', ' ";"']) # inner function call
|
'Token::Match(Token::findsimplematch(tok,")"), ";")',
|
||||||
|
'Token::findsimplematch(tok,")")', ' ";"']) # inner function call
|
||||||
|
|
||||||
def test_replaceTokenMatch(self):
|
def test_replaceTokenMatch(self):
|
||||||
input = 'if (Token::Match(tok, "foobar")) {'
|
input = 'if (Token::Match(tok, "foobar")) {'
|
||||||
|
@ -140,12 +141,14 @@ class MatchCompilerTest(unittest.TestCase):
|
||||||
# offset '5' is chosen as an arbitrary start offset to look for
|
# offset '5' is chosen as an arbitrary start offset to look for
|
||||||
res = self.mc._parseStringComparison(input, 5)
|
res = self.mc._parseStringComparison(input, 5)
|
||||||
self.assertEqual(2, len(res))
|
self.assertEqual(2, len(res))
|
||||||
self.assertEqual('str == MatchCompiler::makeConstString("abc")', input[:res[0]] + "MatchCompiler::makeConstString(" + input[res[0]:res[1]] + ")" + input[res[1]:])
|
self.assertEqual('str == MatchCompiler::makeConstString("abc")', input[:res[0]] +
|
||||||
|
"MatchCompiler::makeConstString(" + input[res[0]:res[1]] + ")" + input[res[1]:])
|
||||||
|
|
||||||
input = 'str == "a\\"b\\"c"'
|
input = 'str == "a\\"b\\"c"'
|
||||||
res = self.mc._parseStringComparison(input, 5)
|
res = self.mc._parseStringComparison(input, 5)
|
||||||
self.assertEqual(2, len(res))
|
self.assertEqual(2, len(res))
|
||||||
self.assertEqual('str == MatchCompiler::makeConstString("a\\"b\\"c")', input[:res[0]] + "MatchCompiler::makeConstString(" + input[res[0]:res[1]] + ")" + input[res[1]:])
|
self.assertEqual('str == MatchCompiler::makeConstString("a\\"b\\"c")', input[:res[0]] +
|
||||||
|
"MatchCompiler::makeConstString(" + input[res[0]:res[1]] + ")" + input[res[1]:])
|
||||||
|
|
||||||
def test_replaceCStrings(self):
|
def test_replaceCStrings(self):
|
||||||
# str() ==
|
# str() ==
|
||||||
|
@ -161,7 +164,9 @@ class MatchCompilerTest(unittest.TestCase):
|
||||||
# strAt()
|
# strAt()
|
||||||
input = 'if (match16(parent->tokAt(-3)) && tok->strAt(1) == ")")'
|
input = 'if (match16(parent->tokAt(-3)) && tok->strAt(1) == ")")'
|
||||||
output = self.mc._replaceCStrings(input)
|
output = self.mc._replaceCStrings(input)
|
||||||
self.assertEqual('if (match16(parent->tokAt(-3)) && tok->strAt(1) == MatchCompiler::makeConstString(")"))', output)
|
self.assertEqual(
|
||||||
|
'if (match16(parent->tokAt(-3)) && tok->strAt(1) == MatchCompiler::makeConstString(")"))',
|
||||||
|
output)
|
||||||
|
|
||||||
if __name__ == '__main__':
|
if __name__ == '__main__':
|
||||||
unittest.main()
|
unittest.main()
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
|
#!/usr/bin/env python3
|
||||||
# Times script using Visual Studio compiler in Windows
|
# Times script using Visual Studio compiler in Windows
|
||||||
#
|
#
|
||||||
# This script assumes that you have:
|
# This script assumes that you have:
|
||||||
|
@ -49,7 +49,8 @@ for rev in range(rev1, rev2):
|
||||||
subprocess.call([r'c:\cygwin64\bin\sed.exe', '-i', 's/140/120/', vcxproj])
|
subprocess.call([r'c:\cygwin64\bin\sed.exe', '-i', 's/140/120/', vcxproj])
|
||||||
subprocess.call('msbuild cppcheck.sln /t:build /p:configuration=Release,platform=x64'.split())
|
subprocess.call('msbuild cppcheck.sln /t:build /p:configuration=Release,platform=x64'.split())
|
||||||
print('Revision:' + str(rev))
|
print('Revision:' + str(rev))
|
||||||
p = subprocess.Popen(r'bin\cppcheck.exe src -q --showtime=summary'.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
p = subprocess.Popen(r'bin\cppcheck.exe src -q --showtime=summary'.split(),
|
||||||
|
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||||
comm = p.communicate()
|
comm = p.communicate()
|
||||||
f = open('results.txt', 'at')
|
f = open('results.txt', 'at')
|
||||||
f.write('\nREV ' + str(rev) + '\n')
|
f.write('\nREV ' + str(rev) + '\n')
|
||||||
|
|
|
@ -1,5 +1,4 @@
|
||||||
|
#!/usr/bin/env python
|
||||||
import glob
|
|
||||||
import sys
|
import sys
|
||||||
import re
|
import re
|
||||||
|
|
||||||
|
@ -22,11 +21,12 @@ fin = open(resultfile, 'rt')
|
||||||
results = fin.read()
|
results = fin.read()
|
||||||
fin.close()
|
fin.close()
|
||||||
|
|
||||||
out = {}
|
out = {
|
||||||
out['untriaged'] = ''
|
'untriaged': '',
|
||||||
out['fn'] = ''
|
'fn': '',
|
||||||
out['fp'] = ''
|
'fp': '',
|
||||||
out['tp'] = ''
|
'tp': ''
|
||||||
|
}
|
||||||
|
|
||||||
numberOfFalsePositives = 0
|
numberOfFalsePositives = 0
|
||||||
numberOfTruePositives = 0
|
numberOfTruePositives = 0
|
||||||
|
@ -71,7 +71,7 @@ for result in results.split('\n'):
|
||||||
f = open(project + '/true-positives.txt', 'rt')
|
f = open(project + '/true-positives.txt', 'rt')
|
||||||
for line in f.readlines():
|
for line in f.readlines():
|
||||||
line = line.strip()
|
line = line.strip()
|
||||||
if line.find('] -> [') > 0 or line.find('(error)') < 0:
|
if line.find('] -> [') > 0 or '(error)' not in line:
|
||||||
continue
|
continue
|
||||||
|
|
||||||
res = re.match('\\[(' + project + '.+):([0-9]+)\\]:\s+[(][a-z]+[)] (.+)', line)
|
res = re.match('\\[(' + project + '.+):([0-9]+)\\]:\s+[(][a-z]+[)] (.+)', line)
|
||||||
|
@ -107,7 +107,8 @@ else:
|
||||||
project2 = project
|
project2 = project
|
||||||
|
|
||||||
fout = open('report.html', 'wt')
|
fout = open('report.html', 'wt')
|
||||||
fout.write('<html><head><title>Cppcheck results for ' + project + '</title><link rel="stylesheet" type="text/css" href="theme1.css"></head><body>\n')
|
fout.write('<html><head><title>Cppcheck results for ' + project +
|
||||||
|
'</title><link rel="stylesheet" type="text/css" href="theme1.css"></head><body>\n')
|
||||||
fout.write('<h1>Cppcheck results for ' + project + '</h1>\n')
|
fout.write('<h1>Cppcheck results for ' + project + '</h1>\n')
|
||||||
fout.write('<p>Number of false negatives: ' + str(numberOfFalseNegatives) + '</p>\n')
|
fout.write('<p>Number of false negatives: ' + str(numberOfFalseNegatives) + '</p>\n')
|
||||||
fout.write('<p>Number of true positives: ' + str(numberOfTruePositives) + '</p>\n')
|
fout.write('<p>Number of true positives: ' + str(numberOfTruePositives) + '</p>\n')
|
||||||
|
|