2019-11-15 21:38:20 +01:00
|
|
|
#!/usr/bin/env python3
|
2017-10-09 15:25:55 +02:00
|
|
|
#
|
|
|
|
# MISRA C 2012 checkers
|
|
|
|
#
|
|
|
|
# Example usage of this addon (scan a sourcefile main.cpp)
|
|
|
|
# cppcheck --dump main.cpp
|
2018-01-20 14:13:09 +01:00
|
|
|
# python misra.py --rule-texts=<path-to-rule-texts> main.cpp.dump
|
2017-10-09 15:25:55 +02:00
|
|
|
#
|
|
|
|
# Limitations: This addon is released as open source. Rule texts can't be freely
|
|
|
|
# distributed. https://www.misra.org.uk/forum/viewtopic.php?f=56&t=1189
|
|
|
|
#
|
2018-03-16 08:12:39 +01:00
|
|
|
# The MISRA standard documents may be obtained from https://www.misra.org.uk
|
|
|
|
#
|
2018-04-10 08:55:25 +02:00
|
|
|
# Total number of rules: 143
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-05-21 21:46:58 +02:00
|
|
|
from __future__ import print_function
|
|
|
|
|
2019-12-30 17:30:17 +01:00
|
|
|
import cppcheckdata
|
2019-11-08 17:20:37 +01:00
|
|
|
import itertools
|
2017-10-09 15:25:55 +02:00
|
|
|
import sys
|
|
|
|
import re
|
2018-01-20 14:13:09 +01:00
|
|
|
import os
|
2018-05-24 06:31:20 +02:00
|
|
|
import argparse
|
2019-01-18 21:30:08 +01:00
|
|
|
import codecs
|
2019-09-28 20:16:10 +02:00
|
|
|
import string
|
2020-01-09 19:31:18 +01:00
|
|
|
from collections import defaultdict
|
2019-09-28 20:16:10 +02:00
|
|
|
|
|
|
|
try:
|
|
|
|
from itertools import izip as zip
|
|
|
|
except ImportError:
|
|
|
|
pass
|
|
|
|
|
|
|
|
|
|
|
|
def grouped(iterable, n):
|
2019-11-12 15:32:05 +01:00
|
|
|
"""s -> (s0,s1,s2,...sn-1), (sn,sn+1,sn+2,...s2n-1), (s2n,s2n+1,s2n+2,...s3n-1), ..."""
|
2020-01-08 06:54:43 +01:00
|
|
|
return zip(*[iter(iterable)] * n)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
|
2020-02-27 11:28:48 +01:00
|
|
|
INT_TYPES = ['bool', 'char', 'short', 'int', 'long', 'long long']
|
|
|
|
|
|
|
|
|
|
|
|
STDINT_TYPES = ['%s%d_t' % (n, v) for n, v in itertools.product(
|
|
|
|
['int', 'uint', 'int_least', 'uint_least', 'int_fast', 'uint_fast'],
|
|
|
|
[8, 16, 32, 64])]
|
|
|
|
|
|
|
|
|
2018-05-24 06:31:20 +02:00
|
|
|
typeBits = {
|
|
|
|
'CHAR': None,
|
|
|
|
'SHORT': None,
|
|
|
|
'INT': None,
|
|
|
|
'LONG': None,
|
|
|
|
'LONG_LONG': None,
|
|
|
|
'POINTER': None
|
|
|
|
}
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-05-24 06:31:20 +02:00
|
|
|
|
2020-02-27 11:28:48 +01:00
|
|
|
def isUnsignedType(ty):
|
|
|
|
return ty == 'unsigned' or ty.startswith('uint')
|
|
|
|
|
|
|
|
|
2017-10-09 15:25:55 +02:00
|
|
|
def simpleMatch(token, pattern):
|
|
|
|
for p in pattern.split(' '):
|
|
|
|
if not token or token.str != p:
|
|
|
|
return False
|
|
|
|
token = token.next
|
|
|
|
return True
|
|
|
|
|
2018-05-24 06:31:20 +02:00
|
|
|
|
2018-03-13 14:22:25 +01:00
|
|
|
def rawlink(rawtoken):
|
|
|
|
if rawtoken.str == '}':
|
|
|
|
indent = 0
|
|
|
|
while rawtoken:
|
|
|
|
if rawtoken.str == '}':
|
|
|
|
indent = indent + 1
|
|
|
|
elif rawtoken.str == '{':
|
|
|
|
indent = indent - 1
|
|
|
|
if indent == 0:
|
|
|
|
break
|
|
|
|
rawtoken = rawtoken.previous
|
|
|
|
else:
|
|
|
|
rawtoken = None
|
|
|
|
return rawtoken
|
|
|
|
|
2018-05-24 06:31:20 +02:00
|
|
|
|
2020-02-11 11:10:54 +01:00
|
|
|
# Identifiers described in Section 7 "Library" of C90 Standard
|
|
|
|
# Based on ISO/IEC9899:1990 Annex D -- Library summary and
|
|
|
|
# Annex E -- Implementation limits.
|
|
|
|
C90_STDLIB_IDENTIFIERS = {
|
|
|
|
# D.1 Errors
|
|
|
|
'errno.h': ['EDOM', 'ERANGE', 'errno'],
|
|
|
|
# D.2 Common definitions
|
|
|
|
'stddef.h': ['NULL', 'offsetof', 'ptrdiff_t', 'size_t', 'wchar_t'],
|
|
|
|
# D.3 Diagnostics
|
|
|
|
'assert.h': ['NDEBUG', 'assert'],
|
|
|
|
# D.4 Character handling
|
|
|
|
'ctype.h': [
|
|
|
|
'isalnum', 'isalpha', 'isblank', 'iscntrl', 'isdigit',
|
|
|
|
'isgraph', 'islower', 'isprint', 'ispunct', 'isspace',
|
|
|
|
'isupper', 'isxdigit', 'tolower', 'toupper',
|
|
|
|
],
|
|
|
|
# D.5 Localization
|
|
|
|
'locale.h': [
|
|
|
|
'LC_ALL', 'LC_COLLATE', 'LC_CTYPE', 'LC_MONETARY',
|
|
|
|
'LC_NUMERIC', 'LC_TIME', 'NULL', 'lconv',
|
|
|
|
'setlocale', 'localeconv',
|
|
|
|
],
|
|
|
|
# D.6 Mathematics
|
|
|
|
'math.h': [
|
2020-04-14 15:47:44 +02:00
|
|
|
'HUGE_VAL', 'acos', 'asin' , 'atan2', 'cos', 'sin', 'tan', 'cosh',
|
2020-02-11 11:10:54 +01:00
|
|
|
'sinh', 'tanh', 'exp', 'frexp', 'ldexp', 'log', 'loglO', 'modf',
|
|
|
|
'pow', 'sqrt', 'ceil', 'fabs', 'floor', 'fmod',
|
|
|
|
],
|
|
|
|
# D.7 Nonlocal jumps
|
|
|
|
'setjmp.h': ['jmp_buf', 'setjmp', 'longjmp'],
|
|
|
|
# D.8 Signal handling
|
|
|
|
'signal.h': [
|
|
|
|
'sig_atomic_t', 'SIG_DFL', 'SIG_ERR', 'SIG_IGN', 'SIGABRT', 'SIGFPE',
|
|
|
|
'SIGILL', 'SIGINT', 'SIGSEGV', 'SIGTERM', 'signal', 'raise',
|
|
|
|
],
|
|
|
|
# D.9 Variable arguments
|
|
|
|
'stdarg.h': ['va_list', 'va_start', 'va_arg', 'va_end'],
|
|
|
|
# D.10 Input/output
|
|
|
|
'stdio.h': [
|
|
|
|
'_IOFBF', '_IOLBF', '_IONBF', 'BUFSIZ', 'EOF', 'FILE', 'FILENAME_MAX',
|
|
|
|
'FOPEN_MAX', 'fpos_t', 'L_tmpnam', 'NULL', 'SEEK_CUR', 'SEEK_END',
|
|
|
|
'SEEK_SET', 'size_t', 'stderr', 'stdin', 'stdout', 'TMP_MAX',
|
|
|
|
'remove', 'rename', 'tmpfile', 'tmpnam', 'fclose', 'fflush', 'fopen',
|
|
|
|
'freopen', 'setbuf', 'setvbuf', 'fprintf', 'fscanf', 'printf',
|
|
|
|
'scanf', 'sprintf', 'sscanf', 'vfprintf', 'vprintf', 'vsprintf',
|
|
|
|
'fgetc', 'fgets', 'fputc', 'fputs', 'getc', 'getchar', 'gets', 'putc',
|
|
|
|
'putchar', 'puts', 'ungetc', 'fread', 'fwrite', 'fgetpos', 'fseek',
|
|
|
|
'fsetpos', 'rewind', 'clearerr', 'feof', 'ferror', 'perror',
|
|
|
|
],
|
|
|
|
# D.11 General utilities
|
|
|
|
'stdlib.h': [
|
|
|
|
'EXIT_FAILURE', 'EXIT_SUCCESS', 'MB_CUR_MAX', 'NULL', 'RAND_MAX',
|
|
|
|
'div_t', 'ldiv_t', 'wchar_t', 'atof', 'atoi', 'strtod', 'rand',
|
|
|
|
'srand', 'calloc', 'free', 'malloc', 'realloc', 'abort', 'atexit',
|
|
|
|
'exit', 'getenv', 'system', 'bsearch', 'qsort', 'abs', 'div', 'ldiv',
|
|
|
|
'mblen', 'mbtowc', 'wctomb', 'mbstowcs', 'wcstombs',
|
|
|
|
],
|
|
|
|
# D.12 String handling
|
|
|
|
'string.h': [
|
|
|
|
'NULL', 'size_t', 'memcpy', 'memmove', 'strcpy', 'strncpy', 'strcat',
|
|
|
|
'strncat', 'memcmp', 'strcmp', 'strcoll', 'strncmp', 'strxfrm',
|
|
|
|
'memchr', 'strchr', 'strcspn', 'strpbrk', 'strrchr', 'strspn',
|
|
|
|
'strstr', 'strtok', 'memset', 'strerror', 'strlen',
|
|
|
|
],
|
|
|
|
# D.13 Date and time
|
|
|
|
'time.h': [
|
|
|
|
'CLK_TCK', 'NULL', 'clock_t', 'time_t', 'size_t', 'tm', 'clock',
|
|
|
|
'difftime', 'mktime', 'time', 'asctime', 'ctime', 'gmtime',
|
|
|
|
'localtime', 'strftime',
|
|
|
|
],
|
|
|
|
# Annex E: Implementation limits
|
|
|
|
'limits.h': [
|
|
|
|
'CHAR_BIT', 'SCHAR_MIN', 'SCHAR_MAX', 'UCHAR_MAX', 'CHAR_MIN',
|
|
|
|
'CHAR_MAX', 'MB_LEN_MAX', 'SHRT_MIN', 'SHRT_MAX', 'USHRT_MAX',
|
|
|
|
'INT_MIN', 'INT_MAX', 'UINT_MAX', 'LONG_MIN', 'LONG_MAX', 'ULONG_MAX',
|
|
|
|
],
|
|
|
|
'float.h': [
|
|
|
|
'FLT_ROUNDS', 'FLT_RADIX', 'FLT_MANT_DIG', 'DBL_MANT_DIG',
|
|
|
|
'LDBL_MANT_DIG', 'DECIMAL_DIG', 'FLT_DIG', 'DBL_DIG', 'LDBL_DIG',
|
|
|
|
'DBL_MIN_EXP', 'LDBL_MIN_EXP', 'FLT_MIN_10_EXP', 'DBL_MIN_10_EXP',
|
|
|
|
'LDBL_MIN_10_EXP', 'FLT_MAX_EXP', 'DBL_MAX_EXP', 'LDBL_MAX_EXP',
|
|
|
|
'FLT_MAX_10_EXP', 'DBL_MAX_10_EXP', 'LDBL_MAX_10_EXP', 'FLT_MAX',
|
|
|
|
'DBL_MAX', 'LDBL_MAX', 'FLT_MIN', 'DBL_MIN', 'LDBL_MIN',
|
|
|
|
'FLT_EPSILON', 'DBL_EPSILON', 'LDBL_EPSILON'
|
|
|
|
],
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
# Identifiers described in Section 7 "Library" of C99 Standard
|
|
|
|
# Based on ISO/IEC 9899 WF14/N1256 Annex B -- Library summary
|
|
|
|
C99_STDLIB_IDENTIFIERS = {
|
|
|
|
# B.1 Diagnostics
|
|
|
|
'assert.h': C90_STDLIB_IDENTIFIERS['assert.h'],
|
|
|
|
# B.2 Complex
|
|
|
|
'complex.h': [
|
|
|
|
'complex', 'imaginary', 'I', '_Complex_I', '_Imaginary_I',
|
|
|
|
'CX_LIMITED_RANGE',
|
|
|
|
'cacos', 'cacosf', 'cacosl',
|
|
|
|
'casin', 'casinf', 'casinl',
|
|
|
|
'catan', 'catanf', 'catanl',
|
|
|
|
'ccos', 'ccosf', 'ccosl',
|
|
|
|
'csin', 'csinf', 'csinl',
|
|
|
|
'ctan', 'ctanf', 'ctanl',
|
|
|
|
'cacosh', 'cacoshf', 'cacoshl',
|
|
|
|
'casinh', 'casinhf', 'casinhl',
|
|
|
|
'catanh', 'catanhf', 'catanhl',
|
|
|
|
'ccosh', 'ccoshf', 'ccoshl',
|
|
|
|
'csinh', 'csinhf', 'csinhl',
|
|
|
|
'ctanh', 'ctanhf', 'ctanhl',
|
|
|
|
'cexp', 'cexpf', 'cexpl',
|
|
|
|
'clog', 'clogf', 'clogl',
|
|
|
|
'cabs', 'cabsf', 'cabsl',
|
|
|
|
'cpow', 'cpowf', 'cpowl',
|
|
|
|
'csqrt', 'csqrtf', 'csqrtl',
|
|
|
|
'carg', 'cargf', 'cargl',
|
|
|
|
'cimag', 'cimagf', 'cimagl',
|
|
|
|
'conj', 'conjf', 'conjl',
|
|
|
|
'cproj', 'cprojf', 'cprojl',
|
|
|
|
'creal', 'crealf', 'creall',
|
|
|
|
],
|
|
|
|
# B.3 Character handling
|
|
|
|
'ctype.h': C90_STDLIB_IDENTIFIERS['ctype.h'],
|
|
|
|
# B.4 Errors
|
|
|
|
'errno.h': C90_STDLIB_IDENTIFIERS['errno.h'] + ['EILSEQ'],
|
|
|
|
# B.5 Floating-point environment
|
|
|
|
'fenv.h': [
|
|
|
|
'fenv_t', 'FE_OVERFLOW', 'FE_TOWARDZERO',
|
|
|
|
'fexcept_t', 'FE_UNDERFLOW', 'FE_UPWARD',
|
|
|
|
'FE_DIVBYZERO', 'FE_ALL_EXCEPT', 'FE_DFL_ENV',
|
|
|
|
'FE_INEXACT', 'FE_DOWNWARD',
|
|
|
|
'FE_INVALID', 'FE_TONEAREST',
|
|
|
|
'FENV_ACCESS',
|
|
|
|
'feclearexcept', 'fegetexceptflag', 'fegetround',
|
|
|
|
'fesetround', 'fegetenv', 'feholdexcept',
|
|
|
|
'fesetenv', 'feupdateenv',
|
|
|
|
],
|
|
|
|
# B.6 Characteristics of floating types
|
|
|
|
'float.h': C90_STDLIB_IDENTIFIERS['float.h'] + ['FLT_EVAL_METHOD'],
|
|
|
|
# B.7 Format conversion of integer types
|
|
|
|
'inttypes.h': [
|
|
|
|
'imaxdiv_t', 'imaxabs', 'imaxdiv', 'strtoimax',
|
|
|
|
'strtoumax', 'wcstoimax', 'wcstoumax',
|
|
|
|
],
|
|
|
|
# B.8 Alternative spellings
|
|
|
|
'iso646.h': [
|
|
|
|
'and', 'and_eq', 'bitand', 'bitor', 'compl', 'not', 'not_eq',
|
|
|
|
'or', 'or_eq', 'xor', 'xor_eq',
|
|
|
|
],
|
|
|
|
# B.9 Size of integer types
|
|
|
|
'limits.h': C90_STDLIB_IDENTIFIERS['limits.h'] +
|
|
|
|
['LLONG_MIN', 'LLONG_MAX', 'ULLONG_MAX'],
|
|
|
|
# B.10 Localization
|
|
|
|
'locale.h': C90_STDLIB_IDENTIFIERS['locale.h'],
|
|
|
|
# B.11 Mathematics
|
|
|
|
'math.h': C90_STDLIB_IDENTIFIERS['math.h'] + [
|
|
|
|
'float_t', 'double_t', 'HUGE_VAL', 'HUGE_VALF', 'HUGE_VALL',
|
|
|
|
'INFINITY', 'NAN', 'FP_INFINITE', 'FP_NAN', 'FP_NORMAL',
|
|
|
|
'FP_SUBNORMAL', 'FP_ZERO', 'FP_FAST_FMA', 'FP_FAST_FMAF',
|
|
|
|
'FP_FAST_FMAL', 'FP_ILOGB0', 'FP_ILOGBNAN', 'MATH_ERRNO',
|
|
|
|
'MATH_ERREXCEPT', 'math_errhandling', 'FP_CONTRACT', 'fpclassify',
|
|
|
|
'isfinite', 'isinf', 'isnan', 'isnormal', 'signbit', 'acosf', 'acosl',
|
|
|
|
'asinf', 'asinl', 'atanf', 'atanl', 'atan2', 'atan2f', 'atan2l',
|
|
|
|
'cosf', 'cosl', 'sinf', 'sinl', 'tanf', 'tanl', 'acosh', 'acoshf',
|
|
|
|
'acoshl', 'asinh', 'asinhf', 'asinhl', 'atanh', 'atanhf', 'atanhl',
|
|
|
|
'cosh', 'coshf', 'coshl', 'sinh', 'sinhf', 'sinhl', 'tanh', 'tanhf',
|
|
|
|
'tanhl', 'expf', 'expl', 'exp2', 'exp2f', 'exp2l', 'expm1', 'expm1f',
|
|
|
|
'expm1l', 'frexpf', 'frexpl', 'ilogb', 'ilogbf', 'ilogbl', 'float',
|
|
|
|
'ldexpl', 'logf', 'logl', 'log10f', 'log10l', 'log1p', 'log1pf',
|
|
|
|
'log1pl', 'log2', 'log2f', 'log2l', 'logb', 'logbf', 'logbl', 'modff',
|
|
|
|
'modfl', 'scalbn', 'scalbnf', 'scalbnl', 'scalbln', 'scalblnf',
|
|
|
|
'scalblnl', 'hypotl', 'powf', 'powl', 'sqrtf', 'sqrtl', 'erf', 'erff',
|
|
|
|
'erfl', 'erfc', 'erfcf', 'erfcl', 'lgamma', 'lgammaf', 'lgammal',
|
|
|
|
'tgamma', 'tgammaf', 'tgammal', 'ceilf', 'ceill', 'floorf', 'floorl',
|
|
|
|
'nearbyint', 'nearbyintf', 'nearbyintl', 'rint', 'rintf', 'rintl',
|
|
|
|
'lrint', 'lrintf', 'lrintl', 'llrint', 'llrintf', 'llrintl', 'round',
|
|
|
|
'roundf', 'roundl', 'lround', 'lroundf', 'lroundl', 'llround',
|
|
|
|
'llroundf', 'llroundl', 'trunc', 'truncf', 'truncl', 'fmodf', 'fmodl',
|
|
|
|
'remainder', 'remainderf', 'remainderl', 'remquo', 'remquof',
|
|
|
|
'remquol', 'copysign', 'copysignf', 'copysignl', 'nan', 'nanf',
|
|
|
|
'nanl', 'nextafter', 'nextafterf', 'nextafterl', 'nexttoward',
|
|
|
|
'nexttowardf', 'nexttowardl', 'fdim', 'fdimf', 'fdiml', 'fmax',
|
|
|
|
'fmaxf', 'fmaxl', 'fmin', 'fminf', 'fminl', 'fmal', 'isgreater',
|
|
|
|
'isgreaterequal', 'isless', 'islessequal', 'islessgreater',
|
|
|
|
'isunordered',
|
|
|
|
],
|
|
|
|
# B.12 Nonlocal jumps
|
|
|
|
'setjmp.h': C90_STDLIB_IDENTIFIERS['setjmp.h'],
|
|
|
|
# B.13 Signal handling
|
|
|
|
'signal.h': C90_STDLIB_IDENTIFIERS['signal.h'],
|
|
|
|
# B.14 Variable arguments
|
|
|
|
'stdarg.h': C90_STDLIB_IDENTIFIERS['stdarg.h'] + ['va_copy'],
|
|
|
|
# B.15 Boolean type and values
|
|
|
|
'stdbool.h': ['bool', 'true', 'false', '__bool_true_false_are_defined'],
|
|
|
|
# B.16 Common definitions
|
|
|
|
'stddef.h': C90_STDLIB_IDENTIFIERS['stddef.h'],
|
|
|
|
# B.17 Integer types
|
|
|
|
'stdint.h': [
|
|
|
|
'intptr_t', 'uintptr_t', 'intmax_t', 'uintmax_t', 'INTN_MIN',
|
|
|
|
'INTN_MAX', 'UINTN_MAX', 'INT_LEASTN_MIN', 'INT_LEASTN_MAX',
|
|
|
|
'UINT_LEASTN_MAX', 'INT_FASTN_MIN', 'INT_FASTN_MAX', 'UINT_FASTN_MAX',
|
|
|
|
'INTPTR_MIN', 'INTPTR_MAX', 'UINTPTR_MAX', 'INTMAX_MIN', 'INTMAX_MAX',
|
|
|
|
'UINTMAX_MAX', 'PTRDIFF_MIN', 'PTRDIFF_MAX', 'SIG_ATOMIC_MIN',
|
|
|
|
'SIG_ATOMIC_MAX', 'SIZE_MAX', 'WCHAR_MIN', 'WCHAR_MAX', 'WINT_MIN',
|
|
|
|
'WINT_MAX', 'INTN_C', 'UINTN_C', 'INTMAX_C', 'UINTMAX_C',
|
2020-02-27 11:28:48 +01:00
|
|
|
] + STDINT_TYPES,
|
2020-02-11 11:10:54 +01:00
|
|
|
# B.18 Input/output
|
|
|
|
'stdio.h': C90_STDLIB_IDENTIFIERS['stdio.h'] + [
|
|
|
|
'mode', 'restrict', 'snprintf', 'vfscanf', 'vscanf',
|
|
|
|
'vsnprintf', 'vsscanf',
|
|
|
|
],
|
|
|
|
# B.19 General utilities
|
|
|
|
'stdlib.h': C90_STDLIB_IDENTIFIERS['stdlib.h'] + [
|
|
|
|
'_Exit', 'labs', 'llabs', 'lldiv', 'lldiv_t', 'strtof', 'strtol',
|
|
|
|
'strtold', 'strtoll', 'strtoul', 'strtoull'
|
|
|
|
],
|
|
|
|
# B.20 String handling
|
|
|
|
'string.h': C90_STDLIB_IDENTIFIERS['string.h'],
|
|
|
|
# B.21 Type-generic math
|
|
|
|
'tgmath.h': [
|
|
|
|
'acos', 'asin', 'atan', 'acosh', 'asinh', 'atanh', 'cos', 'sin', 'tan',
|
|
|
|
'cosh', 'sinh', 'tanh', 'exp', 'log', 'pow', 'sqrt', 'fabs', 'atan2',
|
|
|
|
'cbrt', 'ceil', 'copysign', 'erf', 'erfc', 'exp2', 'expm1', 'fdim',
|
|
|
|
'floor', 'fma', 'fmax', 'fmin', 'fmod', 'frexp', 'hypot', 'ilogb',
|
|
|
|
'ldexp', 'lgamma', 'llrint', 'llround', 'log10', 'log1p', 'log2',
|
|
|
|
'logb', 'lrint', 'lround', 'nearbyint', 'nextafter', 'nexttoward',
|
|
|
|
'remainder', 'remquo', 'rint', 'round', 'scalbn', 'scalbln', 'tgamma',
|
|
|
|
'trunc', 'carg', 'cimag', 'conj', 'cproj', 'creal',
|
|
|
|
],
|
|
|
|
# B.22 Date and time
|
|
|
|
'time.h': C90_STDLIB_IDENTIFIERS['time.h'] + ['CLOCKS_PER_SEC'],
|
|
|
|
# B.23 Extended multibyte/wide character utilities
|
|
|
|
'wchar.h': [
|
|
|
|
'wchar_t', 'size_t', 'mbstate_t', 'wint_t', 'tm', 'NULL', 'WCHAR_MAX',
|
|
|
|
'WCHAR_MIN', 'WEOF', 'fwprintf', 'fwscanf', 'swprintf', 'swscanf',
|
|
|
|
'vfwprintf', 'vfwscanf', 'vswprintf', 'vswscanf', 'vwprintf',
|
|
|
|
'vwscanf', 'wprintf', 'wscanf', 'fgetwc', 'fgetws', 'fputwc', 'fputws',
|
|
|
|
'fwide', 'getwc', 'getwchar', 'putwc', 'putwchar', 'ungetwc', 'wcstod',
|
|
|
|
'wcstof', 'double', 'int', 'long', 'long', 'long', 'wcscpy', 'wcsncpy',
|
|
|
|
'wmemcpy', 'wmemmove', 'wcscat', 'wcsncat', 'wcscmp', 'wcscoll',
|
|
|
|
'wcsncmp', 'wcsxfrm', 'wmemcmp', 'wcschr', 'wcscspn', 'wcspbrk',
|
|
|
|
'wcsrchr', 'wcsspn', 'wcsstr', 'wcstok', 'wmemchr', 'wcslen',
|
|
|
|
'wmemset', 'wcsftime', 'btowc', 'wctob', 'mbsinit', 'mbrlen',
|
|
|
|
'mbrtowc', 'wcrtomb', 'mbsrtowcs', 'wcsrtombs',
|
|
|
|
],
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
def isStdLibId(id_, standard='c99'):
|
|
|
|
id_lists = []
|
|
|
|
if standard == 'c89':
|
|
|
|
id_lists = C90_STDLIB_IDENTIFIERS.values()
|
|
|
|
elif standard == 'c99':
|
|
|
|
id_lists = C99_STDLIB_IDENTIFIERS.values()
|
|
|
|
for l in id_lists:
|
|
|
|
if id_ in l:
|
|
|
|
return True
|
|
|
|
return False
|
|
|
|
|
|
|
|
|
2020-02-09 10:46:13 +01:00
|
|
|
# Reserved keywords defined in ISO/IEC9899:1990 -- ch 6.1.1
|
|
|
|
C90_KEYWORDS = {
|
|
|
|
'auto', 'break', 'double', 'else', 'enum', 'extern', 'float', 'for',
|
|
|
|
'goto', 'if', 'case', 'char', 'const', 'continue', 'default', 'do', 'int',
|
|
|
|
'long', 'struct', 'switch', 'register', 'typedef', 'union', 'unsigned',
|
|
|
|
'void', 'volatile', 'while', 'return', 'short', 'signed', 'sizeof',
|
|
|
|
'static'
|
2017-10-09 15:25:55 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2020-02-09 10:46:13 +01:00
|
|
|
# Reserved keywords defined in ISO/IEC 9899 WF14/N1256 -- ch. 6.4.1
|
|
|
|
C99_KEYWORDS = {
|
|
|
|
'auto', 'break', 'case', 'char', 'const', 'continue', 'default', 'do',
|
|
|
|
'double', 'else', 'enum', 'extern', 'float', 'for', 'goto', 'if', 'inline',
|
|
|
|
'int', 'long', 'register', 'restrict', 'return', 'short', 'signed',
|
|
|
|
'sizeof', 'static', 'struct', 'switch', 'typedef', 'union', 'unsigned',
|
|
|
|
'void', 'volatile', 'while', '_Bool', '_Complex', '_Imaginary'
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
def isKeyword(keyword, standard='c99'):
|
|
|
|
kw_set = {}
|
|
|
|
if standard == 'c89':
|
|
|
|
kw_set = C90_KEYWORDS
|
|
|
|
elif standard == 'c99':
|
|
|
|
kw_set = C99_KEYWORDS
|
|
|
|
return keyword in kw_set
|
|
|
|
|
|
|
|
|
2018-05-21 12:04:20 +02:00
|
|
|
def getEssentialTypeCategory(expr):
|
|
|
|
if not expr:
|
|
|
|
return None
|
2019-08-11 10:15:07 +02:00
|
|
|
if expr.str == ',':
|
|
|
|
return getEssentialTypeCategory(expr.astOperand2)
|
|
|
|
if expr.str in ('<', '<=', '==', '!=', '>=', '>', '&&', '||', '!'):
|
|
|
|
return 'bool'
|
|
|
|
if expr.str in ('<<', '>>'):
|
|
|
|
# TODO this is incomplete
|
|
|
|
return getEssentialTypeCategory(expr.astOperand1)
|
|
|
|
if len(expr.str) == 1 and expr.str in '+-*/%&|^':
|
|
|
|
# TODO this is incomplete
|
2019-08-11 09:47:37 +02:00
|
|
|
e1 = getEssentialTypeCategory(expr.astOperand1)
|
|
|
|
e2 = getEssentialTypeCategory(expr.astOperand2)
|
2020-01-08 06:54:43 +01:00
|
|
|
# print('{0}: {1} {2}'.format(expr.str, e1, e2))
|
2019-08-11 09:47:37 +02:00
|
|
|
if e1 and e2 and e1 == e2:
|
|
|
|
return e1
|
|
|
|
if expr.valueType:
|
|
|
|
return expr.valueType.sign
|
2019-12-01 14:51:12 +01:00
|
|
|
if expr.valueType and expr.valueType.typeScope and expr.valueType.typeScope.className:
|
2019-08-11 10:15:07 +02:00
|
|
|
return "enum<" + expr.valueType.typeScope.className + ">"
|
2018-05-21 12:04:20 +02:00
|
|
|
if expr.variable:
|
|
|
|
typeToken = expr.variable.typeStartToken
|
|
|
|
while typeToken:
|
|
|
|
if typeToken.valueType:
|
2019-08-10 18:12:23 +02:00
|
|
|
if typeToken.valueType.type == 'bool':
|
2018-05-21 12:04:20 +02:00
|
|
|
return typeToken.valueType.type
|
2019-08-10 18:12:23 +02:00
|
|
|
if typeToken.valueType.type in ('float', 'double', 'long double'):
|
2018-05-21 12:04:20 +02:00
|
|
|
return "float"
|
|
|
|
if typeToken.valueType.sign:
|
|
|
|
return typeToken.valueType.sign
|
|
|
|
typeToken = typeToken.next
|
|
|
|
if expr.valueType:
|
|
|
|
return expr.valueType.sign
|
2018-05-23 16:26:00 +02:00
|
|
|
return None
|
2018-05-21 12:04:20 +02:00
|
|
|
|
|
|
|
|
|
|
|
def getEssentialCategorylist(operand1, operand2):
|
|
|
|
if not operand1 or not operand2:
|
|
|
|
return None, None
|
2019-08-10 18:12:23 +02:00
|
|
|
if (operand1.str in ('++', '--') or
|
|
|
|
operand2.str in ('++', '--')):
|
2018-05-21 12:04:20 +02:00
|
|
|
return None, None
|
2019-04-10 21:21:17 +02:00
|
|
|
if ((operand1.valueType and operand1.valueType.pointer) or
|
|
|
|
(operand2.valueType and operand2.valueType.pointer)):
|
2018-05-21 12:04:20 +02:00
|
|
|
return None, None
|
|
|
|
e1 = getEssentialTypeCategory(operand1)
|
|
|
|
e2 = getEssentialTypeCategory(operand2)
|
|
|
|
return e1, e2
|
|
|
|
|
|
|
|
|
2017-10-09 15:25:55 +02:00
|
|
|
def getEssentialType(expr):
|
|
|
|
if not expr:
|
|
|
|
return None
|
|
|
|
if expr.variable:
|
|
|
|
typeToken = expr.variable.typeStartToken
|
|
|
|
while typeToken and typeToken.isName:
|
2020-02-27 11:28:48 +01:00
|
|
|
if typeToken.str in INT_TYPES + STDINT_TYPES + ['float', 'double']:
|
2017-10-09 15:25:55 +02:00
|
|
|
return typeToken.str
|
|
|
|
typeToken = typeToken.next
|
|
|
|
|
2020-01-08 06:54:43 +01:00
|
|
|
elif expr.astOperand1 and expr.astOperand2 and expr.str in (
|
|
|
|
'+', '-', '*', '/', '%', '&', '|', '^', '>>', "<<", "?", ":"):
|
2018-05-24 22:28:36 +02:00
|
|
|
if expr.astOperand1.valueType and expr.astOperand1.valueType.pointer > 0:
|
|
|
|
return None
|
|
|
|
if expr.astOperand2.valueType and expr.astOperand2.valueType.pointer > 0:
|
|
|
|
return None
|
2017-10-09 15:25:55 +02:00
|
|
|
e1 = getEssentialType(expr.astOperand1)
|
|
|
|
e2 = getEssentialType(expr.astOperand2)
|
|
|
|
if not e1 or not e2:
|
|
|
|
return None
|
2020-02-27 11:28:48 +01:00
|
|
|
if bitsOfEssentialType(e2) >= bitsOfEssentialType(e1):
|
|
|
|
return e2
|
|
|
|
else:
|
|
|
|
return e1
|
2018-05-24 22:28:36 +02:00
|
|
|
elif expr.str == "~":
|
|
|
|
e1 = getEssentialType(expr.astOperand1)
|
|
|
|
return e1
|
2017-10-09 15:25:55 +02:00
|
|
|
|
|
|
|
return None
|
|
|
|
|
|
|
|
|
2020-02-27 11:28:48 +01:00
|
|
|
def bitsOfEssentialType(ty):
|
|
|
|
if ty is None:
|
2017-10-09 15:25:55 +02:00
|
|
|
return 0
|
2020-02-27 11:28:48 +01:00
|
|
|
if ty == 'char':
|
2018-05-24 06:31:20 +02:00
|
|
|
return typeBits['CHAR']
|
2020-02-27 11:28:48 +01:00
|
|
|
if ty == 'short':
|
2018-05-24 06:31:20 +02:00
|
|
|
return typeBits['SHORT']
|
2020-02-27 11:28:48 +01:00
|
|
|
if ty == 'int':
|
2018-05-24 06:31:20 +02:00
|
|
|
return typeBits['INT']
|
2020-02-27 11:28:48 +01:00
|
|
|
if ty == 'long':
|
2018-05-24 06:31:20 +02:00
|
|
|
return typeBits['LONG']
|
2020-02-27 11:28:48 +01:00
|
|
|
if ty == 'long long':
|
2018-05-24 06:31:20 +02:00
|
|
|
return typeBits['LONG_LONG']
|
2020-02-27 11:28:48 +01:00
|
|
|
for sty in STDINT_TYPES:
|
|
|
|
if ty == sty:
|
|
|
|
return int(''.join(filter(str.isdigit, sty)))
|
2017-10-09 15:25:55 +02:00
|
|
|
return 0
|
|
|
|
|
|
|
|
|
|
|
|
def isCast(expr):
|
|
|
|
if not expr or expr.str != '(' or not expr.astOperand1 or expr.astOperand2:
|
|
|
|
return False
|
|
|
|
if simpleMatch(expr, '( )'):
|
|
|
|
return False
|
|
|
|
return True
|
|
|
|
|
|
|
|
|
2020-02-09 10:46:13 +01:00
|
|
|
def isFunctionCall(expr, std='c99'):
|
2017-10-09 15:25:55 +02:00
|
|
|
if not expr:
|
|
|
|
return False
|
|
|
|
if expr.str != '(' or not expr.astOperand1:
|
|
|
|
return False
|
|
|
|
if expr.astOperand1 != expr.previous:
|
|
|
|
return False
|
2020-02-09 10:46:13 +01:00
|
|
|
if isKeyword(expr.astOperand1.str, std):
|
2017-10-09 15:25:55 +02:00
|
|
|
return False
|
|
|
|
return True
|
|
|
|
|
|
|
|
|
2019-05-26 18:46:00 +02:00
|
|
|
def hasExternalLinkage(var):
|
2019-06-27 06:59:47 +02:00
|
|
|
return var.isGlobal and not var.isStatic
|
2019-05-26 18:46:00 +02:00
|
|
|
|
|
|
|
|
2017-10-09 15:25:55 +02:00
|
|
|
def countSideEffects(expr):
|
2019-08-10 18:12:23 +02:00
|
|
|
if not expr or expr.str in (',', ';'):
|
2017-10-09 15:25:55 +02:00
|
|
|
return 0
|
|
|
|
ret = 0
|
2019-08-10 18:12:23 +02:00
|
|
|
if expr.str in ('++', '--', '='):
|
2017-10-09 15:25:55 +02:00
|
|
|
ret = 1
|
|
|
|
return ret + countSideEffects(expr.astOperand1) + countSideEffects(expr.astOperand2)
|
|
|
|
|
|
|
|
|
|
|
|
def getForLoopExpressions(forToken):
|
|
|
|
if not forToken or forToken.str != 'for':
|
|
|
|
return None
|
|
|
|
lpar = forToken.next
|
|
|
|
if not lpar or lpar.str != '(':
|
|
|
|
return None
|
|
|
|
if not lpar.astOperand2 or lpar.astOperand2.str != ';':
|
|
|
|
return None
|
|
|
|
if not lpar.astOperand2.astOperand2 or lpar.astOperand2.astOperand2.str != ';':
|
|
|
|
return None
|
|
|
|
return [lpar.astOperand2.astOperand1,
|
|
|
|
lpar.astOperand2.astOperand2.astOperand1,
|
|
|
|
lpar.astOperand2.astOperand2.astOperand2]
|
|
|
|
|
|
|
|
|
2019-12-15 18:23:12 +01:00
|
|
|
def getForLoopCounterVariables(forToken):
|
|
|
|
""" Return a set of Variable objects defined in ``for`` statement and
|
|
|
|
satisfy requirements to loop counter term from section 8.14 of MISRA
|
|
|
|
document.
|
|
|
|
"""
|
|
|
|
if not forToken or forToken.str != 'for':
|
|
|
|
return None
|
|
|
|
tn = forToken.next
|
|
|
|
if not tn or tn.str != '(':
|
|
|
|
return None
|
|
|
|
vars_defined = set()
|
|
|
|
vars_exit = set()
|
|
|
|
vars_modified = set()
|
|
|
|
cur_clause = 1
|
|
|
|
te = tn.link
|
|
|
|
while tn and tn != te:
|
|
|
|
if tn.variable:
|
|
|
|
if cur_clause == 1 and tn.variable.nameToken == tn:
|
|
|
|
vars_defined.add(tn.variable)
|
|
|
|
elif cur_clause == 2:
|
|
|
|
vars_exit.add(tn.variable)
|
|
|
|
elif cur_clause == 3:
|
|
|
|
if tn.next and hasSideEffectsRecursive(tn.next):
|
|
|
|
vars_modified.add(tn.variable)
|
|
|
|
elif tn.previous and tn.previous.str in ('++', '--'):
|
|
|
|
vars_modified.add(tn.variable)
|
|
|
|
if tn.str == ';':
|
|
|
|
cur_clause += 1
|
|
|
|
tn = tn.next
|
|
|
|
return vars_defined & vars_exit & vars_modified
|
|
|
|
|
|
|
|
|
2018-06-04 10:12:51 +02:00
|
|
|
def findCounterTokens(cond):
|
|
|
|
if not cond:
|
|
|
|
return []
|
|
|
|
if cond.str in ['&&', '||']:
|
|
|
|
c = findCounterTokens(cond.astOperand1)
|
|
|
|
c.extend(findCounterTokens(cond.astOperand2))
|
|
|
|
return c
|
|
|
|
ret = []
|
|
|
|
if ((cond.isArithmeticalOp and cond.astOperand1 and cond.astOperand2) or
|
|
|
|
(cond.isComparisonOp and cond.astOperand1 and cond.astOperand2)):
|
|
|
|
if cond.astOperand1.isName:
|
|
|
|
ret.append(cond.astOperand1)
|
|
|
|
if cond.astOperand2.isName:
|
|
|
|
ret.append(cond.astOperand2)
|
|
|
|
if cond.astOperand1.isOp:
|
|
|
|
ret.extend(findCounterTokens(cond.astOperand1))
|
|
|
|
if cond.astOperand2.isOp:
|
|
|
|
ret.extend(findCounterTokens(cond.astOperand2))
|
|
|
|
return ret
|
|
|
|
|
|
|
|
|
|
|
|
def isFloatCounterInWhileLoop(whileToken):
|
|
|
|
if not simpleMatch(whileToken, 'while ('):
|
|
|
|
return False
|
|
|
|
lpar = whileToken.next
|
|
|
|
rpar = lpar.link
|
|
|
|
counterTokens = findCounterTokens(lpar.astOperand2)
|
|
|
|
whileBodyStart = None
|
|
|
|
if simpleMatch(rpar, ') {'):
|
|
|
|
whileBodyStart = rpar.next
|
|
|
|
elif simpleMatch(whileToken.previous, '} while') and simpleMatch(whileToken.previous.link.previous, 'do {'):
|
|
|
|
whileBodyStart = whileToken.previous.link
|
|
|
|
else:
|
2017-10-09 15:25:55 +02:00
|
|
|
return False
|
2018-06-04 10:12:51 +02:00
|
|
|
token = whileBodyStart
|
2018-08-05 20:36:21 +02:00
|
|
|
while token != whileBodyStart.link:
|
2018-06-04 10:12:51 +02:00
|
|
|
token = token.next
|
|
|
|
for counterToken in counterTokens:
|
|
|
|
if not counterToken.valueType or not counterToken.valueType.isFloat():
|
|
|
|
continue
|
|
|
|
if token.isAssignmentOp and token.astOperand1.str == counterToken.str:
|
|
|
|
return True
|
2019-08-10 18:12:23 +02:00
|
|
|
if token.str == counterToken.str and token.astParent and token.astParent.str in ('++', '--'):
|
2018-06-04 10:12:51 +02:00
|
|
|
return True
|
2017-10-09 15:25:55 +02:00
|
|
|
return False
|
|
|
|
|
|
|
|
|
|
|
|
def hasSideEffectsRecursive(expr):
|
2019-12-15 18:23:12 +01:00
|
|
|
if not expr or expr.str == ';':
|
2017-10-09 15:25:55 +02:00
|
|
|
return False
|
|
|
|
if expr.str == '=' and expr.astOperand1 and expr.astOperand1.str == '[':
|
|
|
|
prev = expr.astOperand1.previous
|
|
|
|
if prev and (prev.str == '{' or prev.str == '{'):
|
|
|
|
return hasSideEffectsRecursive(expr.astOperand2)
|
2018-05-22 13:27:27 +02:00
|
|
|
if expr.str == '=' and expr.astOperand1 and expr.astOperand1.str == '.':
|
|
|
|
e = expr.astOperand1
|
|
|
|
while e and e.str == '.' and e.astOperand2:
|
|
|
|
e = e.astOperand1
|
|
|
|
if e and e.str == '.':
|
|
|
|
return False
|
2019-12-15 18:23:12 +01:00
|
|
|
if expr.isAssignmentOp or expr.str in {'++', '--'}:
|
2017-10-09 15:25:55 +02:00
|
|
|
return True
|
|
|
|
# Todo: Check function calls
|
|
|
|
return hasSideEffectsRecursive(expr.astOperand1) or hasSideEffectsRecursive(expr.astOperand2)
|
|
|
|
|
|
|
|
|
|
|
|
def isBoolExpression(expr):
|
2018-04-18 16:20:54 +02:00
|
|
|
if not expr:
|
|
|
|
return False
|
2018-05-02 20:56:03 +02:00
|
|
|
if expr.valueType and (expr.valueType.type == 'bool' or expr.valueType.bits == 1):
|
2018-04-18 16:20:54 +02:00
|
|
|
return True
|
2018-05-29 06:50:32 +02:00
|
|
|
return expr.str in ['!', '==', '!=', '<', '<=', '>', '>=', '&&', '||', '0', '1', 'true', 'false']
|
2017-10-09 15:25:55 +02:00
|
|
|
|
|
|
|
|
2020-09-25 21:02:34 +02:00
|
|
|
def isEnumConstant(expr):
|
|
|
|
if not expr or not expr.values:
|
|
|
|
return False
|
|
|
|
values = expr.values
|
|
|
|
return len(values) == 1 and values[0].valueKind == 'known'
|
|
|
|
|
|
|
|
|
2017-10-09 15:25:55 +02:00
|
|
|
def isConstantExpression(expr):
|
|
|
|
if expr.isNumber:
|
|
|
|
return True
|
2020-09-25 21:02:34 +02:00
|
|
|
if expr.isName and not isEnumConstant(expr):
|
2017-10-09 15:25:55 +02:00
|
|
|
return False
|
|
|
|
if simpleMatch(expr.previous, 'sizeof ('):
|
|
|
|
return True
|
|
|
|
if expr.astOperand1 and not isConstantExpression(expr.astOperand1):
|
|
|
|
return False
|
|
|
|
if expr.astOperand2 and not isConstantExpression(expr.astOperand2):
|
|
|
|
return False
|
|
|
|
return True
|
|
|
|
|
|
|
|
|
|
|
|
def isUnsignedInt(expr):
|
2019-12-08 18:52:52 +01:00
|
|
|
return expr and expr.valueType and expr.valueType.type in ('short', 'int') and expr.valueType.sign == 'unsigned'
|
2017-10-09 15:25:55 +02:00
|
|
|
|
|
|
|
|
|
|
|
def getPrecedence(expr):
|
|
|
|
if not expr:
|
|
|
|
return 16
|
|
|
|
if not expr.astOperand1 or not expr.astOperand2:
|
|
|
|
return 16
|
2019-08-10 18:12:23 +02:00
|
|
|
if expr.str in ('*', '/', '%'):
|
2017-10-09 15:25:55 +02:00
|
|
|
return 12
|
2019-08-10 18:12:23 +02:00
|
|
|
if expr.str in ('+', '-'):
|
2017-10-09 15:25:55 +02:00
|
|
|
return 11
|
2019-08-10 18:12:23 +02:00
|
|
|
if expr.str in ('<<', '>>'):
|
2017-10-09 15:25:55 +02:00
|
|
|
return 10
|
2019-08-10 18:12:23 +02:00
|
|
|
if expr.str in ('<', '>', '<=', '>='):
|
2017-10-09 15:25:55 +02:00
|
|
|
return 9
|
2019-08-10 18:12:23 +02:00
|
|
|
if expr.str in ('==', '!='):
|
2017-10-09 15:25:55 +02:00
|
|
|
return 8
|
|
|
|
if expr.str == '&':
|
|
|
|
return 7
|
|
|
|
if expr.str == '^':
|
|
|
|
return 6
|
|
|
|
if expr.str == '|':
|
|
|
|
return 5
|
|
|
|
if expr.str == '&&':
|
|
|
|
return 4
|
|
|
|
if expr.str == '||':
|
|
|
|
return 3
|
2019-08-10 18:12:23 +02:00
|
|
|
if expr.str in ('?', ':'):
|
2017-10-09 15:25:55 +02:00
|
|
|
return 2
|
|
|
|
if expr.isAssignmentOp:
|
|
|
|
return 1
|
|
|
|
if expr.str == ',':
|
|
|
|
return 0
|
|
|
|
return -1
|
|
|
|
|
|
|
|
|
2018-04-24 09:28:24 +02:00
|
|
|
def findRawLink(token):
|
|
|
|
tok1 = None
|
|
|
|
tok2 = None
|
|
|
|
forward = False
|
|
|
|
|
|
|
|
if token.str in '{([':
|
|
|
|
tok1 = token.str
|
|
|
|
tok2 = '})]'['{(['.find(token.str)]
|
|
|
|
forward = True
|
|
|
|
elif token.str in '})]':
|
|
|
|
tok1 = token.str
|
|
|
|
tok2 = '{(['['})]'.find(token.str)]
|
|
|
|
forward = False
|
|
|
|
else:
|
|
|
|
return None
|
|
|
|
|
|
|
|
# try to find link
|
|
|
|
indent = 0
|
|
|
|
while token:
|
|
|
|
if token.str == tok1:
|
|
|
|
indent = indent + 1
|
|
|
|
elif token.str == tok2:
|
|
|
|
if indent <= 1:
|
|
|
|
return token
|
|
|
|
indent = indent - 1
|
2018-05-24 06:31:20 +02:00
|
|
|
if forward is True:
|
2018-04-24 09:28:24 +02:00
|
|
|
token = token.next
|
|
|
|
else:
|
|
|
|
token = token.previous
|
|
|
|
|
|
|
|
# raw link not found
|
|
|
|
return None
|
|
|
|
|
2018-05-24 06:31:20 +02:00
|
|
|
|
2018-04-24 09:28:24 +02:00
|
|
|
def numberOfParentheses(tok1, tok2):
|
2017-10-09 15:25:55 +02:00
|
|
|
while tok1 and tok1 != tok2:
|
|
|
|
if tok1.str == '(' or tok1.str == ')':
|
|
|
|
return False
|
|
|
|
tok1 = tok1.next
|
|
|
|
return tok1 == tok2
|
|
|
|
|
|
|
|
|
|
|
|
def findGotoLabel(gotoToken):
|
|
|
|
label = gotoToken.next.str
|
|
|
|
tok = gotoToken.next.next
|
|
|
|
while tok:
|
|
|
|
if tok.str == '}' and tok.scope.type == 'Function':
|
|
|
|
break
|
|
|
|
if tok.str == label and tok.next.str == ':':
|
|
|
|
return tok
|
|
|
|
tok = tok.next
|
|
|
|
return None
|
|
|
|
|
|
|
|
|
|
|
|
def findInclude(directives, header):
|
|
|
|
for directive in directives:
|
|
|
|
if directive.str == '#include ' + header:
|
|
|
|
return directive
|
|
|
|
return None
|
|
|
|
|
2018-04-03 15:11:25 +02:00
|
|
|
|
|
|
|
# Get function arguments
|
|
|
|
def getArgumentsRecursive(tok, arguments):
|
|
|
|
if tok is None:
|
|
|
|
return
|
|
|
|
if tok.str == ',':
|
|
|
|
getArgumentsRecursive(tok.astOperand1, arguments)
|
|
|
|
getArgumentsRecursive(tok.astOperand2, arguments)
|
|
|
|
else:
|
2018-05-24 06:31:20 +02:00
|
|
|
arguments.append(tok)
|
|
|
|
|
2018-04-03 15:11:25 +02:00
|
|
|
|
|
|
|
def getArguments(ftok):
|
|
|
|
arguments = []
|
|
|
|
getArgumentsRecursive(ftok.astOperand2, arguments)
|
|
|
|
return arguments
|
|
|
|
|
|
|
|
|
2019-04-11 10:36:02 +02:00
|
|
|
def isalnum(c):
|
2019-09-28 20:16:10 +02:00
|
|
|
return c in string.digits or c in string.ascii_letters
|
|
|
|
|
2019-04-11 10:36:02 +02:00
|
|
|
|
2019-09-28 20:16:10 +02:00
|
|
|
def isHexEscapeSequence(symbols):
|
|
|
|
"""Checks that given symbols are valid hex escape sequence.
|
2019-04-11 10:36:02 +02:00
|
|
|
|
2019-09-29 21:23:19 +02:00
|
|
|
hexadecimal-escape-sequence:
|
|
|
|
\\x hexadecimal-digit
|
|
|
|
hexadecimal-escape-sequence hexadecimal-digit
|
2019-09-28 20:16:10 +02:00
|
|
|
|
|
|
|
Reference: n1570 6.4.4.4"""
|
|
|
|
if len(symbols) < 3 or symbols[:2] != '\\x':
|
|
|
|
return False
|
|
|
|
return all([s in string.hexdigits for s in symbols[2:]])
|
2018-03-31 12:17:55 +02:00
|
|
|
|
2018-05-24 06:31:20 +02:00
|
|
|
|
2019-09-28 20:16:10 +02:00
|
|
|
def isOctalEscapeSequence(symbols):
|
2019-11-06 17:49:37 +01:00
|
|
|
r"""Checks that given symbols are valid octal escape sequence:
|
2019-09-28 20:16:10 +02:00
|
|
|
|
|
|
|
octal-escape-sequence:
|
|
|
|
\ octal-digit
|
|
|
|
\ octal-digit octal-digit
|
|
|
|
\ octal-digit octal-digit octal-digit
|
|
|
|
|
|
|
|
Reference: n1570 6.4.4.4"""
|
|
|
|
if len(symbols) not in range(2, 5) or symbols[0] != '\\':
|
|
|
|
return False
|
|
|
|
return all([s in string.octdigits for s in symbols[1:]])
|
|
|
|
|
|
|
|
|
|
|
|
def isSimpleEscapeSequence(symbols):
|
|
|
|
"""Checks that given symbols are simple escape sequence.
|
|
|
|
Reference: n1570 6.4.4.4"""
|
|
|
|
if len(symbols) != 2 or symbols[0] != '\\':
|
|
|
|
return False
|
|
|
|
return symbols[1] in ("'", '"', '?', '\\', 'a', 'b', 'f', 'n', 'r', 't', 'v')
|
|
|
|
|
|
|
|
|
2019-12-21 07:40:15 +01:00
|
|
|
def isTernaryOperator(token):
|
|
|
|
if not token:
|
|
|
|
return False
|
|
|
|
if not token.astOperand2:
|
|
|
|
return False
|
|
|
|
return token.str == '?' and token.astOperand2.str == ':'
|
|
|
|
|
|
|
|
|
|
|
|
def getTernaryOperandsRecursive(token):
|
2020-02-17 18:28:58 +01:00
|
|
|
"""Returns list of ternary operands including nested ones."""
|
2019-12-21 07:40:15 +01:00
|
|
|
if not isTernaryOperator(token):
|
|
|
|
return []
|
|
|
|
result = []
|
|
|
|
result += getTernaryOperandsRecursive(token.astOperand2.astOperand1)
|
|
|
|
if token.astOperand2.astOperand1 and not isTernaryOperator(token.astOperand2.astOperand1):
|
|
|
|
result += [token.astOperand2.astOperand1]
|
|
|
|
result += getTernaryOperandsRecursive(token.astOperand2.astOperand2)
|
|
|
|
if token.astOperand2.astOperand2 and not isTernaryOperator(token.astOperand2.astOperand2):
|
|
|
|
result += [token.astOperand2.astOperand2]
|
|
|
|
return result
|
|
|
|
|
|
|
|
|
2019-09-28 20:16:10 +02:00
|
|
|
def hasNumericEscapeSequence(symbols):
|
2019-09-29 21:23:19 +02:00
|
|
|
"""Check that given string contains octal or hexadecimal escape sequences."""
|
2019-09-28 20:16:10 +02:00
|
|
|
if '\\' not in symbols:
|
|
|
|
return False
|
|
|
|
for c, cn in grouped(symbols, 2):
|
|
|
|
if c == '\\' and cn in ('x' + string.octdigits):
|
|
|
|
return True
|
|
|
|
return False
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-05-24 06:31:20 +02:00
|
|
|
|
2018-05-03 10:59:09 +02:00
|
|
|
def isNoReturnScope(tok):
|
|
|
|
if tok is None or tok.str != '}':
|
|
|
|
return False
|
|
|
|
if tok.previous is None or tok.previous.str != ';':
|
|
|
|
return False
|
|
|
|
if simpleMatch(tok.previous.previous, 'break ;'):
|
|
|
|
return True
|
|
|
|
prev = tok.previous.previous
|
|
|
|
while prev and prev.str not in ';{}':
|
|
|
|
if prev.str in '])':
|
|
|
|
prev = prev.link
|
|
|
|
prev = prev.previous
|
|
|
|
if prev and prev.next.str in ['throw', 'return']:
|
|
|
|
return True
|
|
|
|
return False
|
|
|
|
|
2019-04-11 10:36:02 +02:00
|
|
|
|
|
|
|
class Define:
|
|
|
|
def __init__(self, directive):
|
|
|
|
self.args = []
|
|
|
|
self.expansionList = ''
|
|
|
|
|
2020-02-11 20:59:11 +01:00
|
|
|
res = re.match(r'#define [A-Za-z0-9_]+\(([A-Za-z0-9_, ]+)\)[ ]+(.*)', directive.str)
|
2019-04-11 10:36:02 +02:00
|
|
|
if res is None:
|
|
|
|
return
|
|
|
|
|
2020-02-11 20:59:11 +01:00
|
|
|
self.args = res.group(1).strip().split(',')
|
2019-04-11 10:36:02 +02:00
|
|
|
self.expansionList = res.group(2)
|
|
|
|
|
2019-12-09 19:08:45 +01:00
|
|
|
def __repr__(self):
|
|
|
|
attrs = ["args", "expansionList"]
|
|
|
|
return "{}({})".format(
|
|
|
|
"Define",
|
|
|
|
", ".join(("{}={}".format(a, repr(getattr(self, a))) for a in attrs))
|
|
|
|
)
|
|
|
|
|
|
|
|
|
2019-06-19 21:57:28 +02:00
|
|
|
def getAddonRules():
|
|
|
|
"""Returns dict of MISRA rules handled by this addon."""
|
|
|
|
addon_rules = []
|
|
|
|
compiled = re.compile(r'.*def[ ]+misra_([0-9]+)_([0-9]+)[(].*')
|
|
|
|
for line in open(__file__):
|
|
|
|
res = compiled.match(line)
|
|
|
|
if res is None:
|
|
|
|
continue
|
|
|
|
addon_rules.append(res.group(1) + '.' + res.group(2))
|
|
|
|
return addon_rules
|
|
|
|
|
|
|
|
|
|
|
|
def getCppcheckRules():
|
|
|
|
"""Returns list of rules handled by cppcheck."""
|
2019-12-08 19:06:34 +01:00
|
|
|
return ['1.3', '2.1', '2.2', '2.4', '2.6', '5.3', '8.3', '12.2',
|
|
|
|
'13.2', '13.6', '14.3', '17.5', '18.1', '18.2', '18.3',
|
|
|
|
'18.6', '20.6', '22.1', '22.2', '22.4', '22.6']
|
2019-06-19 21:57:28 +02:00
|
|
|
|
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def generateTable():
|
2019-06-19 21:57:28 +02:00
|
|
|
# print table
|
2018-09-29 09:05:13 +02:00
|
|
|
numberOfRules = {}
|
|
|
|
numberOfRules[1] = 3
|
|
|
|
numberOfRules[2] = 7
|
|
|
|
numberOfRules[3] = 2
|
|
|
|
numberOfRules[4] = 2
|
|
|
|
numberOfRules[5] = 9
|
|
|
|
numberOfRules[6] = 2
|
|
|
|
numberOfRules[7] = 4
|
|
|
|
numberOfRules[8] = 14
|
|
|
|
numberOfRules[9] = 5
|
|
|
|
numberOfRules[10] = 8
|
|
|
|
numberOfRules[11] = 9
|
|
|
|
numberOfRules[12] = 4
|
|
|
|
numberOfRules[13] = 6
|
|
|
|
numberOfRules[14] = 4
|
|
|
|
numberOfRules[15] = 7
|
|
|
|
numberOfRules[16] = 7
|
|
|
|
numberOfRules[17] = 8
|
|
|
|
numberOfRules[18] = 8
|
|
|
|
numberOfRules[19] = 2
|
|
|
|
numberOfRules[20] = 14
|
|
|
|
numberOfRules[21] = 12
|
|
|
|
numberOfRules[22] = 6
|
2018-05-24 06:31:20 +02:00
|
|
|
|
2019-06-19 21:57:28 +02:00
|
|
|
# Rules that can be checked with compilers:
|
2018-09-29 09:05:13 +02:00
|
|
|
# compiler = ['1.1', '1.2']
|
|
|
|
|
2019-06-19 21:57:28 +02:00
|
|
|
addon = getAddonRules()
|
|
|
|
cppcheck = getCppcheckRules()
|
2018-09-29 09:05:13 +02:00
|
|
|
for i1 in range(1, 23):
|
|
|
|
for i2 in range(1, numberOfRules[i1] + 1):
|
|
|
|
num = str(i1) + '.' + str(i2)
|
|
|
|
s = ''
|
|
|
|
if num in addon:
|
|
|
|
s = 'X (Addon)'
|
|
|
|
elif num in cppcheck:
|
|
|
|
s = 'X (Cppcheck)'
|
|
|
|
num = num + ' '
|
|
|
|
print(num[:8] + s)
|
|
|
|
|
|
|
|
|
2019-07-24 07:16:48 +02:00
|
|
|
def remove_file_prefix(file_path, prefix):
|
|
|
|
"""
|
|
|
|
Remove a file path prefix from a give path. leftover
|
|
|
|
directory separators at the beginning of a file
|
|
|
|
after the removal are also stripped.
|
|
|
|
|
|
|
|
Example:
|
|
|
|
'/remove/this/path/file.c'
|
|
|
|
with a prefix of:
|
|
|
|
'/remove/this/path'
|
|
|
|
becomes:
|
|
|
|
file.c
|
|
|
|
"""
|
|
|
|
result = None
|
|
|
|
if file_path.startswith(prefix):
|
|
|
|
result = file_path[len(prefix):]
|
|
|
|
# Remove any leftover directory separators at the
|
|
|
|
# beginning
|
|
|
|
result = result.lstrip('\\/')
|
|
|
|
else:
|
|
|
|
result = file_path
|
|
|
|
return result
|
|
|
|
|
|
|
|
|
2019-06-23 14:08:05 +02:00
|
|
|
class Rule(object):
|
|
|
|
"""Class to keep rule text and metadata"""
|
|
|
|
|
2019-11-12 15:32:05 +01:00
|
|
|
MISRA_SEVERITY_LEVELS = ['Required', 'Mandatory', 'Advisory']
|
2019-06-23 14:08:05 +02:00
|
|
|
|
2019-04-03 19:39:05 +02:00
|
|
|
def __init__(self, num1, num2):
|
|
|
|
self.num1 = num1
|
|
|
|
self.num2 = num2
|
|
|
|
self.text = ''
|
2019-06-23 14:08:05 +02:00
|
|
|
self.misra_severity = ''
|
2019-04-03 19:39:05 +02:00
|
|
|
|
|
|
|
@property
|
|
|
|
def num(self):
|
|
|
|
return self.num1 * 100 + self.num2
|
|
|
|
|
2019-06-23 14:08:05 +02:00
|
|
|
@property
|
|
|
|
def misra_severity(self):
|
|
|
|
return self._misra_severity
|
|
|
|
|
|
|
|
@misra_severity.setter
|
|
|
|
def misra_severity(self, val):
|
2019-11-12 15:32:05 +01:00
|
|
|
if val in self.MISRA_SEVERITY_LEVELS:
|
2019-06-23 14:08:05 +02:00
|
|
|
self._misra_severity = val
|
|
|
|
else:
|
|
|
|
self._misra_severity = ''
|
|
|
|
|
2019-04-03 19:39:05 +02:00
|
|
|
@property
|
|
|
|
def cppcheck_severity(self):
|
2019-06-23 14:08:05 +02:00
|
|
|
return 'style'
|
2019-04-03 19:39:05 +02:00
|
|
|
|
|
|
|
def __repr__(self):
|
2019-06-23 14:08:05 +02:00
|
|
|
return "%d.%d (%s)" % (self.num1, self.num2, self.misra_severity)
|
2018-10-18 09:17:57 +02:00
|
|
|
|
2019-06-17 21:17:29 +02:00
|
|
|
|
|
|
|
class MisraSettings(object):
|
|
|
|
"""Hold settings for misra.py script."""
|
|
|
|
|
|
|
|
__slots__ = ["verify", "quiet", "show_summary"]
|
|
|
|
|
|
|
|
def __init__(self, args):
|
|
|
|
"""
|
|
|
|
:param args: Arguments given by argparse.
|
|
|
|
"""
|
|
|
|
self.verify = False
|
|
|
|
self.quiet = False
|
|
|
|
self.show_summary = True
|
|
|
|
|
|
|
|
if args.verify:
|
|
|
|
self.verify = True
|
|
|
|
if args.cli:
|
|
|
|
self.quiet = True
|
|
|
|
self.show_summary = False
|
|
|
|
if args.quiet:
|
|
|
|
self.quiet = True
|
|
|
|
if args.no_summary:
|
|
|
|
self.show_summary = False
|
|
|
|
|
2019-12-09 19:08:45 +01:00
|
|
|
def __repr__(self):
|
|
|
|
attrs = ["verify", "quiet", "show_summary", "verify"]
|
|
|
|
return "{}({})".format(
|
|
|
|
"MisraSettings",
|
|
|
|
", ".join(("{}={}".format(a, repr(getattr(self, a))) for a in attrs))
|
|
|
|
)
|
|
|
|
|
2019-06-17 21:17:29 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
class MisraChecker:
|
|
|
|
|
2020-10-28 20:54:43 +01:00
|
|
|
def __init__(self, settings, stdversion="c89"):
|
2019-06-17 21:17:29 +02:00
|
|
|
"""
|
|
|
|
:param settings: misra.py script settings.
|
|
|
|
"""
|
|
|
|
|
|
|
|
self.settings = settings
|
2018-09-29 09:05:13 +02:00
|
|
|
|
|
|
|
# Test validation rules lists
|
2020-01-08 06:54:43 +01:00
|
|
|
self.verify_expected = list()
|
|
|
|
self.verify_actual = list()
|
2018-09-29 09:05:13 +02:00
|
|
|
|
|
|
|
# List of formatted violation messages
|
2020-01-08 06:54:43 +01:00
|
|
|
self.violations = dict()
|
2018-09-29 09:05:13 +02:00
|
|
|
|
|
|
|
# if --rule-texts is specified this dictionary
|
|
|
|
# is loaded with descriptions of each rule
|
|
|
|
# by rule number (in hundreds).
|
|
|
|
# ie rule 1.2 becomes 102
|
2020-01-08 06:54:43 +01:00
|
|
|
self.ruleTexts = dict()
|
2018-09-29 09:05:13 +02:00
|
|
|
|
|
|
|
# Dictionary of dictionaries for rules to suppress
|
|
|
|
# Dict1 is keyed by rule number in the hundreds format of
|
|
|
|
# Major * 100 + minor. ie Rule 5.2 = (5*100) + 2
|
2019-01-06 17:15:57 +01:00
|
|
|
# Dict 2 is keyed by filename. An entry of None means suppress globally.
|
2019-11-12 15:32:05 +01:00
|
|
|
# Each file name entry contains a list of tuples of (lineNumber, symbolName)
|
2018-10-18 09:17:57 +02:00
|
|
|
# or an item of None which indicates suppress rule for the entire file.
|
2018-09-29 09:05:13 +02:00
|
|
|
# The line and symbol name tuple may have None as either of its elements but
|
|
|
|
# should not be None for both.
|
2020-01-08 06:54:43 +01:00
|
|
|
self.suppressedRules = dict()
|
2018-09-29 09:05:13 +02:00
|
|
|
|
|
|
|
# List of suppression extracted from the dumpfile
|
|
|
|
self.dumpfileSuppressions = None
|
|
|
|
|
2019-07-24 07:16:48 +02:00
|
|
|
# Prefix to ignore when matching suppression files.
|
|
|
|
self.filePrefix = None
|
|
|
|
|
2019-11-04 06:58:33 +01:00
|
|
|
# Number of all violations suppressed per rule
|
2020-01-08 06:54:43 +01:00
|
|
|
self.suppressionStats = dict()
|
2019-04-03 19:38:03 +02:00
|
|
|
|
2019-06-14 12:06:57 +02:00
|
|
|
self.stdversion = stdversion
|
|
|
|
|
2020-06-08 15:58:17 +02:00
|
|
|
self.severity = None
|
|
|
|
|
2020-07-16 21:29:17 +02:00
|
|
|
self.existing_violations = set()
|
|
|
|
|
2019-12-09 19:08:45 +01:00
|
|
|
def __repr__(self):
|
|
|
|
attrs = ["settings", "verify_expected", "verify_actual", "violations",
|
|
|
|
"ruleTexts", "suppressedRules", "dumpfileSuppressions",
|
2020-06-08 15:58:17 +02:00
|
|
|
"filePrefix", "suppressionStats", "stdversion", "severity"]
|
2019-12-09 19:08:45 +01:00
|
|
|
return "{}({})".format(
|
|
|
|
"MisraChecker",
|
|
|
|
", ".join(("{}={}".format(a, repr(getattr(self, a))) for a in attrs))
|
|
|
|
)
|
|
|
|
|
2019-06-14 12:06:57 +02:00
|
|
|
def get_num_significant_naming_chars(self, cfg):
|
2020-01-31 23:38:42 +01:00
|
|
|
if cfg.standards and cfg.standards.c == "c89":
|
2019-06-14 12:06:57 +02:00
|
|
|
return 31
|
2020-01-31 23:38:42 +01:00
|
|
|
else:
|
|
|
|
return 63
|
2019-06-14 12:06:57 +02:00
|
|
|
|
2019-11-18 06:56:30 +01:00
|
|
|
def misra_2_7(self, data):
|
|
|
|
for func in data.functions:
|
|
|
|
# Skip function with no parameter
|
|
|
|
if (len(func.argument) == 0):
|
|
|
|
continue
|
|
|
|
# Setup list of function parameters
|
|
|
|
func_param_list = list()
|
|
|
|
for arg in func.argument:
|
|
|
|
func_param_list.append(func.argument[arg])
|
|
|
|
# Search for scope of current function
|
|
|
|
for scope in data.scopes:
|
|
|
|
if (scope.type == "Function") and (scope.function == func):
|
|
|
|
# Search function body: remove referenced function parameter from list
|
|
|
|
token = scope.bodyStart
|
|
|
|
while (token.next != None and token != scope.bodyEnd and len(func_param_list) > 0):
|
|
|
|
if (token.variable != None and token.variable in func_param_list):
|
|
|
|
func_param_list.remove(token.variable)
|
|
|
|
token = token.next
|
|
|
|
if (len(func_param_list) > 0):
|
|
|
|
# At least one parameter has not been referenced in function body
|
|
|
|
self.reportError(func.tokenDef, 2, 7)
|
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_3_1(self, rawTokens):
|
|
|
|
for token in rawTokens:
|
2019-10-03 21:15:00 +02:00
|
|
|
starts_with_double_slash = token.str.startswith('//')
|
|
|
|
if token.str.startswith('/*') or starts_with_double_slash:
|
2018-09-29 09:05:13 +02:00
|
|
|
s = token.str.lstrip('/')
|
2019-10-03 21:15:00 +02:00
|
|
|
if ((not starts_with_double_slash) and '//' in s) or '/*' in s:
|
2018-09-29 09:05:13 +02:00
|
|
|
self.reportError(token, 3, 1)
|
|
|
|
|
2019-10-16 11:31:42 +02:00
|
|
|
def misra_3_2(self, rawTokens):
|
|
|
|
for token in rawTokens:
|
|
|
|
if token.str.startswith('//'):
|
|
|
|
# Check for comment ends with trigraph which might be replaced
|
|
|
|
# by a backslash.
|
|
|
|
if token.str.endswith('??/'):
|
|
|
|
self.reportError(token, 3, 2)
|
|
|
|
# Check for comment which has been merged with subsequent line
|
|
|
|
# because it ends with backslash.
|
|
|
|
# The last backslash is no more part of the comment token thus
|
|
|
|
# check if next token exists and compare line numbers.
|
2019-11-12 15:32:05 +01:00
|
|
|
elif (token.next is not None) and (token.linenr == token.next.linenr):
|
2019-10-16 11:31:42 +02:00
|
|
|
self.reportError(token, 3, 2)
|
2018-09-29 09:05:13 +02:00
|
|
|
|
|
|
|
def misra_4_1(self, rawTokens):
|
|
|
|
for token in rawTokens:
|
2019-09-28 20:16:10 +02:00
|
|
|
if (token.str[0] != '"') and (token.str[0] != '\''):
|
2018-09-29 09:05:13 +02:00
|
|
|
continue
|
2019-09-28 20:16:10 +02:00
|
|
|
if len(token.str) < 3:
|
|
|
|
continue
|
|
|
|
|
|
|
|
delimiter = token.str[0]
|
|
|
|
symbols = token.str[1:-1]
|
|
|
|
|
|
|
|
# No closing delimiter. This will not compile.
|
|
|
|
if token.str[-1] != delimiter:
|
|
|
|
continue
|
|
|
|
|
|
|
|
if len(symbols) < 2:
|
|
|
|
continue
|
|
|
|
|
|
|
|
if not hasNumericEscapeSequence(symbols):
|
|
|
|
continue
|
|
|
|
|
|
|
|
# String literals that contains one or more escape sequences. All of them should be
|
|
|
|
# terminated.
|
|
|
|
for sequence in ['\\' + t for t in symbols.split('\\')][1:]:
|
|
|
|
if (isHexEscapeSequence(sequence) or isOctalEscapeSequence(sequence) or
|
2019-11-12 15:32:05 +01:00
|
|
|
isSimpleEscapeSequence(sequence)):
|
2018-03-31 12:17:55 +02:00
|
|
|
continue
|
2018-09-29 09:05:13 +02:00
|
|
|
else:
|
|
|
|
self.reportError(token, 4, 1)
|
2018-03-31 12:17:55 +02:00
|
|
|
|
2019-10-09 08:26:05 +02:00
|
|
|
def misra_4_2(self, rawTokens):
|
|
|
|
for token in rawTokens:
|
|
|
|
if (token.str[0] != '"') or (token.str[-1] != '"'):
|
|
|
|
continue
|
|
|
|
# Check for trigraph sequence as defined by ISO/IEC 9899:1999
|
|
|
|
for sequence in ['??=', '??(', '??/', '??)', '??\'', '??<', '??!', '??>', '??-']:
|
|
|
|
if sequence in token.str[1:-1]:
|
|
|
|
# First trigraph sequence match, report error and leave loop.
|
|
|
|
self.reportError(token, 4, 2)
|
|
|
|
break
|
2019-09-28 20:16:10 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_5_1(self, data):
|
2019-07-09 15:53:23 +02:00
|
|
|
long_vars = {}
|
2020-04-28 07:18:54 +02:00
|
|
|
num_sign_chars = self.get_num_significant_naming_chars(data)
|
2018-09-29 09:05:13 +02:00
|
|
|
for var in data.variables:
|
2019-05-26 18:46:00 +02:00
|
|
|
if var.nameToken is None:
|
2018-09-29 09:05:13 +02:00
|
|
|
continue
|
2020-04-28 07:18:54 +02:00
|
|
|
if len(var.nameToken.str) <= num_sign_chars:
|
2019-05-26 18:46:00 +02:00
|
|
|
continue
|
|
|
|
if not hasExternalLinkage(var):
|
|
|
|
continue
|
2020-04-28 07:18:54 +02:00
|
|
|
long_vars.setdefault(var.nameToken.str[:num_sign_chars], []).append(var.nameToken)
|
2019-07-09 15:53:23 +02:00
|
|
|
for name_prefix in long_vars:
|
|
|
|
tokens = long_vars[name_prefix]
|
|
|
|
if len(tokens) < 2:
|
|
|
|
continue
|
2019-08-18 12:19:05 +02:00
|
|
|
for tok in sorted(tokens, key=lambda t: (t.linenr, t.column))[1:]:
|
2019-07-09 15:53:23 +02:00
|
|
|
self.reportError(tok, 5, 1)
|
2018-09-29 09:05:13 +02:00
|
|
|
|
|
|
|
def misra_5_2(self, data):
|
|
|
|
scopeVars = {}
|
2020-04-28 07:18:54 +02:00
|
|
|
num_sign_chars = self.get_num_significant_naming_chars(data)
|
2018-09-29 09:05:13 +02:00
|
|
|
for var in data.variables:
|
2019-05-26 18:46:00 +02:00
|
|
|
if var.nameToken is None:
|
|
|
|
continue
|
2020-04-28 07:18:54 +02:00
|
|
|
if len(var.nameToken.str) <= num_sign_chars:
|
2019-05-26 18:46:00 +02:00
|
|
|
continue
|
|
|
|
if var.nameToken.scope not in scopeVars:
|
|
|
|
scopeVars.setdefault(var.nameToken.scope, {})["varlist"] = []
|
|
|
|
scopeVars.setdefault(var.nameToken.scope, {})["scopelist"] = []
|
|
|
|
scopeVars[var.nameToken.scope]["varlist"].append(var)
|
2018-09-29 09:05:13 +02:00
|
|
|
for scope in data.scopes:
|
|
|
|
if scope.nestedIn and scope.className:
|
|
|
|
if scope.nestedIn not in scopeVars:
|
|
|
|
scopeVars.setdefault(scope.nestedIn, {})["varlist"] = []
|
|
|
|
scopeVars.setdefault(scope.nestedIn, {})["scopelist"] = []
|
|
|
|
scopeVars[scope.nestedIn]["scopelist"].append(scope)
|
|
|
|
for scope in scopeVars:
|
|
|
|
if len(scopeVars[scope]["varlist"]) <= 1:
|
|
|
|
continue
|
|
|
|
for i, variable1 in enumerate(scopeVars[scope]["varlist"]):
|
|
|
|
for variable2 in scopeVars[scope]["varlist"][i + 1:]:
|
|
|
|
if variable1.isArgument and variable2.isArgument:
|
|
|
|
continue
|
2019-05-26 18:46:00 +02:00
|
|
|
if hasExternalLinkage(variable1) or hasExternalLinkage(variable2):
|
2018-09-29 09:05:13 +02:00
|
|
|
continue
|
2020-04-28 07:18:54 +02:00
|
|
|
if (variable1.nameToken.str[:num_sign_chars] == variable2.nameToken.str[:num_sign_chars] and
|
2018-09-29 09:05:13 +02:00
|
|
|
variable1.Id != variable2.Id):
|
|
|
|
if int(variable1.nameToken.linenr) > int(variable2.nameToken.linenr):
|
|
|
|
self.reportError(variable1.nameToken, 5, 2)
|
|
|
|
else:
|
|
|
|
self.reportError(variable2.nameToken, 5, 2)
|
|
|
|
for innerscope in scopeVars[scope]["scopelist"]:
|
2020-04-28 07:18:54 +02:00
|
|
|
if variable1.nameToken.str[:num_sign_chars] == innerscope.className[:num_sign_chars]:
|
2018-09-29 09:05:13 +02:00
|
|
|
if int(variable1.nameToken.linenr) > int(innerscope.bodyStart.linenr):
|
|
|
|
self.reportError(variable1.nameToken, 5, 2)
|
|
|
|
else:
|
|
|
|
self.reportError(innerscope.bodyStart, 5, 2)
|
|
|
|
if len(scopeVars[scope]["scopelist"]) <= 1:
|
|
|
|
continue
|
|
|
|
for i, scopename1 in enumerate(scopeVars[scope]["scopelist"]):
|
|
|
|
for scopename2 in scopeVars[scope]["scopelist"][i + 1:]:
|
2020-04-28 07:18:54 +02:00
|
|
|
if scopename1.className[:num_sign_chars] == scopename2.className[:num_sign_chars]:
|
2018-09-29 09:05:13 +02:00
|
|
|
if int(scopename1.bodyStart.linenr) > int(scopename2.bodyStart.linenr):
|
|
|
|
self.reportError(scopename1.bodyStart, 5, 2)
|
|
|
|
else:
|
|
|
|
self.reportError(scopename2.bodyStart, 5, 2)
|
|
|
|
|
|
|
|
def misra_5_4(self, data):
|
2019-06-14 12:06:57 +02:00
|
|
|
num_sign_chars = self.get_num_significant_naming_chars(data)
|
2018-09-29 09:05:13 +02:00
|
|
|
macro = {}
|
|
|
|
compile_name = re.compile(r'#define ([a-zA-Z0-9_]+)')
|
|
|
|
compile_param = re.compile(r'#define ([a-zA-Z0-9_]+)[(]([a-zA-Z0-9_, ]+)[)]')
|
2020-01-08 06:54:43 +01:00
|
|
|
short_names = {}
|
|
|
|
macro_w_arg = []
|
2018-09-29 09:05:13 +02:00
|
|
|
for dir in data.directives:
|
|
|
|
res1 = compile_name.match(dir.str)
|
|
|
|
if res1:
|
|
|
|
if dir not in macro:
|
|
|
|
macro.setdefault(dir, {})["name"] = []
|
|
|
|
macro.setdefault(dir, {})["params"] = []
|
2020-01-08 06:54:43 +01:00
|
|
|
full_name = res1.group(1)
|
|
|
|
macro[dir]["name"] = full_name
|
|
|
|
short_name = full_name[:num_sign_chars]
|
|
|
|
if short_name in short_names:
|
|
|
|
_dir = short_names[short_name]
|
|
|
|
if full_name != macro[_dir]["name"]:
|
|
|
|
self.reportError(dir, 5, 4)
|
|
|
|
else:
|
|
|
|
short_names[short_name] = dir
|
2018-09-29 09:05:13 +02:00
|
|
|
res2 = compile_param.match(dir.str)
|
|
|
|
if res2:
|
|
|
|
res_gp2 = res2.group(2).split(",")
|
|
|
|
res_gp2 = [macroname.replace(" ", "") for macroname in res_gp2]
|
|
|
|
macro[dir]["params"].extend(res_gp2)
|
2020-01-08 06:54:43 +01:00
|
|
|
macro_w_arg.append(dir)
|
|
|
|
for mvar in macro_w_arg:
|
|
|
|
for i, macroparam1 in enumerate(macro[mvar]["params"]):
|
|
|
|
for j, macroparam2 in enumerate(macro[mvar]["params"]):
|
|
|
|
if j > i and macroparam1[:num_sign_chars] == macroparam2[:num_sign_chars]:
|
|
|
|
self.reportError(mvar, 5, 4)
|
|
|
|
param = macroparam1
|
|
|
|
if param[:num_sign_chars] in short_names:
|
|
|
|
m_var1 = short_names[param[:num_sign_chars]]
|
|
|
|
if m_var1.linenr > mvar.linenr:
|
2018-09-29 09:05:13 +02:00
|
|
|
self.reportError(m_var1, 5, 4)
|
2018-05-14 13:09:38 +02:00
|
|
|
else:
|
2020-01-08 06:54:43 +01:00
|
|
|
self.reportError(mvar, 5, 4)
|
2018-09-29 09:05:13 +02:00
|
|
|
|
|
|
|
def misra_5_5(self, data):
|
2019-06-14 12:06:57 +02:00
|
|
|
num_sign_chars = self.get_num_significant_naming_chars(data)
|
2020-01-08 06:54:43 +01:00
|
|
|
macroNames = {}
|
2018-09-29 09:05:13 +02:00
|
|
|
compiled = re.compile(r'#define ([A-Za-z0-9_]+)')
|
|
|
|
for dir in data.directives:
|
|
|
|
res = compiled.match(dir.str)
|
|
|
|
if res:
|
2020-01-08 06:54:43 +01:00
|
|
|
macroNames[res.group(1)[:num_sign_chars]] = dir
|
2018-09-29 09:05:13 +02:00
|
|
|
for var in data.variables:
|
2020-01-08 06:54:43 +01:00
|
|
|
if var.nameToken and var.nameToken.str[:num_sign_chars] in macroNames:
|
|
|
|
self.reportError(var.nameToken, 5, 5)
|
2018-09-29 09:05:13 +02:00
|
|
|
for scope in data.scopes:
|
2020-01-08 06:54:43 +01:00
|
|
|
if scope.className and scope.className[:num_sign_chars] in macroNames:
|
|
|
|
self.reportError(scope.bodyStart, 5, 5)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2020-10-27 16:00:19 +01:00
|
|
|
|
|
|
|
def misra_6_1(self, data):
|
|
|
|
# Bitfield type must be bool or explicity signed/unsigned int
|
2020-10-28 20:54:43 +01:00
|
|
|
for token in data.tokenlist:
|
|
|
|
if not token.valueType:
|
2020-10-27 16:00:19 +01:00
|
|
|
continue
|
|
|
|
if token.valueType.bits == 0:
|
|
|
|
continue
|
2020-10-28 20:54:43 +01:00
|
|
|
if not token.variable:
|
2020-10-27 16:00:19 +01:00
|
|
|
continue
|
2020-10-28 20:54:43 +01:00
|
|
|
if not token.scope:
|
|
|
|
continue
|
|
|
|
if token.scope.type not in 'Struct':
|
2020-10-27 16:00:19 +01:00
|
|
|
continue
|
|
|
|
|
2020-10-28 20:54:43 +01:00
|
|
|
if data.standards.c == 'c89':
|
|
|
|
if token.valueType.type != 'int':
|
|
|
|
self.reportError(token, 6, 1)
|
|
|
|
elif data.standards.c == 'c99':
|
|
|
|
if token.valueType.type == 'bool':
|
|
|
|
continue
|
|
|
|
|
2020-10-27 16:00:19 +01:00
|
|
|
isExplicitlySignedOrUnsigned = False
|
|
|
|
typeToken = token.variable.typeStartToken
|
|
|
|
while typeToken:
|
|
|
|
if typeToken.isUnsigned or typeToken.isSigned:
|
|
|
|
isExplicitlySignedOrUnsigned = True
|
|
|
|
break
|
|
|
|
|
|
|
|
if typeToken.Id == token.variable.typeEndToken.Id:
|
|
|
|
break
|
2020-10-28 20:54:43 +01:00
|
|
|
|
2020-10-27 16:00:19 +01:00
|
|
|
typeToken = typeToken.next
|
|
|
|
|
|
|
|
if not isExplicitlySignedOrUnsigned:
|
|
|
|
self.reportError(token, 6, 1)
|
|
|
|
|
|
|
|
|
|
|
|
def misra_6_2(self, data):
|
|
|
|
# Bitfields of size 1 can not be signed
|
2020-10-28 20:54:43 +01:00
|
|
|
for token in data.tokenlist:
|
|
|
|
if not token.valueType:
|
|
|
|
continue
|
|
|
|
if not token.scope:
|
|
|
|
continue
|
|
|
|
if token.scope.type not in 'Struct':
|
2020-10-27 16:00:19 +01:00
|
|
|
continue
|
|
|
|
if token.valueType.bits == 1 and token.valueType.sign == 'signed':
|
|
|
|
self.reportError(token, 6, 2)
|
|
|
|
|
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_7_1(self, rawTokens):
|
|
|
|
compiled = re.compile(r'^0[0-7]+$')
|
|
|
|
for tok in rawTokens:
|
|
|
|
if compiled.match(tok.str):
|
|
|
|
self.reportError(tok, 7, 1)
|
|
|
|
|
|
|
|
def misra_7_3(self, rawTokens):
|
|
|
|
compiled = re.compile(r'^[0-9.uU]+l')
|
|
|
|
for tok in rawTokens:
|
|
|
|
if compiled.match(tok.str):
|
|
|
|
self.reportError(tok, 7, 3)
|
|
|
|
|
|
|
|
def misra_8_11(self, data):
|
|
|
|
for var in data.variables:
|
|
|
|
if var.isExtern and simpleMatch(var.nameToken.next, '[ ]') and var.nameToken.scope.type == 'Global':
|
|
|
|
self.reportError(var.nameToken, 8, 11)
|
|
|
|
|
|
|
|
def misra_8_12(self, data):
|
2018-05-14 13:09:38 +02:00
|
|
|
for scope in data.scopes:
|
2018-09-29 09:05:13 +02:00
|
|
|
if scope.type != 'Enum':
|
2018-05-21 12:04:20 +02:00
|
|
|
continue
|
2019-06-01 13:10:20 +02:00
|
|
|
enum_values = []
|
|
|
|
implicit_enum_values = []
|
2018-09-29 09:05:13 +02:00
|
|
|
e_token = scope.bodyStart.next
|
|
|
|
while e_token != scope.bodyEnd:
|
2019-06-01 13:10:20 +02:00
|
|
|
if e_token.str == '(':
|
2019-08-04 12:24:28 +02:00
|
|
|
e_token = e_token.link
|
2019-06-01 13:10:20 +02:00
|
|
|
continue
|
2019-11-12 15:32:05 +01:00
|
|
|
if e_token.previous.str not in ',{':
|
2019-06-01 13:10:20 +02:00
|
|
|
e_token = e_token.next
|
|
|
|
continue
|
|
|
|
if e_token.isName and e_token.values and e_token.valueType and e_token.valueType.typeScope == scope:
|
2018-09-29 09:05:13 +02:00
|
|
|
token_values = [v.intvalue for v in e_token.values]
|
|
|
|
enum_values += token_values
|
|
|
|
if e_token.next.str != "=":
|
|
|
|
implicit_enum_values += token_values
|
|
|
|
e_token = e_token.next
|
|
|
|
for implicit_enum_value in implicit_enum_values:
|
|
|
|
if enum_values.count(implicit_enum_value) != 1:
|
|
|
|
self.reportError(scope.bodyStart, 8, 12)
|
|
|
|
|
|
|
|
def misra_8_14(self, rawTokens):
|
|
|
|
for token in rawTokens:
|
|
|
|
if token.str == 'restrict':
|
|
|
|
self.reportError(token, 8, 14)
|
|
|
|
|
|
|
|
def misra_9_5(self, rawTokens):
|
|
|
|
for token in rawTokens:
|
|
|
|
if simpleMatch(token, '[ ] = { ['):
|
|
|
|
self.reportError(token, 9, 5)
|
|
|
|
|
|
|
|
def misra_10_1(self, data):
|
|
|
|
for token in data.tokenlist:
|
|
|
|
if not token.isOp:
|
2018-05-21 12:04:20 +02:00
|
|
|
continue
|
2019-12-21 07:40:15 +01:00
|
|
|
|
|
|
|
for t1, t2 in itertools.product(
|
2020-01-08 06:54:43 +01:00
|
|
|
list(getTernaryOperandsRecursive(token.astOperand1) or [token.astOperand1]),
|
|
|
|
list(getTernaryOperandsRecursive(token.astOperand2) or [token.astOperand2]),
|
2019-12-21 07:40:15 +01:00
|
|
|
):
|
|
|
|
e1 = getEssentialTypeCategory(t1)
|
|
|
|
e2 = getEssentialTypeCategory(t2)
|
|
|
|
if not e1 or not e2:
|
|
|
|
continue
|
|
|
|
if token.str in ('<<', '>>'):
|
2020-02-27 11:28:48 +01:00
|
|
|
if not isUnsignedType(e1):
|
2019-12-21 07:40:15 +01:00
|
|
|
self.reportError(token, 10, 1)
|
2020-02-27 11:28:48 +01:00
|
|
|
elif not isUnsignedType(e2) and not token.astOperand2.isNumber:
|
2019-12-21 07:40:15 +01:00
|
|
|
self.reportError(token, 10, 1)
|
|
|
|
elif token.str in ('~', '&', '|', '^'):
|
|
|
|
e1_et = getEssentialType(token.astOperand1)
|
|
|
|
e2_et = getEssentialType(token.astOperand2)
|
|
|
|
if e1_et == 'char' and e2_et == 'char':
|
|
|
|
self.reportError(token, 10, 1)
|
2018-09-29 09:05:13 +02:00
|
|
|
|
|
|
|
def misra_10_4(self, data):
|
|
|
|
op = {'+', '-', '*', '/', '%', '&', '|', '^', '+=', '-=', ':'}
|
|
|
|
for token in data.tokenlist:
|
|
|
|
if token.str not in op and not token.isComparisonOp:
|
|
|
|
continue
|
|
|
|
if not token.astOperand1 or not token.astOperand2:
|
|
|
|
continue
|
|
|
|
if not token.astOperand1.valueType or not token.astOperand2.valueType:
|
|
|
|
continue
|
|
|
|
if ((token.astOperand1.str in op or token.astOperand1.isComparisonOp) and
|
|
|
|
(token.astOperand2.str in op or token.astOperand1.isComparisonOp)):
|
|
|
|
e1, e2 = getEssentialCategorylist(token.astOperand1.astOperand2, token.astOperand2.astOperand1)
|
|
|
|
elif token.astOperand1.str in op or token.astOperand1.isComparisonOp:
|
|
|
|
e1, e2 = getEssentialCategorylist(token.astOperand1.astOperand2, token.astOperand2)
|
|
|
|
elif token.astOperand2.str in op or token.astOperand2.isComparisonOp:
|
|
|
|
e1, e2 = getEssentialCategorylist(token.astOperand1, token.astOperand2.astOperand1)
|
2018-05-24 22:28:36 +02:00
|
|
|
else:
|
2018-09-29 09:05:13 +02:00
|
|
|
e1, e2 = getEssentialCategorylist(token.astOperand1, token.astOperand2)
|
|
|
|
if token.str == "+=" or token.str == "+":
|
|
|
|
if e1 == "char" and (e2 == "signed" or e2 == "unsigned"):
|
|
|
|
continue
|
|
|
|
if e2 == "char" and (e1 == "signed" or e1 == "unsigned"):
|
|
|
|
continue
|
|
|
|
if token.str == "-=" or token.str == "-":
|
|
|
|
if e1 == "char" and (e2 == "signed" or e2 == "unsigned"):
|
|
|
|
continue
|
|
|
|
if e1 and e2 and (e1.find('Anonymous') != -1 and (e2 == "signed" or e2 == "unsigned")):
|
2017-10-09 15:25:55 +02:00
|
|
|
continue
|
2018-09-29 09:05:13 +02:00
|
|
|
if e1 and e2 and (e2.find('Anonymous') != -1 and (e1 == "signed" or e1 == "unsigned")):
|
|
|
|
continue
|
|
|
|
if e1 and e2 and e1 != e2:
|
|
|
|
self.reportError(token, 10, 4)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_10_6(self, data):
|
|
|
|
for token in data.tokenlist:
|
|
|
|
if token.str != '=' or not token.astOperand1 or not token.astOperand2:
|
|
|
|
continue
|
2019-08-10 18:12:23 +02:00
|
|
|
if (token.astOperand2.str not in ('+', '-', '*', '/', '%', '&', '|', '^', '>>', "<<", "?", ":", '~') and
|
2018-09-29 09:05:13 +02:00
|
|
|
not isCast(token.astOperand2)):
|
|
|
|
continue
|
|
|
|
vt1 = token.astOperand1.valueType
|
|
|
|
vt2 = token.astOperand2.valueType
|
|
|
|
if not vt1 or vt1.pointer > 0:
|
|
|
|
continue
|
|
|
|
if not vt2 or vt2.pointer > 0:
|
2017-10-09 15:25:55 +02:00
|
|
|
continue
|
2018-05-28 12:58:19 +02:00
|
|
|
try:
|
2018-09-29 09:05:13 +02:00
|
|
|
if isCast(token.astOperand2):
|
|
|
|
e = vt2.type
|
|
|
|
else:
|
|
|
|
e = getEssentialType(token.astOperand2)
|
2018-05-28 12:58:19 +02:00
|
|
|
if not e:
|
|
|
|
continue
|
2020-02-27 11:28:48 +01:00
|
|
|
if bitsOfEssentialType(vt1.type) > bitsOfEssentialType(e):
|
2018-09-29 09:05:13 +02:00
|
|
|
self.reportError(token, 10, 6)
|
2018-05-28 12:58:19 +02:00
|
|
|
except ValueError:
|
|
|
|
pass
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_10_8(self, data):
|
|
|
|
for token in data.tokenlist:
|
|
|
|
if not isCast(token):
|
|
|
|
continue
|
|
|
|
if not token.valueType or token.valueType.pointer > 0:
|
|
|
|
continue
|
|
|
|
if not token.astOperand1.valueType or token.astOperand1.valueType.pointer > 0:
|
|
|
|
continue
|
|
|
|
if not token.astOperand1.astOperand1:
|
|
|
|
continue
|
2019-08-10 18:12:23 +02:00
|
|
|
if token.astOperand1.str not in ('+', '-', '*', '/', '%', '&', '|', '^', '>>', "<<", "?", ":", '~'):
|
2018-09-29 09:05:13 +02:00
|
|
|
continue
|
|
|
|
if token.astOperand1.str != '~' and not token.astOperand1.astOperand2:
|
|
|
|
continue
|
|
|
|
if token.astOperand1.str == '~':
|
|
|
|
e2 = getEssentialTypeCategory(token.astOperand1.astOperand1)
|
|
|
|
else:
|
|
|
|
e2, e3 = getEssentialCategorylist(token.astOperand1.astOperand1, token.astOperand1.astOperand2)
|
|
|
|
if e2 != e3:
|
|
|
|
continue
|
|
|
|
e1 = getEssentialTypeCategory(token)
|
|
|
|
if e1 != e2:
|
|
|
|
self.reportError(token, 10, 8)
|
|
|
|
else:
|
|
|
|
try:
|
|
|
|
e = getEssentialType(token.astOperand1)
|
|
|
|
if not e:
|
|
|
|
continue
|
2020-02-27 11:28:48 +01:00
|
|
|
if bitsOfEssentialType(token.valueType.type) > bitsOfEssentialType(e):
|
2018-09-29 09:05:13 +02:00
|
|
|
self.reportError(token, 10, 8)
|
|
|
|
except ValueError:
|
|
|
|
pass
|
|
|
|
|
|
|
|
def misra_11_3(self, data):
|
|
|
|
for token in data.tokenlist:
|
|
|
|
if not isCast(token):
|
|
|
|
continue
|
|
|
|
vt1 = token.valueType
|
|
|
|
vt2 = token.astOperand1.valueType
|
|
|
|
if not vt1 or not vt2:
|
|
|
|
continue
|
|
|
|
if vt1.type == 'void' or vt2.type == 'void':
|
|
|
|
continue
|
|
|
|
if (vt1.pointer > 0 and vt1.type == 'record' and
|
2020-01-08 06:54:43 +01:00
|
|
|
vt2.pointer > 0 and vt2.type == 'record' and
|
2018-09-29 09:05:13 +02:00
|
|
|
vt1.typeScopeId != vt2.typeScopeId):
|
|
|
|
self.reportError(token, 11, 3)
|
|
|
|
elif (vt1.pointer == vt2.pointer and vt1.pointer > 0 and
|
2020-01-08 06:54:43 +01:00
|
|
|
vt1.type != vt2.type and vt1.type != 'char'):
|
2018-09-29 09:05:13 +02:00
|
|
|
self.reportError(token, 11, 3)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_11_4(self, data):
|
|
|
|
for token in data.tokenlist:
|
|
|
|
if not isCast(token):
|
|
|
|
continue
|
|
|
|
vt1 = token.valueType
|
|
|
|
vt2 = token.astOperand1.valueType
|
|
|
|
if not vt1 or not vt2:
|
|
|
|
continue
|
|
|
|
if vt2.pointer > 0 and vt1.pointer == 0 and (vt1.isIntegral() or vt1.isEnum()) and vt2.type != 'void':
|
|
|
|
self.reportError(token, 11, 4)
|
2020-01-08 06:54:43 +01:00
|
|
|
elif vt1.pointer > 0 and vt2.pointer == 0 and (vt2.isIntegral() or vt2.isEnum()) and vt1.type != 'void':
|
2018-09-29 09:05:13 +02:00
|
|
|
self.reportError(token, 11, 4)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_11_5(self, data):
|
|
|
|
for token in data.tokenlist:
|
|
|
|
if not isCast(token):
|
|
|
|
if token.astOperand1 and token.astOperand2 and token.str == "=" and token.next.str != "(":
|
|
|
|
vt1 = token.astOperand1.valueType
|
|
|
|
vt2 = token.astOperand2.valueType
|
|
|
|
if not vt1 or not vt2:
|
|
|
|
continue
|
|
|
|
if vt1.pointer > 0 and vt1.type != 'void' and vt2.pointer == vt1.pointer and vt2.type == 'void':
|
|
|
|
self.reportError(token, 11, 5)
|
|
|
|
continue
|
2020-01-08 06:54:43 +01:00
|
|
|
if token.astOperand1.astOperand1 and token.astOperand1.astOperand1.str in (
|
|
|
|
'malloc', 'calloc', 'realloc', 'free'):
|
2018-09-29 09:05:13 +02:00
|
|
|
continue
|
|
|
|
vt1 = token.valueType
|
|
|
|
vt2 = token.astOperand1.valueType
|
|
|
|
if not vt1 or not vt2:
|
|
|
|
continue
|
|
|
|
if vt1.pointer > 0 and vt1.type != 'void' and vt2.pointer == vt1.pointer and vt2.type == 'void':
|
|
|
|
self.reportError(token, 11, 5)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_11_6(self, data):
|
|
|
|
for token in data.tokenlist:
|
|
|
|
if not isCast(token):
|
|
|
|
continue
|
|
|
|
if token.astOperand1.astOperand1:
|
|
|
|
continue
|
|
|
|
vt1 = token.valueType
|
|
|
|
vt2 = token.astOperand1.valueType
|
|
|
|
if not vt1 or not vt2:
|
|
|
|
continue
|
|
|
|
if vt1.pointer == 1 and vt1.type == 'void' and vt2.pointer == 0 and token.astOperand1.str != "0":
|
|
|
|
self.reportError(token, 11, 6)
|
|
|
|
elif vt1.pointer == 0 and vt1.type != 'void' and vt2.pointer == 1 and vt2.type == 'void':
|
|
|
|
self.reportError(token, 11, 6)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_11_7(self, data):
|
|
|
|
for token in data.tokenlist:
|
|
|
|
if not isCast(token):
|
|
|
|
continue
|
|
|
|
vt1 = token.valueType
|
|
|
|
vt2 = token.astOperand1.valueType
|
|
|
|
if not vt1 or not vt2:
|
|
|
|
continue
|
|
|
|
if token.astOperand1.astOperand1:
|
|
|
|
continue
|
|
|
|
if (vt2.pointer > 0 and vt1.pointer == 0 and
|
|
|
|
not vt1.isIntegral() and not vt1.isEnum() and
|
2019-10-03 10:15:50 +02:00
|
|
|
vt1.type != 'void'):
|
2018-09-29 09:05:13 +02:00
|
|
|
self.reportError(token, 11, 7)
|
|
|
|
elif (vt1.pointer > 0 and vt2.pointer == 0 and
|
2020-01-08 06:54:43 +01:00
|
|
|
not vt2.isIntegral() and not vt2.isEnum() and
|
|
|
|
vt1.type != 'void'):
|
2018-09-29 09:05:13 +02:00
|
|
|
self.reportError(token, 11, 7)
|
|
|
|
|
|
|
|
def misra_11_8(self, data):
|
|
|
|
# TODO: reuse code in CERT-EXP05
|
|
|
|
for token in data.tokenlist:
|
|
|
|
if isCast(token):
|
|
|
|
# C-style cast
|
|
|
|
if not token.valueType:
|
2018-04-03 15:11:25 +02:00
|
|
|
continue
|
2018-09-29 09:05:13 +02:00
|
|
|
if not token.astOperand1.valueType:
|
2018-04-03 15:11:25 +02:00
|
|
|
continue
|
2018-09-29 09:05:13 +02:00
|
|
|
if token.valueType.pointer == 0:
|
2018-04-03 15:11:25 +02:00
|
|
|
continue
|
2018-09-29 09:05:13 +02:00
|
|
|
if token.astOperand1.valueType.pointer == 0:
|
2018-04-03 15:11:25 +02:00
|
|
|
continue
|
2018-09-29 09:05:13 +02:00
|
|
|
const1 = token.valueType.constness
|
|
|
|
const2 = token.astOperand1.valueType.constness
|
2018-04-03 15:11:25 +02:00
|
|
|
if (const1 % 2) < (const2 % 2):
|
2018-09-29 09:05:13 +02:00
|
|
|
self.reportError(token, 11, 8)
|
|
|
|
|
|
|
|
elif token.str == '(' and token.astOperand1 and token.astOperand2 and token.astOperand1.function:
|
|
|
|
# Function call
|
|
|
|
function = token.astOperand1.function
|
|
|
|
arguments = getArguments(token)
|
|
|
|
for argnr, argvar in function.argument.items():
|
|
|
|
if argnr < 1 or argnr > len(arguments):
|
|
|
|
continue
|
|
|
|
if not argvar.isPointer:
|
|
|
|
continue
|
|
|
|
argtok = arguments[argnr - 1]
|
|
|
|
if not argtok.valueType:
|
|
|
|
continue
|
|
|
|
if argtok.valueType.pointer == 0:
|
|
|
|
continue
|
|
|
|
const1 = argvar.constness
|
|
|
|
const2 = arguments[argnr - 1].valueType.constness
|
|
|
|
if (const1 % 2) < (const2 % 2):
|
|
|
|
self.reportError(token, 11, 8)
|
|
|
|
|
|
|
|
def misra_11_9(self, data):
|
|
|
|
for token in data.tokenlist:
|
|
|
|
if token.astOperand1 and token.astOperand2 and token.str in ["=", "==", "!=", "?", ":"]:
|
|
|
|
vt1 = token.astOperand1.valueType
|
|
|
|
vt2 = token.astOperand2.valueType
|
|
|
|
if not vt1 or not vt2:
|
|
|
|
continue
|
|
|
|
if vt1.pointer > 0 and vt2.pointer == 0 and token.astOperand2.str == "NULL":
|
|
|
|
continue
|
|
|
|
if (token.astOperand2.values and vt1.pointer > 0 and
|
|
|
|
vt2.pointer == 0 and token.astOperand2.values):
|
|
|
|
for val in token.astOperand2.values:
|
|
|
|
if val.intvalue == 0:
|
|
|
|
self.reportError(token, 11, 9)
|
|
|
|
|
|
|
|
def misra_12_1_sizeof(self, rawTokens):
|
|
|
|
state = 0
|
|
|
|
compiled = re.compile(r'^[a-zA-Z_]')
|
|
|
|
for tok in rawTokens:
|
|
|
|
if tok.str.startswith('//') or tok.str.startswith('/*'):
|
|
|
|
continue
|
|
|
|
if tok.str == 'sizeof':
|
|
|
|
state = 1
|
|
|
|
elif state == 1:
|
|
|
|
if compiled.match(tok.str):
|
|
|
|
state = 2
|
|
|
|
else:
|
|
|
|
state = 0
|
|
|
|
elif state == 2:
|
2019-08-10 18:12:23 +02:00
|
|
|
if tok.str in ('+', '-', '*', '/', '%'):
|
2018-09-29 09:05:13 +02:00
|
|
|
self.reportError(tok, 12, 1)
|
|
|
|
else:
|
|
|
|
state = 0
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_12_1(self, data):
|
|
|
|
for token in data.tokenlist:
|
|
|
|
p = getPrecedence(token)
|
|
|
|
if p < 2 or p > 12:
|
|
|
|
continue
|
|
|
|
p1 = getPrecedence(token.astOperand1)
|
|
|
|
if p < p1 <= 12 and numberOfParentheses(token.astOperand1, token):
|
|
|
|
self.reportError(token, 12, 1)
|
2018-05-23 15:48:07 +02:00
|
|
|
continue
|
2018-09-29 09:05:13 +02:00
|
|
|
p2 = getPrecedence(token.astOperand2)
|
|
|
|
if p < p2 <= 12 and numberOfParentheses(token, token.astOperand2):
|
|
|
|
self.reportError(token, 12, 1)
|
2018-05-23 15:48:07 +02:00
|
|
|
continue
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_12_2(self, data):
|
|
|
|
for token in data.tokenlist:
|
2019-08-10 18:12:23 +02:00
|
|
|
if not (token.str in ('<<', '>>')):
|
2018-09-29 09:05:13 +02:00
|
|
|
continue
|
|
|
|
if (not token.astOperand2) or (not token.astOperand2.values):
|
|
|
|
continue
|
|
|
|
maxval = 0
|
|
|
|
for val in token.astOperand2.values:
|
|
|
|
if val.intvalue and val.intvalue > maxval:
|
|
|
|
maxval = val.intvalue
|
|
|
|
if maxval == 0:
|
|
|
|
continue
|
2020-02-27 11:28:48 +01:00
|
|
|
sz = bitsOfEssentialType(getEssentialType(token.astOperand1))
|
2018-09-29 09:05:13 +02:00
|
|
|
if sz <= 0:
|
|
|
|
continue
|
|
|
|
if maxval >= sz:
|
|
|
|
self.reportError(token, 12, 2)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2020-09-06 11:33:37 +02:00
|
|
|
def misra_12_3(self, data):
|
2018-09-29 09:05:13 +02:00
|
|
|
for token in data.tokenlist:
|
2020-09-09 16:22:36 +02:00
|
|
|
if token.str == ';' and (token.isSplittedVarDeclComma is True):
|
2020-09-06 11:33:37 +02:00
|
|
|
self.reportError(token, 12, 3)
|
|
|
|
if token.str == ',' and token.astParent and token.astParent.str == ';':
|
|
|
|
self.reportError(token, 12, 3)
|
|
|
|
if token.str == ',' and token.astParent is None:
|
2020-09-08 09:29:57 +02:00
|
|
|
if token.scope.type in ('Class', 'Struct'):
|
|
|
|
# Is this initlist..
|
|
|
|
tok = token
|
|
|
|
while tok and tok.str == ',':
|
|
|
|
tok = tok.next
|
|
|
|
if tok and tok.next and tok.isName and tok.next.str == '(':
|
|
|
|
tok = tok.next.link.next
|
|
|
|
if tok.str == '{':
|
|
|
|
# This comma is used in initlist, do not warn
|
|
|
|
continue
|
2020-09-06 11:33:37 +02:00
|
|
|
prev = token.previous
|
|
|
|
while prev:
|
|
|
|
if prev.str == ';':
|
|
|
|
self.reportError(token, 12, 3)
|
2019-12-19 08:36:10 +01:00
|
|
|
break
|
2020-09-06 11:33:37 +02:00
|
|
|
elif prev.str in ')}]':
|
|
|
|
prev = prev.link
|
|
|
|
elif prev.str in '({[':
|
|
|
|
break
|
|
|
|
prev = prev.previous
|
2019-12-19 08:36:10 +01:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_12_4(self, data):
|
|
|
|
if typeBits['INT'] == 16:
|
|
|
|
max_uint = 0xffff
|
|
|
|
elif typeBits['INT'] == 32:
|
|
|
|
max_uint = 0xffffffff
|
|
|
|
else:
|
|
|
|
return
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
for token in data.tokenlist:
|
2019-12-01 15:03:33 +01:00
|
|
|
if not token.values:
|
|
|
|
continue
|
2018-09-29 09:05:13 +02:00
|
|
|
if (not isConstantExpression(token)) or (not isUnsignedInt(token)):
|
|
|
|
continue
|
|
|
|
for value in token.values:
|
2020-05-21 08:34:28 +02:00
|
|
|
if value.intvalue is None:
|
|
|
|
continue
|
2018-09-29 09:05:13 +02:00
|
|
|
if value.intvalue < 0 or value.intvalue > max_uint:
|
|
|
|
self.reportError(token, 12, 4)
|
|
|
|
break
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_13_1(self, data):
|
|
|
|
for token in data.tokenlist:
|
2020-02-11 18:00:41 +01:00
|
|
|
if simpleMatch(token, ") {") and token.next.astParent == token.link:
|
|
|
|
pass
|
|
|
|
elif not simpleMatch(token, '= {'):
|
2018-09-29 09:05:13 +02:00
|
|
|
continue
|
|
|
|
init = token.next
|
2019-12-01 15:03:33 +01:00
|
|
|
end = init.link
|
|
|
|
if not end:
|
|
|
|
continue # syntax is broken
|
|
|
|
|
|
|
|
tn = init
|
|
|
|
while tn and tn != end:
|
|
|
|
if tn.str == '[' and tn.link:
|
|
|
|
tn = tn.link
|
|
|
|
if tn and tn.next and tn.next.str == '=':
|
|
|
|
tn = tn.next.next
|
|
|
|
continue
|
|
|
|
else:
|
|
|
|
break
|
|
|
|
if tn.str == '.' and tn.next and tn.next.isName:
|
|
|
|
tn = tn.next
|
|
|
|
if tn.next and tn.next.str == '=':
|
|
|
|
tn = tn.next.next
|
|
|
|
continue
|
|
|
|
if tn.str in {'++', '--'} or tn.isAssignmentOp:
|
|
|
|
self.reportError(init, 13, 1)
|
|
|
|
tn = tn.next
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_13_3(self, data):
|
|
|
|
for token in data.tokenlist:
|
2019-08-10 18:12:23 +02:00
|
|
|
if token.str not in ('++', '--'):
|
2018-09-29 09:05:13 +02:00
|
|
|
continue
|
|
|
|
astTop = token
|
2019-08-10 18:12:23 +02:00
|
|
|
while astTop.astParent and astTop.astParent.str not in (',', ';'):
|
2018-09-29 09:05:13 +02:00
|
|
|
astTop = astTop.astParent
|
|
|
|
if countSideEffects(astTop) >= 2:
|
|
|
|
self.reportError(astTop, 13, 3)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_13_4(self, data):
|
|
|
|
for token in data.tokenlist:
|
|
|
|
if token.str != '=':
|
|
|
|
continue
|
|
|
|
if not token.astParent:
|
|
|
|
continue
|
2019-08-10 18:12:23 +02:00
|
|
|
if token.astOperand1.str == '[' and token.astOperand1.previous.str in ('{', ','):
|
2018-09-29 09:05:13 +02:00
|
|
|
continue
|
2019-04-27 17:43:26 +02:00
|
|
|
if not (token.astParent.str in [',', ';', '{']):
|
2018-09-29 09:05:13 +02:00
|
|
|
self.reportError(token, 13, 4)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_13_5(self, data):
|
|
|
|
for token in data.tokenlist:
|
|
|
|
if token.isLogicalOp and hasSideEffectsRecursive(token.astOperand2):
|
|
|
|
self.reportError(token, 13, 5)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_13_6(self, data):
|
|
|
|
for token in data.tokenlist:
|
|
|
|
if token.str == 'sizeof' and hasSideEffectsRecursive(token.next):
|
|
|
|
self.reportError(token, 13, 6)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_14_1(self, data):
|
|
|
|
for token in data.tokenlist:
|
|
|
|
if token.str == 'for':
|
|
|
|
exprs = getForLoopExpressions(token)
|
|
|
|
if not exprs:
|
|
|
|
continue
|
|
|
|
for counter in findCounterTokens(exprs[1]):
|
|
|
|
if counter.valueType and counter.valueType.isFloat():
|
|
|
|
self.reportError(token, 14, 1)
|
|
|
|
elif token.str == 'while':
|
|
|
|
if isFloatCounterInWhileLoop(token):
|
|
|
|
self.reportError(token, 14, 1)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_14_2(self, data):
|
|
|
|
for token in data.tokenlist:
|
|
|
|
expressions = getForLoopExpressions(token)
|
|
|
|
if not expressions:
|
|
|
|
continue
|
|
|
|
if expressions[0] and not expressions[0].isAssignmentOp:
|
|
|
|
self.reportError(token, 14, 2)
|
|
|
|
elif hasSideEffectsRecursive(expressions[1]):
|
|
|
|
self.reportError(token, 14, 2)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2019-12-15 18:23:12 +01:00
|
|
|
# Inspect modification of loop counter in loop body
|
|
|
|
counter_vars = getForLoopCounterVariables(token)
|
|
|
|
outer_scope = token.scope
|
|
|
|
body_scope = None
|
|
|
|
tn = token.next
|
|
|
|
while tn and tn.next != outer_scope.bodyEnd:
|
|
|
|
if tn.scope and tn.scope.nestedIn == outer_scope:
|
|
|
|
body_scope = tn.scope
|
|
|
|
break
|
|
|
|
tn = tn.next
|
|
|
|
if not body_scope:
|
|
|
|
continue
|
|
|
|
tn = body_scope.bodyStart
|
|
|
|
while tn and tn != body_scope.bodyEnd:
|
|
|
|
if tn.variable and tn.variable in counter_vars:
|
|
|
|
if tn.next:
|
|
|
|
# TODO: Check modifications in function calls
|
|
|
|
if hasSideEffectsRecursive(tn.next):
|
|
|
|
self.reportError(tn, 14, 2)
|
|
|
|
tn = tn.next
|
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_14_4(self, data):
|
|
|
|
for token in data.tokenlist:
|
|
|
|
if token.str != '(':
|
|
|
|
continue
|
|
|
|
if not token.astOperand1 or not (token.astOperand1.str in ['if', 'while']):
|
2018-06-04 10:12:51 +02:00
|
|
|
continue
|
2018-09-29 09:05:13 +02:00
|
|
|
if not isBoolExpression(token.astOperand2):
|
|
|
|
self.reportError(token, 14, 4)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_15_1(self, data):
|
|
|
|
for token in data.tokenlist:
|
|
|
|
if token.str == "goto":
|
|
|
|
self.reportError(token, 15, 1)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_15_2(self, data):
|
|
|
|
for token in data.tokenlist:
|
|
|
|
if token.str != 'goto':
|
|
|
|
continue
|
|
|
|
if (not token.next) or (not token.next.isName):
|
|
|
|
continue
|
|
|
|
if not findGotoLabel(token):
|
|
|
|
self.reportError(token, 15, 2)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_15_3(self, data):
|
|
|
|
for token in data.tokenlist:
|
|
|
|
if token.str != 'goto':
|
|
|
|
continue
|
|
|
|
if (not token.next) or (not token.next.isName):
|
|
|
|
continue
|
|
|
|
tok = findGotoLabel(token)
|
|
|
|
if not tok:
|
|
|
|
continue
|
|
|
|
scope = token.scope
|
|
|
|
while scope and scope != tok.scope:
|
|
|
|
scope = scope.nestedIn
|
|
|
|
if not scope:
|
|
|
|
self.reportError(token, 15, 3)
|
2020-02-16 00:03:18 +01:00
|
|
|
# Jump crosses from one switch-clause to another is non-compliant
|
|
|
|
elif scope.type == 'Switch':
|
|
|
|
# Search for start of a current case block
|
|
|
|
tcase_start = token
|
|
|
|
while tcase_start and tcase_start.str not in ('case', 'default'):
|
|
|
|
tcase_start = tcase_start.previous
|
|
|
|
# Make sure that goto label doesn't occurs in the other
|
|
|
|
# switch-clauses
|
|
|
|
if tcase_start:
|
|
|
|
t = scope.bodyStart
|
|
|
|
in_this_case = False
|
|
|
|
while t and t != scope.bodyEnd:
|
|
|
|
if t == tcase_start:
|
|
|
|
in_this_case = True
|
|
|
|
if in_this_case and t.str not in ('case', 'default'):
|
|
|
|
in_this_case = False
|
|
|
|
if t == tok and not in_this_case:
|
|
|
|
self.reportError(token, 15, 3)
|
|
|
|
break
|
|
|
|
t = t.next
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_15_5(self, data):
|
|
|
|
for token in data.tokenlist:
|
|
|
|
if token.str == 'return' and token.scope.type != 'Function':
|
|
|
|
self.reportError(token, 15, 5)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_15_6(self, rawTokens):
|
|
|
|
state = 0
|
|
|
|
indent = 0
|
|
|
|
tok1 = None
|
|
|
|
for token in rawTokens:
|
|
|
|
if token.str in ['if', 'for', 'while']:
|
|
|
|
if simpleMatch(token.previous, '# if'):
|
|
|
|
continue
|
|
|
|
if simpleMatch(token.previous, "} while"):
|
|
|
|
# is there a 'do { .. } while'?
|
|
|
|
start = rawlink(token.previous)
|
|
|
|
if start and simpleMatch(start.previous, 'do {'):
|
|
|
|
continue
|
|
|
|
if state == 2:
|
|
|
|
self.reportError(tok1, 15, 6)
|
|
|
|
state = 1
|
|
|
|
indent = 0
|
|
|
|
tok1 = token
|
|
|
|
elif token.str == 'else':
|
|
|
|
if simpleMatch(token.previous, '# else'):
|
|
|
|
continue
|
|
|
|
if simpleMatch(token, 'else if'):
|
|
|
|
continue
|
|
|
|
if state == 2:
|
|
|
|
self.reportError(tok1, 15, 6)
|
|
|
|
state = 2
|
|
|
|
indent = 0
|
|
|
|
tok1 = token
|
|
|
|
elif state == 1:
|
|
|
|
if indent == 0 and token.str != '(':
|
|
|
|
state = 0
|
|
|
|
continue
|
|
|
|
if token.str == '(':
|
|
|
|
indent = indent + 1
|
|
|
|
elif token.str == ')':
|
|
|
|
if indent == 0:
|
|
|
|
state = 0
|
|
|
|
elif indent == 1:
|
|
|
|
state = 2
|
|
|
|
indent = indent - 1
|
|
|
|
elif state == 2:
|
|
|
|
if token.str.startswith('//') or token.str.startswith('/*'):
|
|
|
|
continue
|
|
|
|
state = 0
|
2020-05-07 16:54:27 +02:00
|
|
|
if token.str not in ('{', '#'):
|
2018-09-29 09:05:13 +02:00
|
|
|
self.reportError(tok1, 15, 6)
|
|
|
|
|
|
|
|
def misra_15_7(self, data):
|
2018-10-01 20:16:48 +02:00
|
|
|
for scope in data.scopes:
|
|
|
|
if scope.type != 'Else':
|
2018-09-29 09:05:13 +02:00
|
|
|
continue
|
2018-10-01 20:16:48 +02:00
|
|
|
if not simpleMatch(scope.bodyStart, '{ if ('):
|
2018-09-29 09:05:13 +02:00
|
|
|
continue
|
2019-08-18 12:19:05 +02:00
|
|
|
if scope.bodyStart.column > 0:
|
2019-08-11 17:54:02 +02:00
|
|
|
continue
|
2018-10-01 20:16:48 +02:00
|
|
|
tok = scope.bodyStart.next.next.link
|
|
|
|
if not simpleMatch(tok, ') {'):
|
2018-09-29 09:05:13 +02:00
|
|
|
continue
|
2018-10-01 20:16:48 +02:00
|
|
|
tok = tok.next.link
|
|
|
|
if not simpleMatch(tok, '} else'):
|
|
|
|
self.reportError(tok, 15, 7)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
# TODO add 16.1 rule
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_16_2(self, data):
|
|
|
|
for token in data.tokenlist:
|
|
|
|
if token.str == 'case' and token.scope.type != 'Switch':
|
|
|
|
self.reportError(token, 16, 2)
|
|
|
|
|
|
|
|
def misra_16_3(self, rawTokens):
|
2020-01-08 06:54:43 +01:00
|
|
|
STATE_NONE = 0 # default state, not in switch case/default block
|
2018-09-29 09:05:13 +02:00
|
|
|
STATE_BREAK = 1 # break/comment is seen but not its ';'
|
2020-01-08 06:54:43 +01:00
|
|
|
STATE_OK = 2 # a case/default is allowed (we have seen 'break;'/'comment'/'{'/attribute)
|
|
|
|
STATE_SWITCH = 3 # walking through switch statement scope
|
2019-11-08 08:06:10 +01:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
state = STATE_NONE
|
2019-11-08 08:06:10 +01:00
|
|
|
end_swtich_token = None # end '}' for the switch scope
|
2018-09-29 09:05:13 +02:00
|
|
|
for token in rawTokens:
|
2019-11-08 08:06:10 +01:00
|
|
|
# Find switch scope borders
|
|
|
|
if token.str == 'switch':
|
|
|
|
state = STATE_SWITCH
|
|
|
|
if state == STATE_SWITCH:
|
|
|
|
if token.str == '{':
|
|
|
|
end_swtich_token = findRawLink(token)
|
|
|
|
else:
|
|
|
|
continue
|
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
if token.str == 'break' or token.str == 'return' or token.str == 'throw':
|
|
|
|
state = STATE_BREAK
|
|
|
|
elif token.str == ';':
|
|
|
|
if state == STATE_BREAK:
|
|
|
|
state = STATE_OK
|
2019-11-08 08:06:10 +01:00
|
|
|
elif token.next and token.next == end_swtich_token:
|
|
|
|
self.reportError(token.next, 16, 3)
|
2018-09-29 09:05:13 +02:00
|
|
|
else:
|
|
|
|
state = STATE_NONE
|
|
|
|
elif token.str.startswith('/*') or token.str.startswith('//'):
|
|
|
|
if 'fallthrough' in token.str.lower():
|
|
|
|
state = STATE_OK
|
|
|
|
elif simpleMatch(token, '[ [ fallthrough ] ] ;'):
|
|
|
|
state = STATE_BREAK
|
|
|
|
elif token.str == '{':
|
|
|
|
state = STATE_OK
|
|
|
|
elif token.str == '}' and state == STATE_OK:
|
|
|
|
# is this {} an unconditional block of code?
|
2019-08-11 19:08:57 +02:00
|
|
|
prev = findRawLink(token)
|
|
|
|
if prev:
|
|
|
|
prev = prev.previous
|
|
|
|
while prev and prev.str[:2] in ('//', '/*'):
|
|
|
|
prev = prev.previous
|
|
|
|
if (prev is None) or (prev.str not in ':;{}'):
|
2018-09-29 09:05:13 +02:00
|
|
|
state = STATE_NONE
|
|
|
|
elif token.str == 'case' or token.str == 'default':
|
|
|
|
if state != STATE_OK:
|
|
|
|
self.reportError(token, 16, 3)
|
|
|
|
state = STATE_OK
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_16_4(self, data):
|
|
|
|
for token in data.tokenlist:
|
|
|
|
if token.str != 'switch':
|
2017-10-09 15:25:55 +02:00
|
|
|
continue
|
2018-09-29 09:05:13 +02:00
|
|
|
if not simpleMatch(token, 'switch ('):
|
2017-10-09 15:25:55 +02:00
|
|
|
continue
|
2018-09-29 09:05:13 +02:00
|
|
|
if not simpleMatch(token.next.link, ') {'):
|
|
|
|
continue
|
|
|
|
startTok = token.next.link.next
|
|
|
|
tok = startTok.next
|
|
|
|
while tok and tok.str != '}':
|
|
|
|
if tok.str == '{':
|
|
|
|
tok = tok.link
|
|
|
|
elif tok.str == 'default':
|
|
|
|
break
|
|
|
|
tok = tok.next
|
|
|
|
if tok and tok.str != 'default':
|
|
|
|
self.reportError(token, 16, 4)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_16_5(self, data):
|
|
|
|
for token in data.tokenlist:
|
|
|
|
if token.str != 'default':
|
|
|
|
continue
|
|
|
|
if token.previous and token.previous.str == '{':
|
|
|
|
continue
|
|
|
|
tok2 = token
|
|
|
|
while tok2:
|
2019-08-10 18:12:23 +02:00
|
|
|
if tok2.str in ('}', 'case'):
|
2018-09-29 09:05:13 +02:00
|
|
|
break
|
|
|
|
if tok2.str == '{':
|
|
|
|
tok2 = tok2.link
|
|
|
|
tok2 = tok2.next
|
|
|
|
if tok2 and tok2.str == 'case':
|
|
|
|
self.reportError(token, 16, 5)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_16_6(self, data):
|
|
|
|
for token in data.tokenlist:
|
|
|
|
if not (simpleMatch(token, 'switch (') and simpleMatch(token.next.link, ') {')):
|
|
|
|
continue
|
|
|
|
tok = token.next.link.next.next
|
|
|
|
count = 0
|
|
|
|
while tok:
|
|
|
|
if tok.str in ['break', 'return', 'throw']:
|
|
|
|
count = count + 1
|
|
|
|
elif tok.str == '{':
|
|
|
|
tok = tok.link
|
|
|
|
if isNoReturnScope(tok):
|
|
|
|
count = count + 1
|
|
|
|
elif tok.str == '}':
|
|
|
|
break
|
|
|
|
tok = tok.next
|
|
|
|
if count < 2:
|
|
|
|
self.reportError(token, 16, 6)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_16_7(self, data):
|
|
|
|
for token in data.tokenlist:
|
|
|
|
if simpleMatch(token, 'switch (') and isBoolExpression(token.next.astOperand2):
|
|
|
|
self.reportError(token, 16, 7)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_17_1(self, data):
|
|
|
|
for token in data.tokenlist:
|
2020-01-08 06:54:43 +01:00
|
|
|
if isFunctionCall(token) and token.astOperand1.str in (
|
|
|
|
'va_list', 'va_arg', 'va_start', 'va_end', 'va_copy'):
|
2018-09-29 09:05:13 +02:00
|
|
|
self.reportError(token, 17, 1)
|
|
|
|
elif token.str == 'va_list':
|
|
|
|
self.reportError(token, 17, 1)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2019-07-13 15:09:50 +02:00
|
|
|
def misra_17_2(self, data):
|
|
|
|
# find recursions..
|
2019-11-13 14:45:34 +01:00
|
|
|
def find_recursive_call(search_for_function, direct_call, calls_map, visited=None):
|
|
|
|
if visited is None:
|
|
|
|
visited = set()
|
2019-07-13 15:09:50 +02:00
|
|
|
if direct_call == search_for_function:
|
|
|
|
return True
|
2019-07-13 15:17:19 +02:00
|
|
|
for indirect_call in calls_map.get(direct_call, []):
|
2019-07-13 15:09:50 +02:00
|
|
|
if indirect_call == search_for_function:
|
|
|
|
return True
|
|
|
|
if indirect_call in visited:
|
|
|
|
# This has already been handled
|
|
|
|
continue
|
|
|
|
visited.add(indirect_call)
|
|
|
|
if find_recursive_call(search_for_function, indirect_call, calls_map, visited):
|
|
|
|
return True
|
|
|
|
return False
|
|
|
|
|
|
|
|
# List functions called in each function
|
|
|
|
function_calls = {}
|
|
|
|
for scope in data.scopes:
|
|
|
|
if scope.type != 'Function':
|
|
|
|
continue
|
|
|
|
calls = []
|
|
|
|
tok = scope.bodyStart
|
|
|
|
while tok != scope.bodyEnd:
|
|
|
|
tok = tok.next
|
2020-02-09 10:46:13 +01:00
|
|
|
if not isFunctionCall(tok, data.standards.c):
|
2019-07-13 15:09:50 +02:00
|
|
|
continue
|
|
|
|
f = tok.astOperand1.function
|
|
|
|
if f is not None and f not in calls:
|
|
|
|
calls.append(f)
|
|
|
|
function_calls[scope.function] = calls
|
|
|
|
|
|
|
|
# Report warnings for all recursions..
|
|
|
|
for func in function_calls:
|
|
|
|
for call in function_calls[func]:
|
|
|
|
if not find_recursive_call(func, call, function_calls):
|
|
|
|
# Function call is not recursive
|
|
|
|
continue
|
|
|
|
# Warn about all functions calls..
|
|
|
|
for scope in data.scopes:
|
|
|
|
if scope.type != 'Function' or scope.function != func:
|
|
|
|
continue
|
|
|
|
tok = scope.bodyStart
|
|
|
|
while tok != scope.bodyEnd:
|
|
|
|
if tok.function and tok.function == call:
|
|
|
|
self.reportError(tok, 17, 2)
|
|
|
|
tok = tok.next
|
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_17_6(self, rawTokens):
|
|
|
|
for token in rawTokens:
|
|
|
|
if simpleMatch(token, '[ static'):
|
|
|
|
self.reportError(token, 17, 6)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2019-05-22 21:39:33 +02:00
|
|
|
def misra_17_7(self, data):
|
|
|
|
for token in data.tokenlist:
|
|
|
|
if not token.scope.isExecutable:
|
|
|
|
continue
|
|
|
|
if token.str != '(' or token.astParent:
|
|
|
|
continue
|
|
|
|
if not token.previous.isName or token.previous.varId:
|
|
|
|
continue
|
|
|
|
if token.valueType is None:
|
|
|
|
continue
|
|
|
|
if token.valueType.type == 'void' and token.valueType.pointer == 0:
|
|
|
|
continue
|
|
|
|
self.reportError(token, 17, 7)
|
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_17_8(self, data):
|
|
|
|
for token in data.tokenlist:
|
2019-08-10 18:12:23 +02:00
|
|
|
if not (token.isAssignmentOp or (token.str in ('++', '--'))):
|
2018-09-29 09:05:13 +02:00
|
|
|
continue
|
|
|
|
if not token.astOperand1:
|
|
|
|
continue
|
|
|
|
var = token.astOperand1.variable
|
|
|
|
if var and var.isArgument:
|
|
|
|
self.reportError(token, 17, 8)
|
|
|
|
|
2019-07-09 15:57:12 +02:00
|
|
|
def misra_18_4(self, data):
|
|
|
|
for token in data.tokenlist:
|
2019-11-12 15:32:05 +01:00
|
|
|
if token.str not in ('+', '-', '+=', '-='):
|
2019-07-09 15:57:12 +02:00
|
|
|
continue
|
|
|
|
if token.astOperand1 is None or token.astOperand2 is None:
|
|
|
|
continue
|
|
|
|
vt1 = token.astOperand1.valueType
|
|
|
|
vt2 = token.astOperand2.valueType
|
|
|
|
if vt1 and vt1.pointer > 0:
|
|
|
|
self.reportError(token, 18, 4)
|
|
|
|
elif vt2 and vt2.pointer > 0:
|
|
|
|
self.reportError(token, 18, 4)
|
2018-09-29 09:05:13 +02:00
|
|
|
|
|
|
|
def misra_18_5(self, data):
|
|
|
|
for var in data.variables:
|
|
|
|
if not var.isPointer:
|
|
|
|
continue
|
|
|
|
typetok = var.nameToken
|
|
|
|
count = 0
|
|
|
|
while typetok:
|
|
|
|
if typetok.str == '*':
|
2017-10-09 15:25:55 +02:00
|
|
|
count = count + 1
|
2018-09-29 09:05:13 +02:00
|
|
|
elif not typetok.isName:
|
|
|
|
break
|
|
|
|
typetok = typetok.previous
|
|
|
|
if count > 2:
|
|
|
|
self.reportError(var.nameToken, 18, 5)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2019-07-01 07:00:43 +02:00
|
|
|
def misra_18_7(self, data):
|
|
|
|
for scope in data.scopes:
|
|
|
|
if scope.type != 'Struct':
|
|
|
|
continue
|
|
|
|
|
|
|
|
token = scope.bodyStart.next
|
|
|
|
while token != scope.bodyEnd and token is not None:
|
|
|
|
# Handle nested structures to not duplicate an error.
|
|
|
|
if token.str == '{':
|
|
|
|
token = token.link
|
|
|
|
|
|
|
|
if cppcheckdata.simpleMatch(token, "[ ]"):
|
|
|
|
self.reportError(token, 18, 7)
|
|
|
|
break
|
|
|
|
token = token.next
|
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_18_8(self, data):
|
|
|
|
for var in data.variables:
|
|
|
|
if not var.isArray or not var.isLocal:
|
|
|
|
continue
|
|
|
|
# TODO Array dimensions are not available in dump, must look in tokens
|
|
|
|
typetok = var.nameToken.next
|
|
|
|
if not typetok or typetok.str != '[':
|
|
|
|
continue
|
2019-10-26 17:29:38 +02:00
|
|
|
# Unknown define or syntax error
|
|
|
|
if not typetok.astOperand2:
|
|
|
|
continue
|
2018-09-29 09:05:13 +02:00
|
|
|
if not isConstantExpression(typetok.astOperand2):
|
|
|
|
self.reportError(var.nameToken, 18, 8)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_19_2(self, data):
|
|
|
|
for token in data.tokenlist:
|
|
|
|
if token.str == 'union':
|
|
|
|
self.reportError(token, 19, 2)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_20_1(self, data):
|
2020-01-08 06:54:43 +01:00
|
|
|
token_in_file = {}
|
|
|
|
for token in data.tokenlist:
|
|
|
|
if token.file not in token_in_file:
|
|
|
|
token_in_file[token.file] = int(token.linenr)
|
|
|
|
else:
|
|
|
|
token_in_file[token.file] = min(token_in_file[token.file], int(token.linenr))
|
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
for directive in data.directives:
|
|
|
|
if not directive.str.startswith('#include'):
|
|
|
|
continue
|
2020-01-08 06:54:43 +01:00
|
|
|
if directive.file not in token_in_file:
|
|
|
|
continue
|
|
|
|
if token_in_file[directive.file] < int(directive.linenr):
|
|
|
|
self.reportError(directive, 20, 1)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_20_2(self, data):
|
|
|
|
for directive in data.directives:
|
|
|
|
if not directive.str.startswith('#include '):
|
|
|
|
continue
|
2020-02-12 17:11:57 +01:00
|
|
|
for pattern in ('\\', '//', '/*', ',', "'"):
|
2018-09-29 09:05:13 +02:00
|
|
|
if pattern in directive.str:
|
|
|
|
self.reportError(directive, 20, 2)
|
|
|
|
break
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2020-02-10 08:56:26 +01:00
|
|
|
def misra_20_3(self, data):
|
|
|
|
for directive in data.directives:
|
|
|
|
if not directive.str.startswith('#include '):
|
2018-09-29 09:05:13 +02:00
|
|
|
continue
|
2020-02-10 08:56:26 +01:00
|
|
|
|
|
|
|
words = directive.str.split(' ')
|
|
|
|
|
|
|
|
# If include directive contains more than two words, here would be
|
|
|
|
# violation anyway.
|
|
|
|
if len(words) > 2:
|
|
|
|
self.reportError(directive, 20, 3)
|
|
|
|
|
|
|
|
# Handle include directives with not quoted argument
|
|
|
|
elif len(words) > 1:
|
|
|
|
filename = words[1]
|
|
|
|
if not ((filename.startswith('"') and
|
|
|
|
filename.endswith('"')) or
|
|
|
|
(filename.startswith('<') and
|
|
|
|
filename.endswith('>'))):
|
|
|
|
# We are handle only directly included files in the
|
|
|
|
# following format: #include file.h
|
|
|
|
# Cases with macro expansion provided by MISRA document are
|
|
|
|
# skipped because we don't always have access to directive
|
|
|
|
# definition.
|
|
|
|
if '.' in filename:
|
|
|
|
self.reportError(directive, 20, 3)
|
2018-09-29 09:05:13 +02:00
|
|
|
|
|
|
|
def misra_20_4(self, data):
|
|
|
|
for directive in data.directives:
|
|
|
|
res = re.search(r'#define ([a-z][a-z0-9_]+)', directive.str)
|
2020-02-09 10:46:13 +01:00
|
|
|
if res and isKeyword(res.group(1), data.standards.c):
|
2018-09-29 09:05:13 +02:00
|
|
|
self.reportError(directive, 20, 4)
|
|
|
|
|
|
|
|
def misra_20_5(self, data):
|
|
|
|
for directive in data.directives:
|
|
|
|
if directive.str.startswith('#undef '):
|
|
|
|
self.reportError(directive, 20, 5)
|
|
|
|
|
2019-04-11 10:36:02 +02:00
|
|
|
def misra_20_7(self, data):
|
2020-07-24 06:15:40 +02:00
|
|
|
def find_string_concat(exp, arg, directive_args):
|
|
|
|
# Handle concatenation of string literals, e.g.:
|
|
|
|
# #define MACRO(A, B) (A " " B)
|
|
|
|
# Addon should not report errors for both macro arguments.
|
|
|
|
arg_pos = exp.find(arg, 0)
|
|
|
|
need_check = False
|
|
|
|
skip_next = False
|
|
|
|
state_in_string = False
|
|
|
|
pos_search = arg_pos + 1
|
|
|
|
directive_args = [a.strip() for a in directive_args if a != arg]
|
|
|
|
arg = arg.strip()
|
|
|
|
while pos_search < len(exp):
|
|
|
|
if exp[pos_search] == '"':
|
|
|
|
if state_in_string:
|
|
|
|
state_in_string = False
|
|
|
|
else:
|
|
|
|
state_in_string = True
|
|
|
|
pos_search += 1
|
2020-08-08 12:03:19 +02:00
|
|
|
elif exp[pos_search].isalnum():
|
|
|
|
word = ""
|
|
|
|
while pos_search < len(exp) and exp[pos_search].isalnum():
|
|
|
|
word += exp[pos_search]
|
|
|
|
pos_search += 1
|
|
|
|
if word == arg:
|
|
|
|
pos_search += 1
|
|
|
|
elif word in directive_args:
|
|
|
|
skip_next = True
|
|
|
|
break
|
2020-07-24 06:15:40 +02:00
|
|
|
elif exp[pos_search] == ' ':
|
|
|
|
pos_search += 1
|
|
|
|
elif state_in_string:
|
|
|
|
pos_search += 1
|
|
|
|
else:
|
|
|
|
need_check = True
|
|
|
|
break
|
|
|
|
return need_check, skip_next
|
|
|
|
|
2019-04-11 10:36:02 +02:00
|
|
|
for directive in data.directives:
|
|
|
|
d = Define(directive)
|
|
|
|
exp = '(' + d.expansionList + ')'
|
2020-07-24 06:15:40 +02:00
|
|
|
skip_next = False
|
2019-04-11 10:36:02 +02:00
|
|
|
for arg in d.args:
|
2020-07-24 06:15:40 +02:00
|
|
|
if skip_next:
|
|
|
|
_, skip_next = find_string_concat(exp, arg, d.args)
|
|
|
|
continue
|
|
|
|
need_check, skip_next = find_string_concat(exp, arg, d.args)
|
|
|
|
if not need_check:
|
|
|
|
continue
|
|
|
|
|
2019-04-11 10:50:07 +02:00
|
|
|
pos = 0
|
|
|
|
while pos < len(exp):
|
|
|
|
pos = exp.find(arg, pos)
|
|
|
|
if pos < 0:
|
|
|
|
break
|
2019-12-08 20:07:52 +01:00
|
|
|
# is 'arg' used at position pos
|
2019-04-11 10:50:07 +02:00
|
|
|
pos1 = pos - 1
|
|
|
|
pos2 = pos + len(arg)
|
|
|
|
pos = pos2
|
2019-12-08 20:07:52 +01:00
|
|
|
if pos1 >= 0 and (isalnum(exp[pos1]) or exp[pos1] == '_'):
|
2019-04-11 10:50:07 +02:00
|
|
|
continue
|
2019-12-08 20:07:52 +01:00
|
|
|
if pos2 < len(exp) and (isalnum(exp[pos2]) or exp[pos2] == '_'):
|
2019-04-11 10:50:07 +02:00
|
|
|
continue
|
2019-12-08 20:07:52 +01:00
|
|
|
|
|
|
|
while pos1 >= 0 and exp[pos1] == ' ':
|
2019-04-11 10:50:07 +02:00
|
|
|
pos1 -= 1
|
2020-07-21 18:59:11 +02:00
|
|
|
if exp[pos1] == '#':
|
|
|
|
continue
|
|
|
|
if exp[pos1] not in '([,.':
|
2019-06-27 06:59:47 +02:00
|
|
|
self.reportError(directive, 20, 7)
|
2019-04-11 10:50:07 +02:00
|
|
|
break
|
2019-12-08 20:07:52 +01:00
|
|
|
while pos2 < len(exp) and exp[pos2] == ' ':
|
2019-04-11 10:50:07 +02:00
|
|
|
pos2 += 1
|
2019-12-14 16:31:41 +01:00
|
|
|
if pos2 < len(exp) and exp[pos2] not in ')]#,':
|
2019-06-27 06:59:47 +02:00
|
|
|
self.reportError(directive, 20, 7)
|
2019-04-11 10:50:07 +02:00
|
|
|
break
|
|
|
|
|
|
|
|
def misra_20_10(self, data):
|
|
|
|
for directive in data.directives:
|
|
|
|
d = Define(directive)
|
|
|
|
if d.expansionList.find('#') >= 0:
|
2019-06-27 06:59:47 +02:00
|
|
|
self.reportError(directive, 20, 10)
|
2019-04-11 10:36:02 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_20_13(self, data):
|
2019-04-08 18:26:17 +02:00
|
|
|
dir_pattern = re.compile(r'#[ ]*([^ (<]*)')
|
2018-09-29 09:05:13 +02:00
|
|
|
for directive in data.directives:
|
|
|
|
dir = directive.str
|
2019-04-08 18:26:17 +02:00
|
|
|
mo = dir_pattern.match(dir)
|
|
|
|
if mo:
|
|
|
|
dir = mo.group(1)
|
|
|
|
if dir not in ['define', 'elif', 'else', 'endif', 'error', 'if', 'ifdef', 'ifndef', 'include',
|
2020-01-08 06:54:43 +01:00
|
|
|
'pragma', 'undef', 'warning']:
|
2018-09-29 09:05:13 +02:00
|
|
|
self.reportError(directive, 20, 13)
|
|
|
|
|
|
|
|
def misra_20_14(self, data):
|
|
|
|
# stack for #if blocks. contains the #if directive until the corresponding #endif is seen.
|
|
|
|
# the size increases when there are inner #if directives.
|
|
|
|
ifStack = []
|
|
|
|
for directive in data.directives:
|
2020-01-08 06:54:43 +01:00
|
|
|
if directive.str.startswith('#if ') or directive.str.startswith('#ifdef ') or directive.str.startswith(
|
|
|
|
'#ifndef '):
|
2018-09-29 09:05:13 +02:00
|
|
|
ifStack.append(directive)
|
|
|
|
elif directive.str == '#else' or directive.str.startswith('#elif '):
|
|
|
|
if len(ifStack) == 0:
|
|
|
|
self.reportError(directive, 20, 14)
|
|
|
|
ifStack.append(directive)
|
|
|
|
elif directive.file != ifStack[-1].file:
|
|
|
|
self.reportError(directive, 20, 14)
|
|
|
|
elif directive.str == '#endif':
|
|
|
|
if len(ifStack) == 0:
|
|
|
|
self.reportError(directive, 20, 14)
|
|
|
|
elif directive.file != ifStack[-1].file:
|
|
|
|
self.reportError(directive, 20, 14)
|
|
|
|
ifStack.pop()
|
|
|
|
|
2019-10-26 08:32:46 +02:00
|
|
|
def misra_21_1(self, data):
|
2020-02-11 11:10:54 +01:00
|
|
|
re_forbidden_macro = re.compile(r'#(?:define|undef) _[_A-Z]+')
|
|
|
|
re_macro_name = re.compile(r'#(?:define|undef) (.+)[ $]')
|
2019-10-26 08:32:46 +02:00
|
|
|
|
2020-02-11 11:10:54 +01:00
|
|
|
for d in data.directives:
|
|
|
|
# Search for forbidden identifiers
|
|
|
|
m = re.search(re_forbidden_macro, d.str)
|
|
|
|
if m:
|
|
|
|
self.reportError(d, 21, 1)
|
|
|
|
continue
|
|
|
|
|
|
|
|
# Search standard library identifiers in macro names
|
|
|
|
m = re.search(re_macro_name, d.str)
|
|
|
|
if not m:
|
|
|
|
continue
|
|
|
|
name = m.group(1)
|
|
|
|
if isStdLibId(name, data.standards.c):
|
|
|
|
self.reportError(d, 21, 1)
|
2019-10-26 08:32:46 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_21_3(self, data):
|
|
|
|
for token in data.tokenlist:
|
2019-08-10 18:12:23 +02:00
|
|
|
if isFunctionCall(token) and (token.astOperand1.str in ('malloc', 'calloc', 'realloc', 'free')):
|
2018-09-29 09:05:13 +02:00
|
|
|
self.reportError(token, 21, 3)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_21_4(self, data):
|
|
|
|
directive = findInclude(data.directives, '<setjmp.h>')
|
|
|
|
if directive:
|
|
|
|
self.reportError(directive, 21, 4)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_21_5(self, data):
|
|
|
|
directive = findInclude(data.directives, '<signal.h>')
|
|
|
|
if directive:
|
|
|
|
self.reportError(directive, 21, 5)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_21_6(self, data):
|
|
|
|
dir_stdio = findInclude(data.directives, '<stdio.h>')
|
|
|
|
dir_wchar = findInclude(data.directives, '<wchar.h>')
|
|
|
|
if dir_stdio:
|
|
|
|
self.reportError(dir_stdio, 21, 6)
|
|
|
|
if dir_wchar:
|
|
|
|
self.reportError(dir_wchar, 21, 6)
|
|
|
|
|
|
|
|
def misra_21_7(self, data):
|
2017-10-09 15:25:55 +02:00
|
|
|
for token in data.tokenlist:
|
2019-08-10 18:12:23 +02:00
|
|
|
if isFunctionCall(token) and (token.astOperand1.str in ('atof', 'atoi', 'atol', 'atoll')):
|
2018-09-29 09:05:13 +02:00
|
|
|
self.reportError(token, 21, 7)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_21_8(self, data):
|
|
|
|
for token in data.tokenlist:
|
2019-08-10 18:12:23 +02:00
|
|
|
if isFunctionCall(token) and (token.astOperand1.str in ('abort', 'exit', 'getenv', 'system')):
|
2018-09-29 09:05:13 +02:00
|
|
|
self.reportError(token, 21, 8)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_21_9(self, data):
|
|
|
|
for token in data.tokenlist:
|
2019-08-10 18:12:23 +02:00
|
|
|
if (token.str in ('bsearch', 'qsort')) and token.next and token.next.str == '(':
|
2018-09-29 09:05:13 +02:00
|
|
|
self.reportError(token, 21, 9)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_21_10(self, data):
|
|
|
|
directive = findInclude(data.directives, '<time.h>')
|
|
|
|
if directive:
|
|
|
|
self.reportError(directive, 21, 10)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
for token in data.tokenlist:
|
|
|
|
if (token.str == 'wcsftime') and token.next and token.next.str == '(':
|
|
|
|
self.reportError(token, 21, 10)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def misra_21_11(self, data):
|
|
|
|
directive = findInclude(data.directives, '<tgmath.h>')
|
|
|
|
if directive:
|
|
|
|
self.reportError(directive, 21, 11)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2019-10-26 08:32:46 +02:00
|
|
|
def misra_21_12(self, data):
|
|
|
|
if findInclude(data.directives, '<fenv.h>'):
|
|
|
|
for token in data.tokenlist:
|
|
|
|
if token.str == 'fexcept_t' and token.isName:
|
|
|
|
self.reportError(token, 21, 12)
|
|
|
|
if isFunctionCall(token) and (token.astOperand1.str in (
|
2019-11-12 15:32:05 +01:00
|
|
|
'feclearexcept',
|
|
|
|
'fegetexceptflag',
|
|
|
|
'feraiseexcept',
|
|
|
|
'fesetexceptflag',
|
|
|
|
'fetestexcept')):
|
2019-10-26 08:32:46 +02:00
|
|
|
self.reportError(token, 21, 12)
|
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def get_verify_expected(self):
|
|
|
|
"""Return the list of expected violations in the verify test"""
|
|
|
|
return self.verify_expected
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def get_verify_actual(self):
|
|
|
|
"""Return the list of actual violations in for the verify test"""
|
|
|
|
return self.verify_actual
|
2018-05-20 14:44:12 +02:00
|
|
|
|
2019-11-12 15:32:05 +01:00
|
|
|
def get_violations(self, violation_type=None):
|
2018-09-29 09:05:13 +02:00
|
|
|
"""Return the list of violations for a normal checker run"""
|
2019-11-12 15:32:05 +01:00
|
|
|
if violation_type is None:
|
2019-04-03 19:39:05 +02:00
|
|
|
return self.violations.items()
|
|
|
|
else:
|
|
|
|
return self.violations[violation_type]
|
|
|
|
|
|
|
|
def get_violation_types(self):
|
|
|
|
"""Return the list of violations for a normal checker run"""
|
|
|
|
return self.violations.keys()
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def addSuppressedRule(self, ruleNum,
|
2020-01-08 06:54:43 +01:00
|
|
|
fileName=None,
|
|
|
|
lineNumber=None,
|
|
|
|
symbolName=None):
|
2018-09-29 09:05:13 +02:00
|
|
|
"""
|
|
|
|
Add a suppression to the suppressions data structure
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
Suppressions are stored in a dictionary of dictionaries that
|
|
|
|
contains a list of tuples.
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
The first dictionary is keyed by the MISRA rule in hundreds
|
|
|
|
format. The value of that dictionary is a dictionary of filenames.
|
|
|
|
If the value is None then the rule is assumed to be suppressed for
|
|
|
|
all files.
|
2018-10-18 09:17:57 +02:00
|
|
|
If the filename exists then the value of that dictionary contains a list
|
|
|
|
with the scope of the suppression. If the list contains an item of None
|
2019-11-12 15:32:05 +01:00
|
|
|
then the rule is assumed to be suppressed for the entire file. Otherwise
|
2018-10-18 09:17:57 +02:00
|
|
|
the list contains line number, symbol name tuples.
|
2018-09-29 09:05:13 +02:00
|
|
|
For each tuple either line number or symbol name can can be none.
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
"""
|
2018-10-18 09:17:57 +02:00
|
|
|
normalized_filename = None
|
|
|
|
|
|
|
|
if fileName is not None:
|
2019-06-26 07:30:08 +02:00
|
|
|
normalized_filename = os.path.expanduser(fileName)
|
|
|
|
normalized_filename = os.path.normpath(normalized_filename)
|
2018-05-20 14:44:12 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
if lineNumber is not None or symbolName is not None:
|
|
|
|
line_symbol = (lineNumber, symbolName)
|
|
|
|
else:
|
|
|
|
line_symbol = None
|
2018-05-20 14:44:12 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
# If the rule is not in the dict already then add it
|
2019-11-12 15:32:05 +01:00
|
|
|
if ruleNum not in self.suppressedRules:
|
2018-09-29 09:05:13 +02:00
|
|
|
ruleItemList = list()
|
|
|
|
ruleItemList.append(line_symbol)
|
2018-05-20 14:44:12 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
fileDict = dict()
|
2018-10-18 09:17:57 +02:00
|
|
|
fileDict[normalized_filename] = ruleItemList
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
self.suppressedRules[ruleNum] = fileDict
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
# Rule is added. Done.
|
|
|
|
return
|
2018-05-24 06:31:20 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
# Rule existed in the dictionary. Check for
|
|
|
|
# filename entries.
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2019-03-01 01:01:39 +01:00
|
|
|
# Get the dictionary for the rule number
|
2018-09-29 09:05:13 +02:00
|
|
|
fileDict = self.suppressedRules[ruleNum]
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
# If the filename is not in the dict already add it
|
2019-11-12 15:32:05 +01:00
|
|
|
if normalized_filename not in fileDict:
|
2018-09-29 09:05:13 +02:00
|
|
|
ruleItemList = list()
|
|
|
|
ruleItemList.append(line_symbol)
|
2018-03-31 12:52:00 +02:00
|
|
|
|
2018-10-18 09:17:57 +02:00
|
|
|
fileDict[normalized_filename] = ruleItemList
|
2018-03-31 12:52:00 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
# Rule is added with a file scope. Done
|
|
|
|
return
|
2018-03-31 12:52:00 +02:00
|
|
|
|
2018-10-18 09:17:57 +02:00
|
|
|
# Rule has a matching filename. Get the rule item list.
|
2018-03-31 12:52:00 +02:00
|
|
|
|
2018-10-18 09:17:57 +02:00
|
|
|
# Check the lists of rule items
|
2019-11-12 15:32:05 +01:00
|
|
|
# to see if this (lineNumber, symbolName) combination
|
2018-10-18 09:17:57 +02:00
|
|
|
# or None already exists.
|
|
|
|
ruleItemList = fileDict[normalized_filename]
|
2018-09-29 09:05:13 +02:00
|
|
|
|
|
|
|
if line_symbol is None:
|
|
|
|
# is it already in the list?
|
2019-11-12 15:32:05 +01:00
|
|
|
if line_symbol not in ruleItemList:
|
2018-09-29 09:05:13 +02:00
|
|
|
ruleItemList.append(line_symbol)
|
|
|
|
else:
|
|
|
|
# Check the list looking for matches
|
|
|
|
matched = False
|
|
|
|
for each in ruleItemList:
|
|
|
|
if each is not None:
|
|
|
|
if (each[0] == line_symbol[0]) and (each[1] == line_symbol[1]):
|
|
|
|
matched = True
|
|
|
|
|
|
|
|
# Append the rule item if it was not already found
|
|
|
|
if not matched:
|
|
|
|
ruleItemList.append(line_symbol)
|
|
|
|
|
2019-11-04 06:58:33 +01:00
|
|
|
def isRuleSuppressed(self, file_path, linenr, ruleNum):
|
2018-09-29 09:05:13 +02:00
|
|
|
"""
|
|
|
|
Check to see if a rule is suppressed.
|
2019-11-04 06:58:33 +01:00
|
|
|
|
|
|
|
:param ruleNum: is the rule number in hundreds format
|
|
|
|
:param file_path: File path of checked location
|
|
|
|
:param linenr: Line number of checked location
|
2018-09-29 09:05:13 +02:00
|
|
|
|
|
|
|
If the rule exists in the dict then check for a filename
|
|
|
|
If the filename is None then rule is suppressed globally
|
|
|
|
for all files.
|
|
|
|
If the filename exists then look for list of
|
|
|
|
line number, symbol name tuples. If the list is None then
|
|
|
|
the rule is suppressed for the entire file
|
|
|
|
If the list of tuples exists then search the list looking for
|
2018-11-03 07:34:27 +01:00
|
|
|
matching line numbers. Symbol names are currently ignored
|
2018-09-29 09:05:13 +02:00
|
|
|
because they can include regular expressions.
|
|
|
|
TODO: Support symbol names and expression matching.
|
|
|
|
|
|
|
|
"""
|
|
|
|
ruleIsSuppressed = False
|
2018-10-18 09:17:57 +02:00
|
|
|
|
|
|
|
# Remove any prefix listed in command arguments from the filename.
|
|
|
|
filename = None
|
2019-11-04 06:58:33 +01:00
|
|
|
if file_path is not None:
|
2019-07-24 07:16:48 +02:00
|
|
|
if self.filePrefix is not None:
|
2019-11-04 06:58:33 +01:00
|
|
|
filename = remove_file_prefix(file_path, self.filePrefix)
|
2019-07-24 07:16:48 +02:00
|
|
|
else:
|
2019-11-04 06:58:33 +01:00
|
|
|
filename = os.path.basename(file_path)
|
2018-09-29 09:05:13 +02:00
|
|
|
|
|
|
|
if ruleNum in self.suppressedRules:
|
|
|
|
fileDict = self.suppressedRules[ruleNum]
|
|
|
|
|
|
|
|
# a file name entry of None means that the rule is suppressed
|
|
|
|
# globally
|
|
|
|
if None in fileDict:
|
|
|
|
ruleIsSuppressed = True
|
|
|
|
else:
|
|
|
|
# Does the filename match one of the names in
|
|
|
|
# the file list
|
|
|
|
if filename in fileDict:
|
|
|
|
# Get the list of ruleItems
|
|
|
|
ruleItemList = fileDict[filename]
|
|
|
|
|
2018-10-18 09:17:57 +02:00
|
|
|
if None in ruleItemList:
|
|
|
|
# Entry of None in the ruleItemList means the rule is
|
|
|
|
# suppressed for all lines in the filename
|
2018-09-29 09:05:13 +02:00
|
|
|
ruleIsSuppressed = True
|
|
|
|
else:
|
|
|
|
# Iterate though the the list of line numbers
|
|
|
|
# and symbols looking for a match of the line
|
|
|
|
# number. Matching the symbol is a TODO:
|
|
|
|
for each in ruleItemList:
|
|
|
|
if each is not None:
|
|
|
|
if each[0] == linenr:
|
|
|
|
ruleIsSuppressed = True
|
|
|
|
|
|
|
|
return ruleIsSuppressed
|
|
|
|
|
2019-11-04 06:57:41 +01:00
|
|
|
def isRuleGloballySuppressed(self, rule_num):
|
|
|
|
"""
|
|
|
|
Check to see if a rule is globally suppressed.
|
|
|
|
:param rule_num: is the rule number in hundreds format
|
|
|
|
"""
|
|
|
|
if rule_num not in self.suppressedRules:
|
|
|
|
return False
|
|
|
|
return None in self.suppressedRules[rule_num]
|
2018-09-29 09:05:13 +02:00
|
|
|
|
|
|
|
def parseSuppressions(self):
|
|
|
|
"""
|
|
|
|
Parse the suppression list provided by cppcheck looking for
|
|
|
|
rules that start with 'misra' or MISRA. The MISRA rule number
|
|
|
|
follows using either '_' or '.' to separate the numbers.
|
|
|
|
Examples:
|
|
|
|
misra_6.0
|
|
|
|
misra_7_0
|
|
|
|
misra.21.11
|
|
|
|
"""
|
|
|
|
rule_pattern = re.compile(r'^(misra|MISRA)[_.]([0-9]+)[_.]([0-9]+)')
|
|
|
|
|
|
|
|
for each in self.dumpfileSuppressions:
|
|
|
|
res = rule_pattern.match(each.errorId)
|
|
|
|
|
|
|
|
if res:
|
|
|
|
num1 = int(res.group(2)) * 100
|
|
|
|
ruleNum = num1 + int(res.group(3))
|
2019-07-16 22:32:41 +02:00
|
|
|
linenr = None
|
|
|
|
if each.lineNumber:
|
|
|
|
linenr = int(each.lineNumber)
|
|
|
|
self.addSuppressedRule(ruleNum, each.fileName, linenr, each.symbolName)
|
2018-09-29 09:05:13 +02:00
|
|
|
|
2018-10-18 09:17:57 +02:00
|
|
|
def showSuppressedRules(self):
|
2019-11-11 13:53:19 +01:00
|
|
|
"""
|
|
|
|
Print out rules in suppression list sorted by Rule Number
|
|
|
|
"""
|
|
|
|
print("Suppressed Rules List:")
|
|
|
|
outlist = list()
|
2018-10-18 09:17:57 +02:00
|
|
|
|
2019-11-11 13:53:19 +01:00
|
|
|
for ruleNum in self.suppressedRules:
|
|
|
|
fileDict = self.suppressedRules[ruleNum]
|
2018-10-18 09:17:57 +02:00
|
|
|
|
2019-11-11 13:53:19 +01:00
|
|
|
for fname in fileDict:
|
|
|
|
ruleItemList = fileDict[fname]
|
2018-10-18 09:17:57 +02:00
|
|
|
|
2019-11-11 13:53:19 +01:00
|
|
|
for item in ruleItemList:
|
|
|
|
if item is None:
|
|
|
|
item_str = "None"
|
|
|
|
else:
|
|
|
|
item_str = str(item[0])
|
2018-10-18 09:17:57 +02:00
|
|
|
|
2020-01-08 06:54:43 +01:00
|
|
|
outlist.append("%s: %s: %s (%d locations suppressed)" % (
|
|
|
|
float(ruleNum) / 100, fname, item_str, self.suppressionStats.get(ruleNum, 0)))
|
2018-10-18 09:17:57 +02:00
|
|
|
|
2019-11-11 13:53:19 +01:00
|
|
|
for line in sorted(outlist, reverse=True):
|
|
|
|
print(" %s" % line)
|
2018-10-18 09:17:57 +02:00
|
|
|
|
2019-07-24 07:16:48 +02:00
|
|
|
def setFilePrefix(self, prefix):
|
|
|
|
"""
|
2019-11-12 15:32:05 +01:00
|
|
|
Set the file prefix to ignore from files when matching
|
2019-07-24 07:16:48 +02:00
|
|
|
suppression files
|
|
|
|
"""
|
|
|
|
self.filePrefix = prefix
|
2018-10-18 09:17:57 +02:00
|
|
|
|
2020-06-08 15:58:17 +02:00
|
|
|
def setSeverity(self, severity):
|
|
|
|
"""
|
|
|
|
Set the severity for all errors.
|
|
|
|
"""
|
|
|
|
self.severity = severity
|
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def setSuppressionList(self, suppressionlist):
|
|
|
|
num1 = 0
|
|
|
|
num2 = 0
|
|
|
|
rule_pattern = re.compile(r'([0-9]+).([0-9]+)')
|
|
|
|
strlist = suppressionlist.split(",")
|
|
|
|
|
|
|
|
# build ignore list
|
|
|
|
for item in strlist:
|
|
|
|
res = rule_pattern.match(item)
|
|
|
|
if res:
|
|
|
|
num1 = int(res.group(1))
|
|
|
|
num2 = int(res.group(2))
|
2020-01-08 06:54:43 +01:00
|
|
|
ruleNum = (num1 * 100) + num2
|
2018-09-29 09:05:13 +02:00
|
|
|
|
|
|
|
self.addSuppressedRule(ruleNum)
|
|
|
|
|
|
|
|
def reportError(self, location, num1, num2):
|
|
|
|
ruleNum = num1 * 100 + num2
|
|
|
|
|
2019-06-17 21:17:29 +02:00
|
|
|
if self.settings.verify:
|
2018-09-29 09:05:13 +02:00
|
|
|
self.verify_actual.append(str(location.linenr) + ':' + str(num1) + '.' + str(num2))
|
2019-11-04 06:58:33 +01:00
|
|
|
elif self.isRuleSuppressed(location.file, location.linenr, ruleNum):
|
2018-09-29 09:05:13 +02:00
|
|
|
# Error is suppressed. Ignore
|
2019-11-04 06:58:33 +01:00
|
|
|
self.suppressionStats.setdefault(ruleNum, 0)
|
|
|
|
self.suppressionStats[ruleNum] += 1
|
2018-09-29 09:05:13 +02:00
|
|
|
return
|
|
|
|
else:
|
2019-04-13 10:22:13 +02:00
|
|
|
errorId = 'c2012-' + str(num1) + '.' + str(num2)
|
2019-06-23 14:08:05 +02:00
|
|
|
misra_severity = 'Undefined'
|
|
|
|
cppcheck_severity = 'style'
|
2018-09-29 09:05:13 +02:00
|
|
|
if ruleNum in self.ruleTexts:
|
2019-04-13 10:22:13 +02:00
|
|
|
errmsg = self.ruleTexts[ruleNum].text
|
2019-06-23 14:08:05 +02:00
|
|
|
if self.ruleTexts[ruleNum].misra_severity:
|
|
|
|
misra_severity = self.ruleTexts[ruleNum].misra_severity
|
|
|
|
cppcheck_severity = self.ruleTexts[ruleNum].cppcheck_severity
|
2018-09-29 09:05:13 +02:00
|
|
|
elif len(self.ruleTexts) == 0:
|
2019-04-13 10:22:13 +02:00
|
|
|
errmsg = 'misra violation (use --rule-texts=<file> to get proper output)'
|
2018-09-29 09:05:13 +02:00
|
|
|
else:
|
|
|
|
return
|
2020-06-08 15:58:17 +02:00
|
|
|
|
|
|
|
if self.severity:
|
|
|
|
cppcheck_severity = self.severity
|
|
|
|
|
2020-07-16 21:29:17 +02:00
|
|
|
this_violation = '{}-{}-{}-{}'.format(location.file, location.linenr, location.column, ruleNum)
|
|
|
|
|
|
|
|
# If this is new violation then record it and show it. If not then
|
|
|
|
# skip it since it has already been displayed.
|
|
|
|
if not this_violation in self.existing_violations:
|
|
|
|
self.existing_violations.add(this_violation)
|
|
|
|
cppcheckdata.reportError(location, cppcheck_severity, errmsg, 'misra', errorId, misra_severity)
|
2019-04-13 10:22:13 +02:00
|
|
|
|
2020-07-16 21:29:17 +02:00
|
|
|
if misra_severity not in self.violations:
|
|
|
|
self.violations[misra_severity] = []
|
|
|
|
self.violations[misra_severity].append('misra-' + errorId)
|
2018-09-29 09:05:13 +02:00
|
|
|
|
|
|
|
def loadRuleTexts(self, filename):
|
|
|
|
num1 = 0
|
|
|
|
num2 = 0
|
|
|
|
appendixA = False
|
|
|
|
ruleText = False
|
2019-06-17 21:17:29 +02:00
|
|
|
expect_more = False
|
2018-09-29 09:05:13 +02:00
|
|
|
|
|
|
|
Rule_pattern = re.compile(r'^Rule ([0-9]+).([0-9]+)')
|
2019-09-09 21:43:44 +02:00
|
|
|
severity_pattern = re.compile(r'.*[ ]*(Advisory|Required|Mandatory)$')
|
2018-09-29 09:05:13 +02:00
|
|
|
xA_Z_pattern = re.compile(r'^[#A-Z].*')
|
|
|
|
a_z_pattern = re.compile(r'^[a-z].*')
|
2019-01-18 21:30:08 +01:00
|
|
|
# Try to detect the file encoding
|
|
|
|
file_stream = None
|
|
|
|
encodings = ['ascii', 'utf-8', 'windows-1250', 'windows-1252']
|
|
|
|
for e in encodings:
|
|
|
|
try:
|
|
|
|
file_stream = codecs.open(filename, 'r', encoding=e)
|
|
|
|
file_stream.readlines()
|
|
|
|
file_stream.seek(0)
|
|
|
|
except UnicodeDecodeError:
|
|
|
|
file_stream = None
|
|
|
|
else:
|
|
|
|
break
|
|
|
|
if not file_stream:
|
|
|
|
print('Could not find a suitable codec for "' + filename + '".')
|
|
|
|
print('If you know the codec please report it to the developers so the list can be enhanced.')
|
|
|
|
print('Trying with default codec now and ignoring errors if possible ...')
|
|
|
|
try:
|
|
|
|
file_stream = open(filename, 'rt', errors='ignore')
|
|
|
|
except TypeError:
|
|
|
|
# Python 2 does not support the errors parameter
|
|
|
|
file_stream = open(filename, 'rt')
|
2019-06-17 21:17:29 +02:00
|
|
|
|
2019-04-03 19:39:05 +02:00
|
|
|
rule = None
|
2019-09-09 21:43:44 +02:00
|
|
|
have_severity = False
|
2020-01-08 06:54:43 +01:00
|
|
|
severity_loc = 0
|
2019-09-09 21:43:44 +02:00
|
|
|
|
2019-01-18 21:30:08 +01:00
|
|
|
for line in file_stream:
|
2019-09-09 21:43:44 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
line = line.replace('\r', '').replace('\n', '')
|
2019-09-09 21:43:44 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
if not appendixA:
|
|
|
|
if line.find('Appendix A') >= 0 and line.find('Summary of guidelines') >= 10:
|
|
|
|
appendixA = True
|
|
|
|
continue
|
|
|
|
if line.find('Appendix B') >= 0:
|
|
|
|
break
|
2019-04-16 10:57:31 +02:00
|
|
|
if len(line) == 0:
|
|
|
|
continue
|
2019-06-17 21:17:29 +02:00
|
|
|
|
|
|
|
# Parse rule declaration.
|
2018-09-29 09:05:13 +02:00
|
|
|
res = Rule_pattern.match(line)
|
2019-09-09 21:43:44 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
if res:
|
2019-09-09 21:43:44 +02:00
|
|
|
have_severity = False
|
|
|
|
expect_more = False
|
|
|
|
severity_loc = 0
|
2018-09-29 09:05:13 +02:00
|
|
|
num1 = int(res.group(1))
|
|
|
|
num2 = int(res.group(2))
|
2019-04-03 19:39:05 +02:00
|
|
|
rule = Rule(num1, num2)
|
2019-09-09 21:43:44 +02:00
|
|
|
|
|
|
|
if not have_severity and rule is not None:
|
|
|
|
res = severity_pattern.match(line)
|
|
|
|
|
2019-06-17 21:17:29 +02:00
|
|
|
if res:
|
2019-06-23 14:08:05 +02:00
|
|
|
rule.misra_severity = res.group(1)
|
2019-09-09 21:43:44 +02:00
|
|
|
have_severity = True
|
|
|
|
else:
|
2019-11-06 13:40:15 +01:00
|
|
|
severity_loc += 1
|
2019-09-09 21:43:44 +02:00
|
|
|
|
|
|
|
# Only look for severity on the Rule line
|
|
|
|
# or the next non-blank line after
|
|
|
|
# If it's not in either of those locations then
|
|
|
|
# assume a severity was not provided.
|
|
|
|
|
|
|
|
if severity_loc < 2:
|
|
|
|
continue
|
|
|
|
else:
|
|
|
|
rule.misra_severity = ''
|
|
|
|
have_severity = True
|
|
|
|
|
2019-04-16 10:57:31 +02:00
|
|
|
if rule is None:
|
|
|
|
continue
|
2019-06-17 21:17:29 +02:00
|
|
|
|
|
|
|
# Parse continuing of rule text.
|
|
|
|
if expect_more:
|
2019-04-16 10:57:31 +02:00
|
|
|
if a_z_pattern.match(line):
|
|
|
|
self.ruleTexts[rule.num].text += ' ' + line
|
2019-06-17 21:17:29 +02:00
|
|
|
continue
|
2019-09-09 21:43:44 +02:00
|
|
|
|
2019-06-17 21:17:29 +02:00
|
|
|
expect_more = False
|
|
|
|
continue
|
|
|
|
|
|
|
|
# Parse beginning of rule text.
|
|
|
|
if xA_Z_pattern.match(line):
|
|
|
|
rule.text = line
|
|
|
|
self.ruleTexts[rule.num] = rule
|
|
|
|
expect_more = True
|
|
|
|
|
2019-06-19 21:57:28 +02:00
|
|
|
def verifyRuleTexts(self):
|
|
|
|
"""Prints rule numbers without rule text."""
|
|
|
|
rule_texts_rules = []
|
|
|
|
for rule_num in self.ruleTexts:
|
|
|
|
rule = self.ruleTexts[rule_num]
|
|
|
|
rule_texts_rules.append(str(rule.num1) + '.' + str(rule.num2))
|
|
|
|
|
|
|
|
all_rules = list(getAddonRules() + getCppcheckRules())
|
|
|
|
|
|
|
|
missing_rules = list(set(all_rules) - set(rule_texts_rules))
|
|
|
|
if len(missing_rules) == 0:
|
|
|
|
print("Rule texts are correct.")
|
|
|
|
else:
|
|
|
|
print("Missing rule texts: " + ', '.join(missing_rules))
|
|
|
|
|
2019-06-17 21:17:29 +02:00
|
|
|
def printStatus(self, *args, **kwargs):
|
|
|
|
if not self.settings.quiet:
|
|
|
|
print(*args, **kwargs)
|
2018-03-31 12:52:00 +02:00
|
|
|
|
2019-12-19 08:36:10 +01:00
|
|
|
def executeCheck(self, rule_num, check_function, *args):
|
2019-11-04 06:57:41 +01:00
|
|
|
"""Execute check function for a single MISRA rule.
|
|
|
|
|
|
|
|
:param rule_num: Number of rule in hundreds format
|
|
|
|
:param check_function: Check function to execute
|
2019-12-19 08:36:10 +01:00
|
|
|
:param args: Check function arguments
|
2019-11-04 06:57:41 +01:00
|
|
|
"""
|
|
|
|
if not self.isRuleGloballySuppressed(rule_num):
|
2019-12-19 08:36:10 +01:00
|
|
|
check_function(*args)
|
2019-11-04 06:57:41 +01:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
def parseDump(self, dumpfile):
|
2019-12-19 08:36:10 +01:00
|
|
|
filename = '.'.join(dumpfile.split('.')[:-1])
|
2018-09-29 09:05:13 +02:00
|
|
|
data = cppcheckdata.parsedump(dumpfile)
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
self.dumpfileSuppressions = data.suppressions
|
|
|
|
self.parseSuppressions()
|
2018-06-04 21:50:21 +02:00
|
|
|
|
2018-09-29 09:05:13 +02:00
|
|
|
typeBits['CHAR'] = data.platform.char_bit
|
|
|
|
typeBits['SHORT'] = data.platform.short_bit
|
|
|
|
typeBits['INT'] = data.platform.int_bit
|
|
|
|
typeBits['LONG'] = data.platform.long_bit
|
|
|
|
typeBits['LONG_LONG'] = data.platform.long_long_bit
|
|
|
|
typeBits['POINTER'] = data.platform.pointer_bit
|
2017-10-09 15:25:55 +02:00
|
|
|
|
2019-06-17 21:17:29 +02:00
|
|
|
if self.settings.verify:
|
2018-09-29 09:05:13 +02:00
|
|
|
for tok in data.rawTokens:
|
|
|
|
if tok.str.startswith('//') and 'TODO' not in tok.str:
|
|
|
|
compiled = re.compile(r'[0-9]+\.[0-9]+')
|
|
|
|
for word in tok.str[2:].split(' '):
|
|
|
|
if compiled.match(word):
|
|
|
|
self.verify_expected.append(str(tok.linenr) + ':' + word)
|
|
|
|
else:
|
2019-06-17 21:17:29 +02:00
|
|
|
self.printStatus('Checking ' + dumpfile + '...')
|
2018-09-29 09:05:13 +02:00
|
|
|
|
2019-12-27 08:50:56 +01:00
|
|
|
for cfgNumber, cfg in enumerate(data.iterconfigurations()):
|
|
|
|
if not self.settings.quiet:
|
|
|
|
self.printStatus('Checking %s, config %s...' % (dumpfile, cfg.name))
|
2018-09-29 09:05:13 +02:00
|
|
|
|
2019-11-18 06:56:30 +01:00
|
|
|
self.executeCheck(207, self.misra_2_7, cfg)
|
2019-12-27 08:50:56 +01:00
|
|
|
# data.rawTokens is same for all configurations
|
|
|
|
if cfgNumber == 0:
|
2019-11-04 06:57:41 +01:00
|
|
|
self.executeCheck(301, self.misra_3_1, data.rawTokens)
|
|
|
|
self.executeCheck(302, self.misra_3_2, data.rawTokens)
|
|
|
|
self.executeCheck(401, self.misra_4_1, data.rawTokens)
|
|
|
|
self.executeCheck(402, self.misra_4_2, data.rawTokens)
|
|
|
|
self.executeCheck(501, self.misra_5_1, cfg)
|
|
|
|
self.executeCheck(502, self.misra_5_2, cfg)
|
|
|
|
self.executeCheck(504, self.misra_5_4, cfg)
|
|
|
|
self.executeCheck(505, self.misra_5_5, cfg)
|
2020-10-27 16:00:19 +01:00
|
|
|
self.executeCheck(601, self.misra_6_1, cfg)
|
|
|
|
self.executeCheck(602, self.misra_6_2, cfg)
|
2019-12-27 08:50:56 +01:00
|
|
|
if cfgNumber == 0:
|
2019-11-04 06:57:41 +01:00
|
|
|
self.executeCheck(701, self.misra_7_1, data.rawTokens)
|
|
|
|
self.executeCheck(703, self.misra_7_3, data.rawTokens)
|
|
|
|
self.executeCheck(811, self.misra_8_11, cfg)
|
|
|
|
self.executeCheck(812, self.misra_8_12, cfg)
|
2019-12-27 08:50:56 +01:00
|
|
|
if cfgNumber == 0:
|
2019-11-04 06:57:41 +01:00
|
|
|
self.executeCheck(814, self.misra_8_14, data.rawTokens)
|
|
|
|
self.executeCheck(905, self.misra_9_5, data.rawTokens)
|
|
|
|
self.executeCheck(1001, self.misra_10_1, cfg)
|
|
|
|
self.executeCheck(1004, self.misra_10_4, cfg)
|
|
|
|
self.executeCheck(1006, self.misra_10_6, cfg)
|
|
|
|
self.executeCheck(1008, self.misra_10_8, cfg)
|
|
|
|
self.executeCheck(1103, self.misra_11_3, cfg)
|
|
|
|
self.executeCheck(1104, self.misra_11_4, cfg)
|
|
|
|
self.executeCheck(1105, self.misra_11_5, cfg)
|
|
|
|
self.executeCheck(1106, self.misra_11_6, cfg)
|
|
|
|
self.executeCheck(1107, self.misra_11_7, cfg)
|
|
|
|
self.executeCheck(1108, self.misra_11_8, cfg)
|
|
|
|
self.executeCheck(1109, self.misra_11_9, cfg)
|
2019-12-27 08:50:56 +01:00
|
|
|
if cfgNumber == 0:
|
2019-11-04 06:57:41 +01:00
|
|
|
self.executeCheck(1201, self.misra_12_1_sizeof, data.rawTokens)
|
|
|
|
self.executeCheck(1201, self.misra_12_1, cfg)
|
|
|
|
self.executeCheck(1202, self.misra_12_2, cfg)
|
2020-09-06 11:33:37 +02:00
|
|
|
self.executeCheck(1203, self.misra_12_3, cfg)
|
2019-11-04 06:57:41 +01:00
|
|
|
self.executeCheck(1204, self.misra_12_4, cfg)
|
|
|
|
self.executeCheck(1301, self.misra_13_1, cfg)
|
|
|
|
self.executeCheck(1303, self.misra_13_3, cfg)
|
|
|
|
self.executeCheck(1304, self.misra_13_4, cfg)
|
|
|
|
self.executeCheck(1305, self.misra_13_5, cfg)
|
|
|
|
self.executeCheck(1306, self.misra_13_6, cfg)
|
|
|
|
self.executeCheck(1401, self.misra_14_1, cfg)
|
|
|
|
self.executeCheck(1402, self.misra_14_2, cfg)
|
|
|
|
self.executeCheck(1404, self.misra_14_4, cfg)
|
|
|
|
self.executeCheck(1501, self.misra_15_1, cfg)
|
|
|
|
self.executeCheck(1502, self.misra_15_2, cfg)
|
|
|
|
self.executeCheck(1503, self.misra_15_3, cfg)
|
|
|
|
self.executeCheck(1505, self.misra_15_5, cfg)
|
2019-12-27 08:50:56 +01:00
|
|
|
if cfgNumber == 0:
|
2019-11-04 06:57:41 +01:00
|
|
|
self.executeCheck(1506, self.misra_15_6, data.rawTokens)
|
|
|
|
self.executeCheck(1507, self.misra_15_7, cfg)
|
|
|
|
self.executeCheck(1602, self.misra_16_2, cfg)
|
2019-12-27 08:50:56 +01:00
|
|
|
if cfgNumber == 0:
|
2019-11-04 06:57:41 +01:00
|
|
|
self.executeCheck(1603, self.misra_16_3, data.rawTokens)
|
|
|
|
self.executeCheck(1604, self.misra_16_4, cfg)
|
|
|
|
self.executeCheck(1605, self.misra_16_5, cfg)
|
|
|
|
self.executeCheck(1606, self.misra_16_6, cfg)
|
|
|
|
self.executeCheck(1607, self.misra_16_7, cfg)
|
|
|
|
self.executeCheck(1701, self.misra_17_1, cfg)
|
|
|
|
self.executeCheck(1702, self.misra_17_2, cfg)
|
2019-12-27 08:50:56 +01:00
|
|
|
if cfgNumber == 0:
|
2019-11-04 06:57:41 +01:00
|
|
|
self.executeCheck(1706, self.misra_17_6, data.rawTokens)
|
|
|
|
self.executeCheck(1707, self.misra_17_7, cfg)
|
|
|
|
self.executeCheck(1708, self.misra_17_8, cfg)
|
|
|
|
self.executeCheck(1804, self.misra_18_4, cfg)
|
|
|
|
self.executeCheck(1805, self.misra_18_5, cfg)
|
|
|
|
self.executeCheck(1807, self.misra_18_7, cfg)
|
|
|
|
self.executeCheck(1808, self.misra_18_8, cfg)
|
|
|
|
self.executeCheck(1902, self.misra_19_2, cfg)
|
|
|
|
self.executeCheck(2001, self.misra_20_1, cfg)
|
|
|
|
self.executeCheck(2002, self.misra_20_2, cfg)
|
2020-02-10 08:56:26 +01:00
|
|
|
self.executeCheck(2003, self.misra_20_3, cfg)
|
2019-11-04 06:57:41 +01:00
|
|
|
self.executeCheck(2004, self.misra_20_4, cfg)
|
|
|
|
self.executeCheck(2005, self.misra_20_5, cfg)
|
|
|
|
self.executeCheck(2006, self.misra_20_7, cfg)
|
|
|
|
self.executeCheck(2010, self.misra_20_10, cfg)
|
|
|
|
self.executeCheck(2013, self.misra_20_13, cfg)
|
|
|
|
self.executeCheck(2014, self.misra_20_14, cfg)
|
|
|
|
self.executeCheck(2101, self.misra_21_1, cfg)
|
|
|
|
self.executeCheck(2103, self.misra_21_3, cfg)
|
|
|
|
self.executeCheck(2104, self.misra_21_4, cfg)
|
|
|
|
self.executeCheck(2105, self.misra_21_5, cfg)
|
|
|
|
self.executeCheck(2106, self.misra_21_6, cfg)
|
|
|
|
self.executeCheck(2107, self.misra_21_7, cfg)
|
|
|
|
self.executeCheck(2108, self.misra_21_8, cfg)
|
|
|
|
self.executeCheck(2109, self.misra_21_9, cfg)
|
|
|
|
self.executeCheck(2110, self.misra_21_10, cfg)
|
|
|
|
self.executeCheck(2111, self.misra_21_11, cfg)
|
|
|
|
self.executeCheck(2112, self.misra_21_12, cfg)
|
2018-09-29 09:05:13 +02:00
|
|
|
# 22.4 is already covered by Cppcheck writeReadOnlyFile
|
2018-05-24 06:31:20 +02:00
|
|
|
|
|
|
|
|
|
|
|
RULE_TEXTS_HELP = '''Path to text file of MISRA rules
|
2018-05-20 14:48:01 +02:00
|
|
|
|
2018-05-24 06:31:20 +02:00
|
|
|
If you have the tool 'pdftotext' you might be able
|
|
|
|
to generate this textfile with such command:
|
2018-05-20 14:48:01 +02:00
|
|
|
|
2018-05-24 06:31:20 +02:00
|
|
|
pdftotext MISRA_C_2012.pdf MISRA_C_2012.txt
|
|
|
|
|
|
|
|
Otherwise you can more or less copy/paste the chapter
|
|
|
|
Appendix A Summary of guidelines
|
|
|
|
from the MISRA pdf. You can buy the MISRA pdf from
|
|
|
|
http://www.misra.org.uk/
|
|
|
|
|
|
|
|
Format:
|
|
|
|
|
|
|
|
<..arbitrary text..>
|
|
|
|
Appendix A Summary of guidelines
|
|
|
|
Rule 1.1
|
|
|
|
Rule text for 1.1
|
|
|
|
Rule 1.2
|
|
|
|
Rule text for 1.2
|
|
|
|
<...>
|
|
|
|
|
|
|
|
'''
|
|
|
|
|
|
|
|
SUPPRESS_RULES_HELP = '''MISRA rules to suppress (comma-separated)
|
|
|
|
|
|
|
|
For example, if you'd like to suppress rules 15.1, 11.3,
|
|
|
|
and 20.13, run:
|
|
|
|
|
|
|
|
python misra.py --suppress-rules 15.1,11.3,20.13 ...
|
|
|
|
|
|
|
|
'''
|
|
|
|
|
2019-06-17 21:17:29 +02:00
|
|
|
|
|
|
|
def get_args():
|
|
|
|
"""Generates list of command-line arguments acceptable by misra.py script."""
|
|
|
|
parser = cppcheckdata.ArgumentParser()
|
|
|
|
parser.add_argument("--rule-texts", type=str, help=RULE_TEXTS_HELP)
|
2020-01-08 06:54:43 +01:00
|
|
|
parser.add_argument("--verify-rule-texts",
|
|
|
|
help="Verify that all supported rules texts are present in given file and exit.",
|
|
|
|
action="store_true")
|
2019-06-17 21:17:29 +02:00
|
|
|
parser.add_argument("--suppress-rules", type=str, help=SUPPRESS_RULES_HELP)
|
|
|
|
parser.add_argument("--no-summary", help="Hide summary of violations", action="store_true")
|
|
|
|
parser.add_argument("--show-suppressed-rules", help="Print rule suppression list", action="store_true")
|
2019-07-24 07:16:48 +02:00
|
|
|
parser.add_argument("-P", "--file-prefix", type=str, help="Prefix to strip when matching suppression file rules")
|
2019-11-15 20:14:30 +01:00
|
|
|
parser.add_argument("-generate-table", help=argparse.SUPPRESS, action="store_true")
|
|
|
|
parser.add_argument("-verify", help=argparse.SUPPRESS, action="store_true")
|
2020-06-08 15:58:17 +02:00
|
|
|
parser.add_argument("--severity", type=str, help="Set a custom severity string, for example 'error' or 'warning'. ")
|
2019-06-17 21:17:29 +02:00
|
|
|
return parser.parse_args()
|
|
|
|
|
|
|
|
|
|
|
|
def main():
|
|
|
|
args = get_args()
|
|
|
|
settings = MisraSettings(args)
|
|
|
|
checker = MisraChecker(settings)
|
|
|
|
|
|
|
|
if args.generate_table:
|
|
|
|
generateTable()
|
|
|
|
sys.exit(0)
|
|
|
|
|
2018-05-24 06:31:20 +02:00
|
|
|
if args.rule_texts:
|
2019-06-26 07:30:08 +02:00
|
|
|
filename = os.path.expanduser(args.rule_texts)
|
|
|
|
filename = os.path.normpath(filename)
|
2018-05-24 06:31:20 +02:00
|
|
|
if not os.path.isfile(filename):
|
|
|
|
print('Fatal error: file is not found: ' + filename)
|
|
|
|
sys.exit(1)
|
2018-09-29 09:05:13 +02:00
|
|
|
checker.loadRuleTexts(filename)
|
2019-06-19 21:57:28 +02:00
|
|
|
if args.verify_rule_texts:
|
|
|
|
checker.verifyRuleTexts()
|
|
|
|
sys.exit(0)
|
|
|
|
|
|
|
|
if args.verify_rule_texts and not args.rule_texts:
|
|
|
|
print("Error: Please specify rule texts file with --rule-texts=<file>")
|
|
|
|
sys.exit(1)
|
2018-09-29 09:05:13 +02:00
|
|
|
|
2018-05-24 06:31:20 +02:00
|
|
|
if args.suppress_rules:
|
2018-09-29 09:05:13 +02:00
|
|
|
checker.setSuppressionList(args.suppress_rules)
|
|
|
|
|
2019-07-24 07:16:48 +02:00
|
|
|
if args.file_prefix:
|
|
|
|
checker.setFilePrefix(args.file_prefix)
|
|
|
|
|
2019-11-15 20:14:30 +01:00
|
|
|
if not args.dumpfile:
|
|
|
|
if not args.quiet:
|
|
|
|
print("No input files.")
|
|
|
|
sys.exit(0)
|
|
|
|
|
2020-06-08 15:58:17 +02:00
|
|
|
if args.severity:
|
|
|
|
checker.setSeverity(args.severity)
|
|
|
|
|
2019-11-15 20:14:30 +01:00
|
|
|
for item in args.dumpfile:
|
|
|
|
checker.parseDump(item)
|
|
|
|
|
|
|
|
if settings.verify:
|
|
|
|
verify_expected = checker.get_verify_expected()
|
2020-01-08 06:54:43 +01:00
|
|
|
verify_actual = checker.get_verify_actual()
|
2019-11-15 20:14:30 +01:00
|
|
|
|
2020-08-29 07:44:13 +02:00
|
|
|
exitCode = 0
|
2019-11-15 20:14:30 +01:00
|
|
|
for expected in verify_expected:
|
|
|
|
if expected not in verify_actual:
|
|
|
|
print('Expected but not seen: ' + expected)
|
|
|
|
exitCode = 1
|
|
|
|
for actual in verify_actual:
|
|
|
|
if actual not in verify_expected:
|
|
|
|
print('Not expected: ' + actual)
|
|
|
|
exitCode = 1
|
|
|
|
|
|
|
|
# Existing behavior of verify mode is to exit
|
|
|
|
# on the first un-expected output.
|
|
|
|
# TODO: Is this required? or can it be moved to after
|
|
|
|
# all input files have been processed
|
|
|
|
if exitCode != 0:
|
|
|
|
sys.exit(exitCode)
|
|
|
|
|
2020-08-29 07:44:13 +02:00
|
|
|
if settings.verify:
|
|
|
|
sys.exit(exitCode)
|
|
|
|
|
|
|
|
number_of_violations = len(checker.get_violations())
|
|
|
|
if number_of_violations > 0:
|
|
|
|
if settings.show_summary:
|
|
|
|
print("\nMISRA rules violations found:\n\t%s\n" % (
|
|
|
|
"\n\t".join(["%s: %d" % (viol, len(checker.get_violations(viol))) for viol in
|
|
|
|
checker.get_violation_types()])))
|
|
|
|
|
|
|
|
rules_violated = {}
|
|
|
|
for severity, ids in checker.get_violations():
|
|
|
|
for misra_id in ids:
|
|
|
|
rules_violated[misra_id] = rules_violated.get(misra_id, 0) + 1
|
|
|
|
print("MISRA rules violated:")
|
|
|
|
convert = lambda text: int(text) if text.isdigit() else text
|
|
|
|
misra_sort = lambda key: [convert(c) for c in re.split(r'[\.-]([0-9]*)', key)]
|
|
|
|
for misra_id in sorted(rules_violated.keys(), key=misra_sort):
|
|
|
|
res = re.match(r'misra-c2012-([0-9]+)\\.([0-9]+)', misra_id)
|
|
|
|
if res is None:
|
|
|
|
num = 0
|
|
|
|
else:
|
|
|
|
num = int(res.group(1)) * 100 + int(res.group(2))
|
|
|
|
severity = '-'
|
|
|
|
if num in checker.ruleTexts:
|
|
|
|
severity = checker.ruleTexts[num].cppcheck_severity
|
|
|
|
print("\t%15s (%s): %d" % (misra_id, severity, rules_violated[misra_id]))
|
2018-09-29 09:05:13 +02:00
|
|
|
|
2019-11-15 20:14:30 +01:00
|
|
|
if args.show_suppressed_rules:
|
|
|
|
checker.showSuppressedRules()
|
2018-10-18 09:17:57 +02:00
|
|
|
|
2019-06-17 21:17:29 +02:00
|
|
|
|
|
|
|
if __name__ == '__main__':
|
|
|
|
main()
|
2020-08-29 07:44:13 +02:00
|
|
|
sys.exit(cppcheckdata.EXIT_CODE)
|