2008-11-20 20:23:05 +01:00
|
|
|
/*
|
2008-10-26 08:55:15 +01:00
|
|
|
* c++check - c/c++ syntax checking
|
|
|
|
* Copyright (C) 2007 Daniel Marjamäki
|
|
|
|
*
|
|
|
|
* This program is free software: you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License as published by
|
|
|
|
* the Free Software Foundation, either version 3 of the License, or
|
|
|
|
* (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
* along with this program. If not, see <http://www.gnu.org/licenses/
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
2007-05-24 07:40:45 +02:00
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
#include "tokenize.h"
|
2008-11-20 23:19:26 +01:00
|
|
|
|
2007-05-24 07:40:45 +02:00
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
#include <locale>
|
|
|
|
#include <fstream>
|
|
|
|
|
2008-11-12 23:50:40 +01:00
|
|
|
|
2007-05-28 08:17:18 +02:00
|
|
|
#include <string>
|
2008-11-20 20:18:55 +01:00
|
|
|
#include <cstring>
|
2008-11-22 10:44:02 +01:00
|
|
|
#include <iostream>
|
|
|
|
#include <sstream>
|
2008-11-21 22:14:24 +01:00
|
|
|
#include <list>
|
2008-11-15 23:54:39 +01:00
|
|
|
#include <algorithm>
|
2007-05-24 07:40:45 +02:00
|
|
|
#include <stdlib.h> // <- strtoul
|
2007-07-17 08:15:50 +02:00
|
|
|
#include <stdio.h>
|
2007-05-24 07:40:45 +02:00
|
|
|
|
2008-02-18 18:11:34 +01:00
|
|
|
#ifdef __BORLANDC__
|
2008-09-11 19:03:58 +02:00
|
|
|
#include <ctype.h>
|
2008-02-18 18:11:34 +01:00
|
|
|
#include <mem.h>
|
|
|
|
#endif
|
|
|
|
|
2008-09-11 19:03:58 +02:00
|
|
|
#ifndef _MSC_VER
|
|
|
|
#define _strdup(str) strdup(str)
|
|
|
|
#endif
|
|
|
|
|
2008-11-20 20:18:55 +01:00
|
|
|
|
2007-05-24 07:40:45 +02:00
|
|
|
|
2007-05-28 08:17:18 +02:00
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
2008-11-22 20:39:12 +01:00
|
|
|
// Construct an empty tokenizer: no token list and no defined symbols yet.
Tokenizer::Tokenizer()
    : _tokens(0)
    , tokens_back(0)
    , dsymlist(0)
{
}
|
|
|
|
|
|
|
|
// Destructor: releases everything owned by the tokenizer via
// DeallocateTokens() (defined elsewhere in this file).
Tokenizer::~Tokenizer()
{
    DeallocateTokens();
}
|
2008-11-20 20:18:55 +01:00
|
|
|
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
// Helper functions..
|
|
|
|
|
|
|
|
// Step 'index' tokens forward from 'tok' in the linked list.
// Returns 0 if the list ends before 'index' steps have been taken.
TOKEN *Tokenizer::_gettok(TOKEN *tok, int index)
{
    for ( ; tok != 0 && index > 0; --index )
        tok = tok->next;
    return tok;
}
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
// Read-only access to the head of the token list.
const TOKEN *Tokenizer::tokens() const
{
    return _tokens;
}
|
2007-05-28 08:17:18 +02:00
|
|
|
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
// Defined symbols.
|
|
|
|
// "#define abc 123" will create a defined symbol "abc" with the value 123
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
2008-11-16 16:18:50 +01:00
|
|
|
|
2008-11-20 20:18:55 +01:00
|
|
|
|
|
|
|
// Return the list of filenames that have been tokenized.
// A token's FileIndex is an index into this vector (see Tokenize()).
std::vector<std::string> *Tokenizer::getFiles()
{
    return &Files;
}
|
2007-05-24 07:40:45 +02:00
|
|
|
|
2008-11-09 08:19:53 +01:00
|
|
|
void Tokenizer::Define(const char Name[], const char Value[])
|
2007-05-24 07:40:45 +02:00
|
|
|
{
|
|
|
|
if (!(Name && Name[0]))
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (!(Value && Value[0]))
|
|
|
|
return;
|
|
|
|
|
|
|
|
// Is 'Value' a decimal value..
|
|
|
|
bool dec = true, hex = true;
|
|
|
|
for (int i = 0; Value[i]; i++)
|
|
|
|
{
|
2008-09-11 19:03:58 +02:00
|
|
|
if ( ! isdigit(Value[i]) )
|
2007-05-24 07:40:45 +02:00
|
|
|
dec = false;
|
|
|
|
|
2008-09-11 19:03:58 +02:00
|
|
|
if ( ! isxdigit(Value[i]) && (!(i==1 && Value[i]=='x')))
|
2007-05-24 07:40:45 +02:00
|
|
|
hex = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!dec && !hex)
|
|
|
|
return;
|
|
|
|
|
2008-09-11 19:03:58 +02:00
|
|
|
char *strValue = _strdup(Value);
|
2007-05-24 07:40:45 +02:00
|
|
|
|
|
|
|
if (!dec && hex)
|
|
|
|
{
|
2008-09-11 19:03:58 +02:00
|
|
|
// Convert Value from hexadecimal to decimal
|
|
|
|
unsigned long value;
|
|
|
|
std::istringstream istr(Value+2);
|
|
|
|
istr >> std::hex >> value;
|
|
|
|
std::ostringstream ostr;
|
|
|
|
ostr << value;
|
2007-05-24 07:40:45 +02:00
|
|
|
free(strValue);
|
2008-09-11 19:03:58 +02:00
|
|
|
strValue = _strdup(ostr.str().c_str());
|
2007-05-24 07:40:45 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
DefineSymbol *NewSym = new DefineSymbol;
|
|
|
|
memset(NewSym, 0, sizeof(DefineSymbol));
|
2008-09-11 19:03:58 +02:00
|
|
|
NewSym->name = _strdup(Name);
|
2007-05-24 07:40:45 +02:00
|
|
|
NewSym->value = strValue;
|
|
|
|
NewSym->next = dsymlist;
|
|
|
|
dsymlist = NewSym;
|
|
|
|
}
|
2007-05-28 08:17:18 +02:00
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2007-05-24 07:40:45 +02:00
|
|
|
|
2007-05-28 08:17:18 +02:00
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
// addtoken
|
|
|
|
// add a token. Used by 'Tokenizer'
|
|
|
|
//---------------------------------------------------------------------------
|
2007-05-24 07:40:45 +02:00
|
|
|
|
2008-11-09 08:19:53 +01:00
|
|
|
void Tokenizer::addtoken(const char str[], const unsigned int lineno, const unsigned int fileno)
|
2007-05-24 07:40:45 +02:00
|
|
|
{
|
|
|
|
if (str[0] == 0)
|
|
|
|
return;
|
|
|
|
|
|
|
|
// Replace hexadecimal value with decimal
|
2008-09-11 19:03:58 +02:00
|
|
|
std::ostringstream str2;
|
|
|
|
if (strncmp(str,"0x",2)==0)
|
2007-05-24 07:40:45 +02:00
|
|
|
{
|
2008-09-11 19:03:58 +02:00
|
|
|
str2 << strtoul(str+2, NULL, 16);
|
2007-05-24 07:40:45 +02:00
|
|
|
}
|
2008-09-11 19:03:58 +02:00
|
|
|
else
|
|
|
|
{
|
|
|
|
str2 << str;
|
|
|
|
}
|
2007-05-24 07:40:45 +02:00
|
|
|
|
|
|
|
TOKEN *newtoken = new TOKEN;
|
2008-11-06 19:31:39 +01:00
|
|
|
newtoken->setstr(str2.str().c_str());
|
2007-05-24 07:40:45 +02:00
|
|
|
newtoken->linenr = lineno;
|
|
|
|
newtoken->FileIndex = fileno;
|
|
|
|
if (tokens_back)
|
|
|
|
{
|
|
|
|
tokens_back->next = newtoken;
|
|
|
|
tokens_back = newtoken;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2008-11-16 16:58:52 +01:00
|
|
|
_tokens = tokens_back = newtoken;
|
2007-05-24 07:40:45 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Check if str is defined..
|
|
|
|
for (DefineSymbol *sym = dsymlist; sym; sym = sym->next)
|
|
|
|
{
|
|
|
|
if (strcmp(str,sym->name)==0)
|
|
|
|
{
|
2008-11-06 19:31:39 +01:00
|
|
|
newtoken->setstr(sym->value);
|
2007-05-24 07:40:45 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
2007-05-28 08:17:18 +02:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
// combine_2tokens
|
|
|
|
// Combine two tokens that belong to each other. Ex: "<" and "=" may become "<="
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
2008-11-09 08:19:53 +01:00
|
|
|
// Merge two adjacent tokens into one, e.g. "<" followed by "=" becomes "<=".
// Nothing happens unless 'tok' matches str1 and its successor matches str2.
void Tokenizer::combine_2tokens(TOKEN *tok, const char str1[], const char str2[])
{
    if (tok == 0 || tok->next == 0)
        return;

    if (strcmp(tok->str, str1) != 0)
        return;

    if (strcmp(tok->next->str, str2) != 0)
        return;

    const std::string combined = std::string(str1) + str2;
    tok->setstr(combined.c_str());

    // The second token is now redundant..
    DeleteNextToken(tok);
}
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
// SizeOfType - gives the size of a type
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
2008-11-12 23:50:40 +01:00
|
|
|
|
2007-05-28 08:17:18 +02:00
|
|
|
|
2008-11-09 08:19:53 +01:00
|
|
|
int Tokenizer::SizeOfType(const char type[])
|
2007-05-28 08:17:18 +02:00
|
|
|
{
|
|
|
|
if (!type)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
return TypeSize[type];
|
2007-05-24 07:40:45 +02:00
|
|
|
}
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
2007-05-28 08:17:18 +02:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
// DeleteNextToken. Unlink and delete next token.
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
2008-11-09 08:19:53 +01:00
|
|
|
// Unlink and delete the token that follows 'tok'.
// Safe no-op when 'tok' is null or has no successor (the original
// dereferenced both unconditionally).
void Tokenizer::DeleteNextToken(TOKEN *tok)
{
    if (!tok || !tok->next)
        return;

    TOKEN *next = tok->next;
    tok->next = next->next;
    delete next;
}
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2007-05-29 08:24:36 +02:00
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
// InsertTokens - Copy and insert tokens
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
2008-11-09 08:19:53 +01:00
|
|
|
// Copy 'n' tokens starting at 'src' and insert the copies directly
// after 'dest', preserving their order.
void Tokenizer::InsertTokens(TOKEN *dest, TOKEN *src, unsigned int n)
{
    for ( ; n > 0; --n )
    {
        // Duplicate the source token..
        TOKEN *copy = new TOKEN;
        copy->FileIndex = src->FileIndex;
        copy->linenr = src->linenr;
        copy->setstr(src->str);

        // Splice the copy in after 'dest'..
        copy->next = dest->next;
        dest->next = copy;

        // Advance both cursors..
        dest = copy;
        src = src->next;
    }
}
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2007-05-28 08:17:18 +02:00
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
// Tokenize - tokenizes a given file.
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
2008-11-09 08:19:53 +01:00
|
|
|
void Tokenizer::Tokenize(std::istream &code, const char FileName[])
|
2007-05-24 07:40:45 +02:00
|
|
|
{
|
|
|
|
// Has this file been tokenized already?
|
|
|
|
for (unsigned int i = 0; i < Files.size(); i++)
|
|
|
|
{
|
2008-04-06 08:26:11 +02:00
|
|
|
if ( SameFileName( Files[i].c_str(), FileName ) )
|
2007-05-24 07:40:45 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2007-10-23 08:36:29 +02:00
|
|
|
// The "Files" vector remembers what files have been tokenized..
|
2007-05-24 07:40:45 +02:00
|
|
|
Files.push_back(FileName);
|
|
|
|
|
2008-02-16 16:46:32 +01:00
|
|
|
// Tokenize the file..
|
2008-10-31 09:29:59 +01:00
|
|
|
TokenizeCode( code, (unsigned int)(Files.size() - 1) );
|
2008-02-16 16:46:32 +01:00
|
|
|
}
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
// Tokenize - tokenizes input stream
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
2008-11-09 08:19:53 +01:00
|
|
|
// Tokenize an input stream into the token list.
// Reads the stream character by character, splitting on whitespace and
// operator characters, handling preprocessor lines (#include is followed
// recursively, #define is recorded via Define()), comments, character and
// string literals. Afterwards it post-processes the list: combines
// two-character operators, rewrites "->" as ".", expands simple typedefs,
// and strips "__asm { ... }" blocks.
void Tokenizer::TokenizeCode(std::istream &code, const unsigned int FileIndex)
{
    // Tokenize the file.
    unsigned int lineno = 1;
    std::string CurrentToken;
    for (char ch = (char)code.get(); code.good(); ch = (char)code.get())
    {
        // Todo -- characters with the high bit set (negative plain char)
        // are currently skipped entirely.
        if ( ch < 0 )
            continue;

        // Preprocessor stuff?
        if (ch == '#' && CurrentToken.empty())
        {
            // Collect the whole (possibly backslash-continued) directive
            // line into 'line', stripping spaces-insensitive continuations.
            std::string line("#");
            {
                char chPrev = '#';
                while ( code.good() )
                {
                    ch = (char)code.get();
                    if (chPrev!='\\' && ch=='\n')
                        break;
                    if (ch!=' ')
                        chPrev = ch;
                    if (ch!='\\' && ch!='\n')
                        line += ch;
                    if (ch=='\n')
                        ++lineno;
                }
            }
            // Only quoted includes are handled (angle-bracket includes are
            // emitted as plain "#;" below).
            if (strncmp(line.c_str(),"#include",8)==0 &&
                line.find("\"") != std::string::npos)
            {
                // Extract the filename
                line.erase(0, line.find("\"")+1);
                line.erase(line.find("\""));

                // Relative path..
                if (Files.back().find_first_of("\\/") != std::string::npos)
                {
                    std::string path = Files.back();
                    path.erase( 1 + path.find_last_of("\\/") );
                    line = path + line;
                }

                addtoken("#include", lineno, FileIndex);
                addtoken(line.c_str(), lineno, FileIndex);

                // Recursively tokenize the included file.
                std::ifstream fin( line.c_str() );
                Tokenize(fin, line.c_str());
            }

            else if (strncmp(line.c_str(), "#define", 7) == 0)
            {
                // Small state machine over the characters after "#define":
                // skip spaces, read the identifier, skip spaces, read value.
                std::string strId;
                enum {Space1, Id, Space2, Value} State;
                State = Space1;
                for (unsigned int i = 8; i < line.length(); i++)
                {
                    if (State==Space1 || State==Space2)
                    {
                        if (isspace(line[i]))
                            continue;
                        State = (State==Space1) ? Id : Value;
                    }

                    else if (State==Id)
                    {
                        if ( isspace( line[i] ) )
                        {
                            strId = CurrentToken;
                            CurrentToken.clear();
                            State = Space2;
                            continue;
                        }
                        else if ( ! isalnum(line[i]) )
                        {
                            // Non-identifier character (e.g. a macro with
                            // parameters) -- give up on this #define.
                            break;
                        }
                    }

                    CurrentToken += line[i];
                }

                // Only "#define name value" forms produce tokens/symbols.
                if (State==Value)
                {
                    addtoken("def", lineno, FileIndex);
                    addtoken(strId.c_str(), lineno, FileIndex);
                    addtoken(";", lineno, FileIndex);
                    Define(strId.c_str(), CurrentToken.c_str());
                }

                CurrentToken.clear();
            }

            else
            {
                // Any other directive becomes a bare "# ;" pair.
                addtoken("#", lineno, FileIndex);
                addtoken(";", lineno, FileIndex);
            }

            lineno++;
            continue;
        }

        if (ch == '\n')
        {
            // Add current token..
            addtoken(CurrentToken.c_str(), lineno++, FileIndex);
            CurrentToken.clear();
            continue;
        }

        // Comments..
        if (ch == '/' && code.good())
        {
            // Note: when CurrentToken is empty, strchr(";{}", '\0') finds
            // the terminator and newstatement is true.
            bool newstatement = bool( strchr(";{}", CurrentToken.empty() ? '\0' : CurrentToken[0]) != NULL );

            // Add current token..
            addtoken(CurrentToken.c_str(), lineno, FileIndex);
            CurrentToken.clear();

            // Read next character..
            ch = (char)code.get();

            // If '//'..
            if (ch == '/')
            {
                std::string comment;
                getline( code, comment );   // Parse in the whole comment

                // If the comment says something like "fred is deleted" then generate appropriate tokens for that
                comment = comment + " ";
                if ( newstatement && comment.find(" deleted ")!=std::string::npos )
                {
                    // delete
                    addtoken( "delete", lineno, FileIndex );

                    // fred
                    std::string::size_type pos1 = comment.find_first_not_of(" \t");
                    std::string::size_type pos2 = comment.find(" ", pos1);
                    std::string firstWord = comment.substr( pos1, pos2-pos1 );
                    addtoken( firstWord.c_str(), lineno, FileIndex );

                    // ;
                    addtoken( ";", lineno, FileIndex );
                }

                lineno++;
                continue;
            }

            // If '/*'..
            if (ch == '*')
            {
                // Skip until the closing "*/", counting newlines.
                char chPrev;
                ch = chPrev = 'A';
                while (code.good() && (chPrev!='*' || ch!='/'))
                {
                    chPrev = ch;
                    ch = (char)code.get();
                    if (ch == '\n')
                        lineno++;
                }
                continue;
            }

            // Not a comment.. add token..
            addtoken("/", lineno, FileIndex);
        }

        // char..
        if (ch == '\'')
        {
            // Add previous token
            addtoken(CurrentToken.c_str(), lineno, FileIndex);
            CurrentToken.clear();

            // Read this ..
            CurrentToken += ch;
            CurrentToken += (char)code.get();
            CurrentToken += (char)code.get();
            // Escaped character literal, e.g. '\n' -- read one more char.
            if (CurrentToken[1] == '\\')
                CurrentToken += (char)code.get();

            // Add token and start on next..
            addtoken(CurrentToken.c_str(), lineno, FileIndex);
            CurrentToken.clear();

            continue;
        }

        // String..
        if (ch == '\"')
        {
            addtoken(CurrentToken.c_str(), lineno, FileIndex);
            CurrentToken.clear();
            bool special = false;
            char c = ch;
            do
            {
                // Append token..
                CurrentToken += c;

                // Special sequence '\.'
                if (special)
                    special = false;
                else
                    special = (c == '\\');

                // Get next character
                c = (char)code.get();
            }
            while (code.good() && (special || c != '\"'));
            CurrentToken += '\"';
            addtoken(CurrentToken.c_str(), lineno, FileIndex);
            CurrentToken.clear();
            continue;
        }

        // Operator/punctuation characters are always single-char tokens..
        if (strchr("+-*/%&|^?!=<>[](){};:,.",ch))
        {
            addtoken(CurrentToken.c_str(), lineno, FileIndex);
            CurrentToken.clear();
            CurrentToken += ch;
            addtoken(CurrentToken.c_str(), lineno, FileIndex);
            CurrentToken.clear();
            continue;
        }

        // Whitespace/control characters end the current token..
        if (isspace(ch) || iscntrl(ch))
        {
            addtoken(CurrentToken.c_str(), lineno, FileIndex);
            CurrentToken.clear();
            continue;
        }

        CurrentToken += ch;
    }
    // Flush whatever is left when the stream ends..
    addtoken( CurrentToken.c_str(), lineno, FileIndex );

    // Combine tokens..
    for (TOKEN *tok = _tokens; tok && tok->next; tok = tok->next)
    {
        combine_2tokens(tok, "<", "<");
        combine_2tokens(tok, ">", ">");

        combine_2tokens(tok, "&", "&");
        combine_2tokens(tok, "|", "|");

        combine_2tokens(tok, "+", "=");
        combine_2tokens(tok, "-", "=");
        combine_2tokens(tok, "*", "=");
        combine_2tokens(tok, "/", "=");
        combine_2tokens(tok, "&", "=");
        combine_2tokens(tok, "|", "=");

        combine_2tokens(tok, "=", "=");
        combine_2tokens(tok, "!", "=");
        combine_2tokens(tok, "<", "=");
        combine_2tokens(tok, ">", "=");

        combine_2tokens(tok, ":", ":");
        combine_2tokens(tok, "-", ">");

        combine_2tokens(tok, "private", ":");
        combine_2tokens(tok, "protected", ":");
        combine_2tokens(tok, "public", ":");
    }

    // Replace "->" with "."
    for ( TOKEN *tok = _tokens; tok; tok = tok->next )
    {
        if ( strcmp(tok->str, "->") == 0 )
        {
            tok->setstr(".");
        }
    }

    // typedef..
    for ( TOKEN *tok = _tokens; tok; tok = tok->next )
    {
        // "typedef T name;" -- replace later uses of 'name' with 'T'.
        if (Tokenizer::Match(tok, "typedef %type% %type% ;"))
        {
            const char *type1 = getstr(tok, 1);
            const char *type2 = getstr(tok, 2);
            for ( TOKEN *tok2 = tok; tok2; tok2 = tok2->next )
            {
                // Pointer comparisons skip the typedef's own tokens.
                if (tok2->str!=type1 && tok2->str!=type2 && strcmp(tok2->str,type2)==0)
                {
                    tok2->setstr(type1);
                }
            }
        }

        // "typedef T1 T2 name;" (e.g. "typedef unsigned int uint;") --
        // replace later uses of 'name' with the two tokens "T1 T2".
        else if (Tokenizer::Match(tok, "typedef %type% %type% %type% ;"))
        {
            const char *type1 = getstr(tok, 1);
            const char *type2 = getstr(tok, 2);
            const char *type3 = getstr(tok, 3);

            // Start substituting after the typedef's terminating ';'.
            TOKEN *tok2 = tok;
            while ( ! Tokenizer::Match(tok2, ";") )
                tok2 = tok2->next;

            for ( ; tok2; tok2 = tok2->next )
            {
                if (tok2->str!=type3 && strcmp(tok2->str,type3)==0)
                {
                    tok2->setstr(type1);

                    // Insert the second type word after the first.
                    TOKEN *newtok = new TOKEN;
                    newtok->setstr(type2);
                    newtok->FileIndex = tok2->FileIndex;
                    newtok->linenr = tok2->linenr;
                    newtok->next = tok2->next;
                    tok2->next = newtok;
                    tok2 = newtok;
                }
            }
        }
    }

    // Remove __asm..
    for ( TOKEN *tok = _tokens; tok; tok = tok->next )
    {
        if ( Tokenizer::Match(tok->next, "__asm {") )
        {
            // Delete every token up to and including the closing '}'.
            while ( tok->next )
            {
                bool last = Tokenizer::Match( tok->next, "}" );

                // Unlink and delete tok->next
                TOKEN *next = tok->next;
                tok->next = tok->next->next;
                delete next;

                // break if this was the last token to delete..
                if (last)
                    break;
            }
        }
    }

}
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2007-05-24 07:40:45 +02:00
|
|
|
|
2007-05-29 08:24:36 +02:00
|
|
|
|
|
|
|
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
// Simplify token list
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
2008-11-09 08:19:53 +01:00
|
|
|
void Tokenizer::SimplifyTokenList()
|
2007-05-29 08:24:36 +02:00
|
|
|
{
|
2008-02-20 19:20:59 +01:00
|
|
|
|
|
|
|
// Remove the keyword 'unsigned'
|
2008-11-16 16:58:52 +01:00
|
|
|
for ( TOKEN *tok = _tokens; tok; tok = tok->next )
|
2008-02-20 19:20:59 +01:00
|
|
|
{
|
|
|
|
if (tok->next && strcmp(tok->next->str,"unsigned")==0)
|
|
|
|
{
|
|
|
|
DeleteNextToken( tok );
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-05-24 07:40:45 +02:00
|
|
|
// Replace constants..
|
2008-11-16 16:58:52 +01:00
|
|
|
for (TOKEN *tok = _tokens; tok; tok = tok->next)
|
2007-05-24 07:40:45 +02:00
|
|
|
{
|
2008-11-21 22:14:24 +01:00
|
|
|
if (Tokenizer::Match(tok,"const %type% %var% = %num% ;"))
|
2007-05-24 07:40:45 +02:00
|
|
|
{
|
2007-05-28 08:17:18 +02:00
|
|
|
const char *sym = getstr(tok,2);
|
|
|
|
const char *num = getstr(tok,4);
|
|
|
|
|
2008-03-22 12:46:06 +01:00
|
|
|
for (TOKEN *tok2 = _gettok(tok,6); tok2; tok2 = tok2->next)
|
2007-05-24 07:40:45 +02:00
|
|
|
{
|
|
|
|
if (strcmp(tok2->str,sym) == 0)
|
|
|
|
{
|
2008-11-06 19:31:39 +01:00
|
|
|
tok2->setstr(num);
|
2007-05-24 07:40:45 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2007-05-25 08:50:16 +02:00
|
|
|
}
|
|
|
|
|
2007-05-28 08:17:18 +02:00
|
|
|
|
|
|
|
// Fill the map TypeSize..
|
|
|
|
TypeSize.clear();
|
|
|
|
TypeSize["char"] = sizeof(char);
|
|
|
|
TypeSize["short"] = sizeof(short);
|
|
|
|
TypeSize["int"] = sizeof(int);
|
|
|
|
TypeSize["long"] = sizeof(long);
|
|
|
|
TypeSize["float"] = sizeof(float);
|
|
|
|
TypeSize["double"] = sizeof(double);
|
2008-11-16 16:58:52 +01:00
|
|
|
for (TOKEN *tok = _tokens; tok; tok = tok->next)
|
2007-05-25 08:50:16 +02:00
|
|
|
{
|
2008-11-21 22:14:24 +01:00
|
|
|
if (Tokenizer::Match(tok,"class %var%"))
|
2007-05-28 08:17:18 +02:00
|
|
|
{
|
|
|
|
TypeSize[getstr(tok,1)] = 11;
|
|
|
|
}
|
2007-05-25 08:50:16 +02:00
|
|
|
|
2008-11-21 22:14:24 +01:00
|
|
|
else if (Tokenizer::Match(tok, "struct %var%"))
|
2007-05-25 08:50:16 +02:00
|
|
|
{
|
2007-05-28 08:17:18 +02:00
|
|
|
TypeSize[getstr(tok,1)] = 13;
|
2007-05-25 08:50:16 +02:00
|
|
|
}
|
2007-05-28 08:17:18 +02:00
|
|
|
}
|
|
|
|
|
2007-05-25 08:50:16 +02:00
|
|
|
|
2007-05-28 08:17:18 +02:00
|
|
|
// Replace 'sizeof(type)'..
|
2008-11-16 16:58:52 +01:00
|
|
|
for (TOKEN *tok = _tokens; tok; tok = tok->next)
|
2007-05-28 08:17:18 +02:00
|
|
|
{
|
|
|
|
if (strcmp(tok->str,"sizeof") != 0)
|
|
|
|
continue;
|
2007-05-25 08:50:16 +02:00
|
|
|
|
2008-11-21 22:14:24 +01:00
|
|
|
if (Tokenizer::Match(tok, "sizeof ( %type% * )"))
|
2007-05-25 08:50:16 +02:00
|
|
|
{
|
2008-11-06 19:31:39 +01:00
|
|
|
std::ostringstream str;
|
2007-05-28 08:17:18 +02:00
|
|
|
// 'sizeof(type *)' has the same size as 'sizeof(char *)'
|
2008-09-11 19:03:58 +02:00
|
|
|
str << sizeof(char *);
|
2008-11-06 19:31:39 +01:00
|
|
|
tok->setstr( str.str().c_str() );
|
2007-05-24 07:40:45 +02:00
|
|
|
|
2007-05-25 08:50:16 +02:00
|
|
|
for (int i = 0; i < 4; i++)
|
|
|
|
{
|
2007-05-28 08:17:18 +02:00
|
|
|
DeleteNextToken(tok);
|
2007-05-25 08:50:16 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2008-11-21 22:14:24 +01:00
|
|
|
else if (Tokenizer::Match(tok, "sizeof ( %type% )"))
|
2007-05-25 08:50:16 +02:00
|
|
|
{
|
|
|
|
const char *type = getstr(tok, 2);
|
2007-05-28 08:17:18 +02:00
|
|
|
int size = SizeOfType(type);
|
|
|
|
if (size > 0)
|
2007-05-25 08:50:16 +02:00
|
|
|
{
|
2008-11-06 19:31:39 +01:00
|
|
|
std::ostringstream str;
|
|
|
|
str << size;
|
|
|
|
tok->setstr( str.str().c_str() );
|
2007-05-28 08:17:18 +02:00
|
|
|
for (int i = 0; i < 3; i++)
|
|
|
|
{
|
|
|
|
DeleteNextToken(tok);
|
|
|
|
}
|
2007-05-25 08:50:16 +02:00
|
|
|
}
|
2008-11-20 20:18:55 +01:00
|
|
|
}
|
|
|
|
|
2008-11-21 22:14:24 +01:00
|
|
|
else if (Tokenizer::Match(tok, "sizeof ( * %var% )"))
|
2008-11-20 20:18:55 +01:00
|
|
|
{
|
|
|
|
tok->setstr("100");
|
|
|
|
for ( int i = 0; i < 4; ++i )
|
|
|
|
DeleteNextToken(tok);
|
2007-05-25 08:50:16 +02:00
|
|
|
}
|
2007-05-24 07:40:45 +02:00
|
|
|
}
|
2007-05-26 08:44:28 +02:00
|
|
|
|
2007-05-28 08:17:18 +02:00
|
|
|
// Replace 'sizeof(var)'
|
2008-11-16 16:58:52 +01:00
|
|
|
for (TOKEN *tok = _tokens; tok; tok = tok->next)
|
2007-05-26 08:44:28 +02:00
|
|
|
{
|
2007-05-28 08:17:18 +02:00
|
|
|
// type array [ num ] ;
|
2008-11-21 22:14:24 +01:00
|
|
|
if ( ! Tokenizer::Match(tok, "%type% %var% [ %num% ] ;") )
|
2007-05-26 08:44:28 +02:00
|
|
|
continue;
|
|
|
|
|
2007-05-28 08:17:18 +02:00
|
|
|
int size = SizeOfType(tok->str);
|
2007-05-26 08:44:28 +02:00
|
|
|
if (size <= 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
const char *varname = getstr(tok, 1);
|
|
|
|
int total_size = size * atoi( getstr(tok, 3) );
|
|
|
|
|
|
|
|
// Replace 'sizeof(var)' with number
|
|
|
|
int indentlevel = 0;
|
2008-03-22 12:46:06 +01:00
|
|
|
for ( TOKEN *tok2 = _gettok(tok,5); tok2; tok2 = tok2->next )
|
2007-05-26 08:44:28 +02:00
|
|
|
{
|
|
|
|
if (tok2->str[0] == '{')
|
|
|
|
{
|
|
|
|
indentlevel++;
|
|
|
|
}
|
|
|
|
|
|
|
|
else if (tok2->str[0] == '}')
|
|
|
|
{
|
|
|
|
indentlevel--;
|
|
|
|
if (indentlevel < 0)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2008-11-21 22:14:24 +01:00
|
|
|
// Todo: Tokenizer::Match varname directly
|
|
|
|
else if (Tokenizer::Match(tok2, "sizeof ( %var% )"))
|
2007-05-26 08:44:28 +02:00
|
|
|
{
|
|
|
|
if (strcmp(getstr(tok2,2), varname) == 0)
|
|
|
|
{
|
2008-11-06 19:31:39 +01:00
|
|
|
std::ostringstream str;
|
2008-09-11 19:03:58 +02:00
|
|
|
str << total_size;
|
2008-11-06 19:31:39 +01:00
|
|
|
tok2->setstr(str.str().c_str());
|
2007-05-26 08:44:28 +02:00
|
|
|
// Delete the other tokens..
|
|
|
|
for (int i = 0; i < 3; i++)
|
|
|
|
{
|
2007-05-28 08:17:18 +02:00
|
|
|
DeleteNextToken(tok2);
|
2007-05-26 08:44:28 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2007-05-29 08:24:36 +02:00
|
|
|
|
|
|
|
|
2007-05-26 08:44:28 +02:00
|
|
|
// Simple calculations..
|
2008-11-22 18:53:22 +01:00
|
|
|
for ( bool done = false; !done; done = true )
|
2007-05-26 08:44:28 +02:00
|
|
|
{
|
2008-11-16 16:58:52 +01:00
|
|
|
for (TOKEN *tok = _tokens; tok; tok = tok->next)
|
2007-05-26 08:44:28 +02:00
|
|
|
{
|
2008-11-21 22:14:24 +01:00
|
|
|
if (Tokenizer::Match(tok->next, "* 1") || Tokenizer::Match(tok->next, "1 *"))
|
2007-05-26 08:44:28 +02:00
|
|
|
{
|
2007-05-28 08:17:18 +02:00
|
|
|
for (int i = 0; i < 2; i++)
|
|
|
|
DeleteNextToken(tok);
|
2007-05-26 08:44:28 +02:00
|
|
|
done = false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// (1-2)
|
|
|
|
if (strchr("[,(=<>",tok->str[0]) &&
|
2008-11-21 22:14:24 +01:00
|
|
|
Tokenizer::IsNumber(getstr(tok,1)) &&
|
2007-05-26 08:44:28 +02:00
|
|
|
strchr("+-*/",*(getstr(tok,2))) &&
|
2008-11-21 22:14:24 +01:00
|
|
|
Tokenizer::IsNumber(getstr(tok,3)) &&
|
2007-05-26 08:44:28 +02:00
|
|
|
strchr("],);=<>",*(getstr(tok,4))) )
|
|
|
|
{
|
|
|
|
int i1 = atoi(getstr(tok,1));
|
|
|
|
int i2 = atoi(getstr(tok,3));
|
2008-03-19 18:09:51 +01:00
|
|
|
if ( i2 == 0 && *(getstr(tok,2)) == '/' )
|
|
|
|
{
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2007-05-26 08:44:28 +02:00
|
|
|
switch (*(getstr(tok,2)))
|
|
|
|
{
|
|
|
|
case '+': i1 += i2; break;
|
|
|
|
case '-': i1 -= i2; break;
|
|
|
|
case '*': i1 *= i2; break;
|
|
|
|
case '/': i1 /= i2; break;
|
|
|
|
}
|
|
|
|
tok = tok->next;
|
2008-11-06 19:31:39 +01:00
|
|
|
std::ostringstream str;
|
2008-09-11 19:03:58 +02:00
|
|
|
str << i1;
|
2008-11-06 19:31:39 +01:00
|
|
|
tok->setstr(str.str().c_str());
|
2007-05-26 08:44:28 +02:00
|
|
|
for (int i = 0; i < 2; i++)
|
|
|
|
{
|
2007-05-28 08:17:18 +02:00
|
|
|
DeleteNextToken(tok);
|
2007-05-26 08:44:28 +02:00
|
|
|
}
|
2008-03-22 12:46:06 +01:00
|
|
|
|
2008-03-19 18:09:51 +01:00
|
|
|
done = false;
|
2007-05-26 08:44:28 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2007-05-28 12:34:18 +02:00
|
|
|
|
|
|
|
|
|
|
|
// Replace "*(str + num)" => "str[num]"
|
2008-11-16 16:58:52 +01:00
|
|
|
for (TOKEN *tok = _tokens; tok; tok = tok->next)
|
2007-05-28 12:34:18 +02:00
|
|
|
{
|
|
|
|
if ( ! strchr(";{}(=<>", tok->str[0]) )
|
|
|
|
continue;
|
|
|
|
|
|
|
|
TOKEN *next = tok->next;
|
|
|
|
if ( ! next )
|
|
|
|
break;
|
|
|
|
|
2008-11-21 22:14:24 +01:00
|
|
|
if (Tokenizer::Match(next, "* ( %var% + %num% )"))
|
2007-05-28 12:34:18 +02:00
|
|
|
{
|
|
|
|
const char *str[4] = {"var","[","num","]"};
|
|
|
|
str[0] = getstr(tok,3);
|
|
|
|
str[2] = getstr(tok,5);
|
|
|
|
|
|
|
|
for (int i = 0; i < 4; i++)
|
|
|
|
{
|
|
|
|
tok = tok->next;
|
2008-11-06 19:31:39 +01:00
|
|
|
tok->setstr(str[i]);
|
2007-05-28 12:34:18 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
DeleteNextToken(tok);
|
|
|
|
DeleteNextToken(tok);
|
|
|
|
}
|
|
|
|
}
|
2007-05-29 08:24:36 +02:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Split up variable declarations if possible..
|
2008-11-16 16:58:52 +01:00
|
|
|
for (TOKEN *tok = _tokens; tok; tok = tok->next)
|
2007-05-29 08:24:36 +02:00
|
|
|
{
|
|
|
|
if ( ! strchr("{};", tok->str[0]) )
|
|
|
|
continue;
|
|
|
|
|
|
|
|
TOKEN *type0 = tok->next;
|
2008-11-21 22:14:24 +01:00
|
|
|
if (!Tokenizer::Match(type0, "%type%"))
|
2008-11-02 11:33:38 +01:00
|
|
|
continue;
|
2008-11-21 22:14:24 +01:00
|
|
|
if (Tokenizer::Match(type0, "else") || Tokenizer::Match(type0, "return"))
|
2007-06-02 18:32:07 +02:00
|
|
|
continue;
|
2007-05-29 08:24:36 +02:00
|
|
|
|
|
|
|
TOKEN *tok2 = NULL;
|
|
|
|
unsigned int typelen = 0;
|
|
|
|
|
2008-11-21 22:14:24 +01:00
|
|
|
if ( Tokenizer::Match(type0, "%type% %var% ,") )
|
2007-05-29 08:24:36 +02:00
|
|
|
{
|
2008-03-22 12:46:06 +01:00
|
|
|
tok2 = _gettok(type0, 2); // The ',' token
|
2007-05-29 08:24:36 +02:00
|
|
|
typelen = 1;
|
|
|
|
}
|
|
|
|
|
2008-11-21 22:14:24 +01:00
|
|
|
else if ( Tokenizer::Match(type0, "%type% * %var% ,") )
|
2007-05-29 08:24:36 +02:00
|
|
|
{
|
2008-03-22 12:46:06 +01:00
|
|
|
tok2 = _gettok(type0, 3); // The ',' token
|
2007-05-29 19:11:53 +02:00
|
|
|
typelen = 1;
|
2007-05-29 08:24:36 +02:00
|
|
|
}
|
|
|
|
|
2008-11-21 22:14:24 +01:00
|
|
|
else if ( Tokenizer::Match(type0, "%type% %var% [ %num% ] ,") )
|
2007-05-29 08:24:36 +02:00
|
|
|
{
|
2008-03-22 12:46:06 +01:00
|
|
|
tok2 = _gettok(type0, 5); // The ',' token
|
2007-05-29 08:24:36 +02:00
|
|
|
typelen = 1;
|
|
|
|
}
|
|
|
|
|
2008-11-21 22:14:24 +01:00
|
|
|
else if ( Tokenizer::Match(type0, "%type% * %var% [ %num% ] ,") )
|
2007-05-29 08:24:36 +02:00
|
|
|
{
|
2008-03-22 12:46:06 +01:00
|
|
|
tok2 = _gettok(type0, 6); // The ',' token
|
2007-05-29 19:11:53 +02:00
|
|
|
typelen = 1;
|
|
|
|
}
|
|
|
|
|
2008-11-21 22:14:24 +01:00
|
|
|
else if ( Tokenizer::Match(type0, "struct %type% %var% ,") )
|
2007-05-29 19:11:53 +02:00
|
|
|
{
|
2008-03-22 12:46:06 +01:00
|
|
|
tok2 = _gettok(type0, 3);
|
2007-05-29 19:11:53 +02:00
|
|
|
typelen = 2;
|
|
|
|
}
|
|
|
|
|
2008-11-21 22:14:24 +01:00
|
|
|
else if ( Tokenizer::Match(type0, "struct %type% * %var% ,") )
|
2007-05-29 19:11:53 +02:00
|
|
|
{
|
2008-03-22 12:46:06 +01:00
|
|
|
tok2 = _gettok(type0, 4);
|
2007-05-29 08:24:36 +02:00
|
|
|
typelen = 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2008-11-21 22:14:24 +01:00
|
|
|
else if ( Tokenizer::Match(type0, "%type% %var% =") )
|
2007-05-29 08:24:36 +02:00
|
|
|
{
|
2008-03-22 12:46:06 +01:00
|
|
|
tok2 = _gettok(type0, 2);
|
2007-05-29 08:24:36 +02:00
|
|
|
typelen = 1;
|
|
|
|
}
|
|
|
|
|
2008-11-21 22:14:24 +01:00
|
|
|
else if ( Tokenizer::Match(type0, "%type% * %var% =") )
|
2007-05-29 08:24:36 +02:00
|
|
|
{
|
2008-03-22 12:46:06 +01:00
|
|
|
tok2 = _gettok(type0, 3);
|
2007-05-29 19:11:53 +02:00
|
|
|
typelen = 1;
|
|
|
|
}
|
|
|
|
|
2008-11-21 22:14:24 +01:00
|
|
|
else if ( Tokenizer::Match(type0, "struct %type% * %var% =") )
|
2007-05-29 19:11:53 +02:00
|
|
|
{
|
2008-03-22 12:46:06 +01:00
|
|
|
tok2 = _gettok(type0, 4);
|
2007-05-29 08:24:36 +02:00
|
|
|
typelen = 2;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (tok2)
|
|
|
|
{
|
|
|
|
if (tok2->str[0] == ',')
|
|
|
|
{
|
2008-11-06 19:31:39 +01:00
|
|
|
tok2->setstr(";");
|
2007-05-29 08:24:36 +02:00
|
|
|
InsertTokens(tok2, type0, typelen);
|
|
|
|
}
|
|
|
|
|
|
|
|
else
|
|
|
|
{
|
|
|
|
TOKEN *eq = tok2;
|
|
|
|
|
|
|
|
int parlevel = 0;
|
|
|
|
while (tok2)
|
|
|
|
{
|
|
|
|
if ( strchr("{(", tok2->str[0]) )
|
|
|
|
{
|
|
|
|
parlevel++;
|
|
|
|
}
|
|
|
|
|
|
|
|
else if ( strchr("})", tok2->str[0]) )
|
|
|
|
{
|
|
|
|
if (parlevel<0)
|
|
|
|
break;
|
|
|
|
parlevel--;
|
|
|
|
}
|
|
|
|
|
|
|
|
else if ( parlevel==0 && strchr(";,",tok2->str[0]) )
|
|
|
|
{
|
|
|
|
// "type var =" => "type var; var ="
|
2008-03-22 12:46:06 +01:00
|
|
|
TOKEN *VarTok = _gettok(type0,typelen);
|
2007-05-29 19:11:53 +02:00
|
|
|
if (VarTok->str[0]=='*')
|
|
|
|
VarTok = VarTok->next;
|
|
|
|
InsertTokens(eq, VarTok, 2);
|
2008-11-06 19:31:39 +01:00
|
|
|
eq->setstr(";");
|
2007-05-29 08:24:36 +02:00
|
|
|
|
|
|
|
// "= x, " => "= x; type "
|
|
|
|
if (tok2->str[0] == ',')
|
|
|
|
{
|
2008-11-06 19:31:39 +01:00
|
|
|
tok2->setstr(";");
|
2007-05-29 08:24:36 +02:00
|
|
|
InsertTokens( tok2, type0, typelen );
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
tok2 = tok2->next;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2008-11-20 20:18:55 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Replace NULL with 0..
|
|
|
|
for ( TOKEN *tok = _tokens; tok; tok = tok->next )
|
|
|
|
{
|
2008-11-21 22:14:24 +01:00
|
|
|
if ( Tokenizer::Match(tok, "NULL") )
|
2008-11-20 20:18:55 +01:00
|
|
|
tok->setstr("0");
|
|
|
|
}
|
|
|
|
|
|
|
|
// Replace pointer casts of 0.. "(char *)0" => "0"
|
|
|
|
for ( TOKEN *tok = _tokens; tok; tok = tok->next )
|
|
|
|
{
|
2008-11-21 22:14:24 +01:00
|
|
|
if ( Tokenizer::Match(tok->next, "( %type% * ) 0") || Tokenizer::Match(tok->next,"( %type% %type% * ) 0") )
|
2008-11-20 20:18:55 +01:00
|
|
|
{
|
2008-11-21 22:14:24 +01:00
|
|
|
while (!Tokenizer::Match(tok->next,"0"))
|
2008-11-20 20:18:55 +01:00
|
|
|
DeleteNextToken(tok);
|
|
|
|
}
|
2008-11-22 18:53:22 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
for ( bool done = false; !done; done = true)
|
|
|
|
{
|
|
|
|
done &= simplifyConditions();
|
|
|
|
};
|
2007-05-24 07:40:45 +02:00
|
|
|
}
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
2008-11-22 18:53:22 +01:00
|
|
|
/**
 * Simplify trivially-true / trivially-false sub-expressions in conditions.
 *
 * Performed simplifications:
 *  - "true &&" operands and "&& true" tails are removed
 *  - "false ||" operands and "|| false" tails are removed
 *  - a bare numeric constant between condition delimiters is replaced
 *    with "true" (non-zero) or "false" (zero)
 *
 * @return true when NO simplification was made (a fixed point was reached),
 *         false when the token list was modified. The inverted sense lets a
 *         caller loop until this function returns true.
 */
bool Tokenizer::simplifyConditions()
{
    // Stays true unless some simplification fires below.
    bool ret = true;

    for ( TOKEN *tok = _tokens; tok; tok = tok->next )
    {
        // Remove a redundant "true" operand of &&. The two deletions drop
        // the two tokens following 'tok' (either "true &&" or "&& true").
        if (Match(tok, "( true &&") || Match(tok, "&& true &&") || Match(tok->next, "&& true )"))
        {
            DeleteNextToken( tok );
            DeleteNextToken( tok );
            ret = false;
        }

        // Remove a redundant "false" operand of ||, same deletion scheme.
        else if (Match(tok, "( false ||") || Match(tok, "|| false ||") || Match(tok->next, "|| false )"))
        {
            DeleteNextToken( tok );
            DeleteNextToken( tok );
            ret = false;
        }

        // Change numeric constant in condition to "true" or "false"
        // Pattern: <delim> NUM <delim> where delim is one of ( ) && ||
        const TOKEN *tok2 = gettok(tok, 2);
        if ((Match(tok, "(") || Match(tok, "&&") || Match(tok, "||")) &&
            Match(tok->next, "%num%") &&
            (Match(tok2, ")") || Match(tok2, "&&") || Match(tok2, "||")) )
        {
            tok->next->setstr((strcmp(tok->next->str, "0")!=0) ? "true" : "false");
            ret = false;
        }
    }

    return ret;
}
|
|
|
|
|
|
|
|
|
|
|
|
|
2007-05-24 07:40:45 +02:00
|
|
|
|
2007-05-24 15:07:30 +02:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2007-05-29 08:24:36 +02:00
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2007-05-24 15:07:30 +02:00
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
// Helper functions for handling the tokens list
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
2008-11-09 08:19:53 +01:00
|
|
|
/**
 * Find the first position in the token list where the NULL-terminated
 * 'tokenstr' array matches consecutive tokens. An empty string entry ("")
 * acts as a wildcard that matches any single token.
 *
 * @return the token where the match begins, or NULL if no match exists.
 */
const TOKEN *Tokenizer::findtoken(const TOKEN *tok1, const char *tokenstr[])
{
    for (const TOKEN *candidate = tok1; candidate; candidate = candidate->next)
    {
        const TOKEN *cur = candidate;
        unsigned int idx = 0;
        for (; tokenstr[idx]; ++idx)
        {
            // Token list ended before the pattern did - no later candidate
            // can succeed either, so give up entirely.
            if (cur == NULL)
                return NULL;
            const bool wildcard = (tokenstr[idx][0] == '\0');
            if (!wildcard && strcmp(tokenstr[idx], cur->str) != 0)
                break;
            cur = cur->next;
        }
        // Whole pattern consumed => match starts at 'candidate'.
        if (tokenstr[idx] == NULL)
            return candidate;
    }
    return NULL;
}
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
2008-11-09 08:19:53 +01:00
|
|
|
/**
 * Return the token 'index' steps after 'tok'.
 * Returns NULL if the list ends first; a non-positive index returns 'tok'.
 */
const TOKEN *Tokenizer::gettok(const TOKEN *tok, int index)
{
    for (int step = 0; step < index && tok; ++step)
        tok = tok->next;
    return tok;
}
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
2008-11-09 08:19:53 +01:00
|
|
|
const char *Tokenizer::getstr(const TOKEN *tok, int index)
|
2007-05-24 15:07:30 +02:00
|
|
|
{
|
|
|
|
tok = gettok(tok, index);
|
|
|
|
return tok ? tok->str : "";
|
|
|
|
}
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
|
2007-05-29 08:24:36 +02:00
|
|
|
|
2007-05-29 19:11:53 +02:00
|
|
|
|
2007-05-29 08:24:36 +02:00
|
|
|
|
2008-11-20 20:18:55 +01:00
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
/**
 * Look up a collected function by name.
 * Linear search through FunctionList (filled by FillFunctionList).
 *
 * @return the function's head token, or NULL when no such function exists.
 */
const TOKEN *Tokenizer::GetFunctionTokenByName( const char funcname[] ) const
{
    const unsigned int count = FunctionList.size();
    for ( unsigned int idx = 0; idx < count; ++idx )
    {
        const TOKEN *func = FunctionList[idx];
        if ( strcmp( func->str, funcname ) == 0 )
            return func;
    }
    return NULL;
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Scan the token list and collect the head token of every function
 * implementation into FunctionList (a function head is "name (" at file
 * scope followed eventually by ") {"). Functions whose names occur more
 * than once are removed again afterwards.
 *
 * NOTE(review): the 'file_id' parameter is not used by this implementation
 * - confirm whether it is a leftover or reserved for future filtering.
 */
void Tokenizer::FillFunctionList(const unsigned int file_id)
{
    FunctionList.clear();

    // NOTE(review): these flags are written below but never read in this
    // function - presumably groundwork for skipping static/class methods.
    bool staticfunc = false;
    bool classfunc = false;

    int indentlevel = 0;
    for ( const TOKEN *tok = _tokens; tok; tok = tok->next )
    {
        // Track brace depth; only file-scope tokens are inspected.
        if ( tok->str[0] == '{' )
            indentlevel++;
        else if ( tok->str[0] == '}' )
            indentlevel--;

        if (indentlevel > 0)
        {
            continue;
        }

        if (strchr("};", tok->str[0]))
            staticfunc = classfunc = false;
        else if ( strcmp( tok->str, "static" ) == 0 )
            staticfunc = true;
        else if ( strcmp( tok->str, "::" ) == 0 )
            classfunc = true;
        else if (Tokenizer::Match(tok, "%var% ("))
        {
            // Check if this is the first token of a function implementation..
            for ( const TOKEN *tok2 = tok; tok2; tok2 = tok2->next )
            {
                // ';' before any '{' or ')' => declaration, not a definition.
                if ( tok2->str[0] == ';' )
                {
                    tok = tok2;
                    break;
                }

                else if ( tok2->str[0] == '{' )
                {
                    break;
                }

                else if ( tok2->str[0] == ')' )
                {
                    // ") {" => a function body follows: record the head token.
                    if ( Tokenizer::Match(tok2, ") {") )
                    {
                        FunctionList.push_back( tok );
                        tok = tok2;
                    }
                    else
                    {
                        // Not a body - skip ahead to the next ';' or '{' so
                        // the outer loop resumes after this declaration.
                        tok = tok2;
                        while (tok->next && !strchr(";{", tok->next->str[0]))
                            tok = tok->next;
                    }
                    break;
                }
            }
        }
    }

    // If the FunctionList functions with duplicate names, remove them
    // TODO this will need some better handling
    for ( unsigned int func1 = 0; func1 < FunctionList.size(); )
    {
        bool hasDuplicates = false;
        // Erase every later entry with the same name as entry 'func1'.
        for ( unsigned int func2 = func1 + 1; func2 < FunctionList.size(); )
        {
            if ( strcmp(FunctionList[func1]->str, FunctionList[func2]->str) == 0 )
            {
                hasDuplicates = true;
                FunctionList.erase( FunctionList.begin() + func2 );
            }
            else
            {
                ++func2;
            }
        }

        // A name that had duplicates is removed entirely, not kept once.
        if ( ! hasDuplicates )
        {
            ++func1;
        }
        else
        {
            FunctionList.erase( FunctionList.begin() + func1 );
        }
    }
}
|
|
|
|
|
|
|
|
//---------------------------------------------------------------------------
|
2007-05-29 08:24:36 +02:00
|
|
|
|
2008-11-20 20:18:55 +01:00
|
|
|
/**
 * Store a copy of the user-supplied settings for later use by the tokenizer.
 */
void Tokenizer::settings( const Settings &settings )
{
    _settings = settings;
}
|
2008-11-22 10:44:02 +01:00
|
|
|
|
|
|
|
// Deallocate lists..
|
|
|
|
// Deallocate lists..
/**
 * Release everything this tokenizer owns: the token list, the list of
 * #define symbols, and the file-name table. Leaves the object in a state
 * where a new file can be tokenized.
 */
void Tokenizer::DeallocateTokens()
{
    deleteTokens( _tokens );
    _tokens = 0;
    tokens_back = 0;

    while (dsymlist)
    {
        // Save the link before destroying the node.
        struct DefineSymbol *next = dsymlist->next;
        // name/value are released with free() while the node itself uses
        // delete - presumably name/value come from malloc/strdup where the
        // symbols are built; confirm against the allocation site.
        free(dsymlist->name);
        free(dsymlist->value);
        delete dsymlist;
        dsymlist = next;
    }

    Files.clear();
}
|
|
|
|
|
|
|
|
/**
 * Destroy every node of the given token list.
 * The next-pointer must be read before the node is deleted.
 */
void Tokenizer::deleteTokens(TOKEN *tok)
{
    while (tok != NULL)
    {
        TOKEN *doomed = tok;
        tok = tok->next;
        delete doomed;
    }
}
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
const char *Tokenizer::getParameterName( const TOKEN *ftok, int par )
|
|
|
|
{
|
|
|
|
int _par = 1;
|
|
|
|
for ( ; ftok; ftok = ftok->next)
|
|
|
|
{
|
|
|
|
if ( Tokenizer::Match(ftok, ",") )
|
|
|
|
++_par;
|
|
|
|
if ( par==_par && Tokenizer::Match(ftok, "%var% [,)]") )
|
|
|
|
return ftok->str;
|
|
|
|
}
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
/**
 * Return the first token from 'tok' onwards where 'pattern' matches
 * (see Tokenizer::Match for the pattern syntax), or 0 when none does.
 */
const TOKEN *Tokenizer::findmatch(const TOKEN *tok, const char pattern[], const char *varname1[], const char *varname2[])
{
    while ( tok != 0 )
    {
        if ( Tokenizer::Match(tok, pattern, varname1, varname2) )
            break;
        tok = tok->next;
    }
    return tok;
}
|
|
|
|
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
std::string Tokenizer::fileLine( const TOKEN *tok )
|
|
|
|
{
|
|
|
|
std::ostringstream ostr;
|
|
|
|
ostr << "[" << Files.at(tok->FileIndex) << ":" << tok->linenr << "]";
|
|
|
|
return ostr.str();
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Match a space-separated pattern against the token list starting at 'tok'.
 *
 * Pattern elements:
 *  - "%var%" / "%type%"  : any name token (underscore or letter first)
 *  - "%var1%" / "%var2%" : the dotted variable path given in varname1/varname2
 *                          (a NULL-terminated array: {"a","b"} matches "a . b")
 *  - "%num%"             : a numeric token
 *  - "%str%"             : a string literal token (starts with '"')
 *  - "[abc]"             : any single-character token listed inside brackets
 *  - anything else       : literal token text
 *
 * @return true when every pattern element matched consecutive tokens.
 */
bool Tokenizer::Match(const TOKEN *tok, const char pattern[], const char *varname1[], const char *varname2[])
{
    if (!tok)
        return false;

    const char *p = pattern;
    while (*p)
    {
        // Skip spaces in pattern..
        while ( *p == ' ' )
            p++;

        // Extract token from pattern..
        // NOTE(review): fixed 50-byte buffer - a pattern element of 50+
        // characters would overflow; confirm all call sites stay shorter.
        char str[50];
        char *s = str;
        while (*p && *p!=' ')
        {
            *s = *p;
            s++;
            p++;
        }
        *s = 0;

        // No token => Success!
        if (str[0] == 0)
            return true;

        // Any symbolname..
        if (strcmp(str,"%var%")==0 || strcmp(str,"%type%")==0)
        {
            if (!Tokenizer::IsName(tok->str))
                return false;
        }

        // Variable name..
        else if (strcmp(str,"%var1%")==0 || strcmp(str,"%var2%")==0)
        {
            const char **varname = (strcmp(str,"%var1%")==0) ? varname1 : varname2;

            if ( ! varname )
                return false;

            // First component must match the current token exactly.
            if (strcmp(tok->str, varname[0]) != 0)
                return false;

            // Each further component must appear as ". component".
            for ( int i = 1; varname[i]; i++ )
            {
                if ( ! Tokenizer::gettok(tok, 2) )
                    return false;

                if ( strcmp(Tokenizer::getstr(tok, 1), ".") )
                    return false;

                if ( strcmp(Tokenizer::getstr(tok, 2), varname[i]) )
                    return false;

                // Advance past ". component" so 'tok->next' below continues
                // after the full dotted path.
                tok = Tokenizer::gettok(tok, 2);
            }
        }

        else if (strcmp(str,"%num%")==0)
        {
            if ( ! Tokenizer::IsNumber(tok->str) )
                return false;
        }


        else if (strcmp(str,"%str%")==0)
        {
            if ( tok->str[0] != '\"' )
                return false;
        }

        // [.. => search for a one-character token..
        // Only single-character tokens can match; longer tokens fall
        // through to the literal comparison below (and fail).
        else if (str[0]=='[' && strchr(str, ']') && tok->str[1] == 0)
        {
            // Truncate at the closing bracket, then look the token's
            // character up in the set between the brackets.
            *strrchr(str, ']') = 0;
            if ( strchr( str + 1, tok->str[0] ) == 0 )
                return false;
        }

        else if (strcmp(str, tok->str) != 0)
            return false;

        tok = tok->next;
        // Pattern continues but the token list ended => no match.
        if (!tok && *p)
            return false;
    }

    // The end of the pattern has been reached and nothing wrong has been found
    return true;
}
|
|
|
|
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
/**
 * Compare two file names for equality, honouring the platform's
 * case-sensitivity convention: case-sensitive on Linux, case-insensitive
 * on the other supported toolchains.
 *
 * The previous #ifdef chain had no fallback: on a compiler matching none
 * of the conditions the function fell off the end without returning a
 * value (undefined behaviour). The branches are now mutually exclusive
 * (#if/#elif) with a case-sensitive #else fallback.
 */
bool Tokenizer::SameFileName( const char fname1[], const char fname2[] )
{
#if defined(__linux__)
    return bool( strcmp(fname1, fname2) == 0 );
#elif defined(__GNUC__)
    return bool( strcasecmp(fname1, fname2) == 0 );
#elif defined(__BORLANDC__)
    return bool( stricmp(fname1, fname2) == 0 );
#elif defined(_MSC_VER)
    return bool( _stricmp(fname1, fname2) == 0 );
#else
    // Unknown toolchain: default to an exact, case-sensitive comparison.
    return bool( strcmp(fname1, fname2) == 0 );
#endif
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
 * Return true when 'str' starts like an identifier: an underscore or a
 * letter. The cast to unsigned char is required because passing a negative
 * plain-char value (e.g. a byte >= 0x80 on a signed-char platform) to
 * isalpha() is undefined behaviour.
 */
bool Tokenizer::IsName(const char str[])
{
    return bool(str[0]=='_' || isalpha((unsigned char)str[0]));
}
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
/**
 * Return true when 'str' starts with a decimal digit. The cast to
 * unsigned char avoids undefined behaviour for negative plain-char
 * values passed to isdigit().
 */
bool Tokenizer::IsNumber(const char str[])
{
    return bool(isdigit((unsigned char)str[0]) != 0);
}
|
|
|
|
//---------------------------------------------------------------------------
|
|
|
|
|
|
|
|
/**
 * Return true when 'str' is one of the built-in fundamental type keywords
 * (bool, char, short, int, long, float, double). NULL yields false.
 *
 * The table is now static (built once, not per call) and the search
 * returns on the first hit instead of OR-ing across the whole list.
 */
bool Tokenizer::IsStandardType(const char str[])
{
    if (!str)
        return false;
    static const char *type[] = {"bool","char","short","int","long","float","double",0};
    for (int i = 0; type[i]; i++)
    {
        if (strcmp(str, type[i]) == 0)
            return true;
    }
    return false;
}
|
|
|
|
//---------------------------------------------------------------------------
|