diff --git a/src/tokenize.cpp b/src/tokenize.cpp
index e9188dc5e..bb1e7444c 100644
--- a/src/tokenize.cpp
+++ b/src/tokenize.cpp
@@ -808,6 +808,17 @@ void Tokenizer::simplifyTokenList()
 
     simplifyNamespaces();
 
+    // Combine wide strings
+    for (Token *tok = _tokens; tok; tok = tok->next())
+    {
+        while (tok->str() == "L" && tok->next() && tok->next()->str()[0] == '"')
+        {
+            // Combine 'L "string"'
+            tok->str(tok->next()->str().c_str());
+            tok->deleteNext();
+        }
+    }
+
     // Combine strings
     for (Token *tok = _tokens; tok; tok = tok->next())
     {
diff --git a/test/testsimplifytokens.cpp b/test/testsimplifytokens.cpp
index d6aacdfd9..3a6b973c4 100644
--- a/test/testsimplifytokens.cpp
+++ b/test/testsimplifytokens.cpp
@@ -91,6 +91,7 @@ private:
         // "if(0==x)" => "if(!x)"
         TEST_CASE(ifnot);
+        TEST_CASE(combine_wstrings);
     }
 
     std::string tok(const char code[])
     {
@@ -212,6 +213,27 @@ private:
         ASSERT_EQUALS(tok(code2), tok(code1));
     }
 
+    void combine_wstrings()
+    {
+        const char code1[] = "void foo()\n"
+                             "{\n"
+                             "const wchar_t *a =\n"
+                             "{\n"
+                             "L\"hello \"\n"
+                             "L\"world\"\n"
+                             "};\n"
+                             "}\n";
+
+        const char code2[] = "void foo()\n"
+                             "{\n"
+                             "const wchar_t *a =\n"
+                             "{\n"
+                             "\"hello world\"\n"
+                             "};\n"
+                             "}\n";
+        ASSERT_EQUALS(tok(code2), tok(code1));
+    }
+
     void double_plus()
     {
         {
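
For readers who want to see the intended effect outside of cppcheck, here is a minimal standalone sketch of the same two-pass idea: first fold a leading "L" token into the string literal that follows it (the loop this patch adds), then concatenate adjacent string literals (the job of the pre-existing "Combine strings" loop that runs next). The std::vector<std::string> token model and the combineStrings helper below are illustrative assumptions only, not cppcheck's Token API.

// Standalone illustration only -- a simplified token model, not cppcheck's Token class.
#include <cassert>
#include <string>
#include <vector>

// Hypothetical helper mirroring the two passes in Tokenizer::simplifyTokenList().
static void combineStrings(std::vector<std::string> &tokens)
{
    // Pass 1 ("Combine wide strings"): replace an "L" token with the string
    // literal that follows it, then drop that following token.
    for (std::size_t i = 0; i + 1 < tokens.size();)
    {
        if (tokens[i] == "L" && tokens[i + 1][0] == '"')
        {
            tokens[i] = tokens[i + 1];            // tok->str(tok->next()->str().c_str());
            tokens.erase(tokens.begin() + i + 1); // tok->deleteNext();
        }
        else
            ++i;
    }

    // Pass 2 ("Combine strings"): merge adjacent string literals,
    // e.g. "hello " followed by "world" becomes "hello world".
    for (std::size_t i = 0; i + 1 < tokens.size();)
    {
        if (tokens[i][0] == '"' && tokens[i + 1][0] == '"')
        {
            tokens[i] = tokens[i].substr(0, tokens[i].size() - 1) + tokens[i + 1].substr(1);
            tokens.erase(tokens.begin() + i + 1);
        }
        else
            ++i;
    }
}

int main()
{
    // Token stream for the literals in: const wchar_t *a = { L"hello " L"world" };
    std::vector<std::string> tokens = { "L", "\"hello \"", "L", "\"world\"" };
    combineStrings(tokens);
    assert(tokens.size() == 1 && tokens[0] == "\"hello world\"");
    return 0;
}

Compiled as an ordinary C++ program, this exits cleanly when the merge produces the single "hello world" literal that the new combine_wstrings test expects after tokenization.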