From 1f1c65e232878bca88fe8551318c7bc02b90ee90 Mon Sep 17 00:00:00 2001
From: Florian Weimer
Date: Mon, 17 Feb 2020 17:18:49 +0100
Subject: conform/conformtest.py: Extend tokenizer to cover character constants

Such constants are used in __USE_EXTERN_INLINES blocks.
---
 conform/conformtest.py | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/conform/conformtest.py b/conform/conformtest.py
index 951e3b2420..cb2bd97eca 100644
--- a/conform/conformtest.py
+++ b/conform/conformtest.py
@@ -633,12 +633,11 @@ class HeaderTests(object):
                     bad_tokens.discard(match.group(1))
                     continue
                 # Tokenize the line and check identifiers found.  The
-                # handling of strings does not allow for escaped
-                # quotes, no allowance is made for character
-                # constants, and hex floats may be wrongly split into
-                # tokens including identifiers, but this is sufficient
-                # in practice and matches the old perl script.
-                line = re.sub(r'"[^"]*"', '', line)
+                # handling of strings and character constants does not
+                # allow for escaped quotes, and hex floats may be
+                # wrongly split into tokens including identifiers, but
+                # this is sufficient in practice.
+                line = re.sub(r'(?:\bL)?(?:"[^"]*"|\'[^\']*\')', '', line)
                 line = line.strip()
                 for token in re.split(r'[^A-Za-z0-9_]+', line):
                     if re.match(r'[A-Za-z_]', token):
-- 
cgit 1.4.1
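
Not part of the commit: a minimal sketch of why the change matters.  The old
pattern strips only string literals, so a character constant such as '\n' or
L'a' in an __USE_EXTERN_INLINES block leaves stray identifier tokens behind;
the new pattern removes the constant as well.  The sample input line and the
identifiers() helper below are hypothetical; only the two regular expressions
are taken from the hunk above.

    import re

    # Hypothetical preprocessed line with a string and character constants.
    line = "return __c == '\\n' ? 0 : __stub (\"x\", L'a');"

    old = re.sub(r'"[^"]*"', '', line)                         # old pattern
    new = re.sub(r'(?:\bL)?(?:"[^"]*"|\'[^\']*\')', '', line)  # new pattern

    def identifiers(s):
        # Same tokenization step as the script: split on non-identifier
        # characters and keep tokens that start like an identifier.
        return [t for t in re.split(r'[^A-Za-z0-9_]+', s)
                if re.match(r'[A-Za-z_]', t)]

    print(identifiers(old))  # ['return', '__c', 'n', '__stub', 'L', 'a']
    print(identifiers(new))  # ['return', '__c', '__stub']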