LEFT | RIGHT |
(no file at all) | |
1 #!/usr/bin/env python3 | 1 #!/usr/bin/env python3.2 |
2 | 2 |
3 # ***** BEGIN GPL LICENSE BLOCK ***** | 3 # ***** BEGIN GPL LICENSE BLOCK ***** |
4 # | 4 # |
5 # This program is free software; you can redistribute it and/or | 5 # This program is free software; you can redistribute it and/or |
6 # modify it under the terms of the GNU General Public License | 6 # modify it under the terms of the GNU General Public License |
7 # as published by the Free Software Foundation; either version 2 | 7 # as published by the Free Software Foundation; either version 2 |
8 # of the License, or (at your option) any later version. | 8 # of the License, or (at your option) any later version. |
9 # | 9 # |
10 # This program is distributed in the hope that it will be useful, | 10 # This program is distributed in the hope that it will be useful, |
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of | 11 # but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
13 # GNU General Public License for more details. | 13 # GNU General Public License for more details. |
14 # | 14 # |
15 # You should have received a copy of the GNU General Public License | 15 # You should have received a copy of the GNU General Public License |
16 # along with this program; if not, write to the Free Software Foundation, | 16 # along with this program; if not, write to the Free Software Foundation, |
17 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | 17 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. |
18 # | 18 # |
19 # Contributor(s): Campbell Barton | 19 # Contributor(s): Campbell Barton |
20 # | 20 # |
21 # #**** END GPL LICENSE BLOCK #**** | 21 # #**** END GPL LICENSE BLOCK #**** |
22 | 22 |
23 # <pep8 compliant> | 23 # <pep8 compliant> |
24 | 24 |
25 """ | 25 """ |
26 This script runs outside of blender and scans source | 26 This script runs outside of blender and scans source |
27 | 27 |
28 python3 source/tools/check_source_c.py source/ | 28 python3.2 source/tools/check_source_c.py source/ |
29 """ | 29 """ |
30 | 30 |
31 import os | |
32 | |
33 from check_style_c_config import IGNORE, IGNORE_DIR, SOURCE_DIR | |
34 IGNORE = tuple([os.path.join(SOURCE_DIR, ig) for ig in IGNORE]) | |
35 IGNORE_DIR = tuple([os.path.join(SOURCE_DIR, ig) for ig in IGNORE_DIR]) | |
36 WARN_TEXT = False | |
37 | |
38 | |
39 def is_ignore(f): | |
40 for ig in IGNORE: | |
41 if f == ig: | |
42 return True | |
43 for ig in IGNORE_DIR: | |
44 if f.startswith(ig): | |
45 return True | |
46 return False | |
47 | |
48 print("Scanning:", SOURCE_DIR) | |
49 | |
50 # TODO | 31 # TODO |
51 # | 32 # |
52 # Add checks for: | 33 # Add checks for: |
| 34 # - space around operators |
53 # - macro brace use | 35 # - macro brace use |
| 36 # - spaces around brackets with function call: func(args) |
| 37 # - braces with function definitions |
| 38 # - braces with typedefs |
54 # - line length - in a not-too-annoying way | 39 # - line length - in a not-too-annoying way |
55 # (allow for long arrays in struct definitions, PyMethodDef for eg) | 40 # (allow for long arrays in struct definitions, PyMethodDef for eg) |
56 | 41 |
57 from pygments import lex # highlight | 42 from pygments import highlight, lex |
58 from pygments.lexers import CLexer | 43 from pygments.lexers import CLexer |
59 from pygments.formatters import RawTokenFormatter | 44 from pygments.formatters import RawTokenFormatter |
60 | 45 |
61 from pygments.token import Token | 46 from pygments.token import Token |
62 | 47 |
63 import argparse | |
64 | |
65 PRINT_QTC_TASKFORMAT = False | 48 PRINT_QTC_TASKFORMAT = False |
66 if "USE_QTC_TASK" in os.environ: | |
67 PRINT_QTC_TASKFORMAT = True | |
68 | 49 |
69 TAB_SIZE = 4 | 50 TAB_SIZE = 4 |
70 LIN_SIZE = 120 | 51 LIN_SIZE = 120 |
71 | 52 |
72 global filepath | 53 global filepath |
73 tokens = [] | 54 tokens = [] |
74 | 55 |
75 | 56 |
76 # could store index here too, then have prev/next methods | 57 # could store index here too, then have prev/next methods |
77 class TokStore: | 58 class TokStore: |
78 __slots__ = ("type", "text", "line") | 59 __slots__ = ("type", "text", "line") |
79 | 60 |
80 def __init__(self, type, text, line): | 61 def __init__(self, type, text, line): |
81 self.type = type | 62 self.type = type |
82 self.text = text | 63 self.text = text |
83 self.line = line | 64 self.line = line |
84 | 65 |
85 | 66 |
86 def tk_range_to_str(a, b, expand_tabs=False): | 67 def tk_range_to_str(a, b): |
87 txt = "".join([tokens[i].text for i in range(a, b + 1)]) | 68 return "".join([tokens[i].text for i in range(a, b + 1)]) |
88 if expand_tabs: | |
89 txt = txt.expandtabs(TAB_SIZE) | |
90 return txt | |
91 | 69 |
92 | 70 |
93 def tk_item_is_newline(tok): | 71 def tk_item_is_newline(tok): |
94 return tok.type == Token.Text and tok.text.strip("\t ") == "\n" | 72 return tok.type == Token.Text and tok.text == "\n" |
95 | 73 |
96 | 74 |
97 def tk_item_is_ws_newline(tok): | 75 def tk_item_is_ws_newline(tok): |
98 return (tok.text == "") or \ | 76 return (tok.type == Token.Text and tok.text.isspace()) or \ |
99 (tok.type == Token.Text and tok.text.isspace()) or \ | |
100 (tok.type in Token.Comment) | 77 (tok.type in Token.Comment) |
101 | 78 |
102 | 79 |
103 def tk_item_is_ws(tok): | 80 def tk_item_is_ws(tok): |
104 return (tok.text == "") or \ | 81 return (tok.type == Token.Text and tok.text != "\n" and tok.text.isspace())
or \ |
105 (tok.type == Token.Text and tok.text.strip("\t ") != "\n" and tok.tex
t.isspace()) or \ | |
106 (tok.type in Token.Comment) | 82 (tok.type in Token.Comment) |
107 | 83 |
108 | 84 |
109 # also skips comments | 85 # also skips comments |
110 def tk_advance_ws(index, direction): | 86 def tk_advance_ws(index, direction): |
111 while tk_item_is_ws(tokens[index + direction]) and index > 0: | 87 while tk_item_is_ws(tokens[index + direction]) and index > 0: |
112 index += direction | 88 index += direction |
113 return index | 89 return index |
114 | 90 |
115 | 91 |
116 def tk_advance_no_ws(index, direction): | |
117 index += direction | |
118 while tk_item_is_ws(tokens[index]) and index > 0: | |
119 index += direction | |
120 return index | |
121 | |
122 | |
123 def tk_advance_ws_newline(index, direction): | 92 def tk_advance_ws_newline(index, direction): |
124 while tk_item_is_ws_newline(tokens[index + direction]) and index > 0: | 93 while tk_item_is_ws_newline(tokens[index + direction]) and index > 0: |
125 index += direction | 94 index += direction |
126 return index + direction | 95 return index + direction |
127 | |
128 | |
129 def tk_advance_line_start(index): | |
130 """ Go the the first non-whitespace token of the line. | |
131 """ | |
132 while tokens[index].line == tokens[index - 1].line and index > 0: | |
133 index -= 1 | |
134 return tk_advance_no_ws(index, 1) | |
135 | |
136 | |
137 def tk_advance_line(index, direction): | |
138 line = tokens[index].line | |
139 while tokens[index + direction].line == line or tokens[index].text == "\n": | |
140 index += direction | |
141 return index | |
142 | 96 |
143 | 97 |
144 def tk_match_backet(index): | 98 def tk_match_backet(index): |
145 backet_start = tokens[index].text | 99 backet_start = tokens[index].text |
146 assert(tokens[index].type == Token.Punctuation) | 100 assert(tokens[index].type == Token.Punctuation) |
147 assert(backet_start in "[]{}()") | 101 assert(backet_start in "[]{}()") |
148 | 102 |
149 if tokens[index].text in "({[": | 103 if tokens[index].text in "({[": |
150 direction = 1 | 104 direction = 1 |
151 backet_end = {"(": ")", "[": "]", "{": "}"}[backet_start] | 105 backet_end = {"(": ")", "[": "]", "{": "}"}[backet_start] |
(...skipping 16 matching lines...) Expand all Loading... |
168 index_match += direction | 122 index_match += direction |
169 | 123 |
170 return index_match | 124 return index_match |
171 | 125 |
172 | 126 |
173 def tk_index_is_linestart(index): | 127 def tk_index_is_linestart(index): |
174 index_prev = tk_advance_ws_newline(index, -1) | 128 index_prev = tk_advance_ws_newline(index, -1) |
175 return tokens[index_prev].line < tokens[index].line | 129 return tokens[index_prev].line < tokens[index].line |
176 | 130 |
177 | 131 |
178 def extract_to_linestart(index): | |
179 ls = [] | |
180 line = tokens[index].line | |
181 index -= 1 | |
182 while index > 0 and tokens[index].line == line: | |
183 ls.append(tokens[index].text) | |
184 index -= 1 | |
185 | |
186 if index != 0: | |
187 ls.append(tokens[index].text.rsplit("\n", 1)[1]) | |
188 | |
189 ls.reverse() | |
190 return "".join(ls) | |
191 | |
192 | |
193 def extract_statement_if(index_kw): | 132 def extract_statement_if(index_kw): |
194 # assert(tokens[index_kw].text == "if") | 133 # assert(tokens[index_kw].text == "if") |
195 | 134 |
196 # seek back | 135 # seek back |
197 i = index_kw | 136 i = index_kw |
198 | 137 |
199 i_start = tk_advance_ws(index_kw - 1, direction=-1) | 138 i_start = tk_advance_ws(index_kw - 1, direction=-1) |
200 | 139 |
201 # seek forward | 140 # seek forward |
202 i_next = tk_advance_ws_newline(index_kw, direction=1) | 141 i_next = tk_advance_ws_newline(index_kw, direction=1) |
203 | 142 |
204 # print(tokens[i_next]) | 143 # print(tokens[i_next]) |
205 | 144 |
206 # ignore preprocessor | |
207 i_linestart = tk_advance_line_start(index_kw) | |
208 if tokens[i_linestart].text.startswith("#"): | |
209 return None | |
210 | |
211 if tokens[i_next].type != Token.Punctuation or tokens[i_next].text != "(": | 145 if tokens[i_next].type != Token.Punctuation or tokens[i_next].text != "(": |
212 warning("no '(' after '%s'" % tokens[index_kw].text, i_start, i_next) | 146 print("Error line: %d" % tokens[index_kw].line) |
| 147 print("if (\n" |
| 148 " ^\n" |
| 149 "" |
| 150 "Character not found, insetad found:") |
| 151 print(tk_range_to_str(i_start, i_next)) |
213 return None | 152 return None |
214 | 153 |
215 i_end = tk_match_backet(i_next) | 154 i_end = tk_match_backet(i_next) |
216 | 155 |
217 return (i_start, i_end) | 156 return (i_start, i_end) |
218 | 157 |
219 | 158 |
220 def extract_operator(index_op): | 159 def extract_operator(index_op): |
221 op_text = "" | 160 op_text = "" |
222 i = 0 | 161 i = 0 |
223 while tokens[index_op + i].type == Token.Operator: | 162 while tokens[index_op + i].type == Token.Operator: |
224 op_text += tokens[index_op + i].text | 163 op_text += tokens[index_op + i].text |
225 i += 1 | 164 i += 1 |
226 return op_text, index_op + (i - 1) | 165 return op_text, index_op + (i - 1) |
227 | 166 |
228 | 167 |
229 def extract_cast(index): | |
230 # to detect a cast is quite involved... sigh | |
231 # assert(tokens[index].text == "(") | |
232 | |
233 # TODO, comment within cast, but thats rare | |
234 i_start = index | |
235 i_end = tk_match_backet(index) | |
236 | |
237 # first check we are not '()' | |
238 if i_start + 1 == i_end: | |
239 return None | |
240 | |
241 # check we have punctuation before the cast | |
242 i = i_start - 1 | |
243 while tokens[i].text.isspace(): | |
244 i -= 1 | |
245 i_prev_no_ws = i | |
246 if tokens[i].type in {Token.Keyword, Token.Name}: | |
247 # avoids 'foo(bar)test' | |
248 # but not ' = (bar)test' | |
249 return None | |
250 | |
251 # validate types | |
252 tokens_cast = [tokens[i] for i in range(i_start + 1, i_end)] | |
253 for t in tokens_cast: | |
254 if t.type == Token.Keyword: | |
255 return None | |
256 elif t.type == Token.Operator and t.text != "*": | |
257 # prevent '(a + b)' | |
258 # note, we could have '(float(*)[1+2])' but this is unlikely | |
259 return None | |
260 elif t.type == Token.Punctuation and t.text not in '()[]': | |
261 # prevent '(a, b)' | |
262 return None | |
263 tokens_cast_strip = [] | |
264 for t in tokens_cast: | |
265 if t.type in Token.Comment: | |
266 pass | |
267 elif t.type == Token.Text and t.text.isspace(): | |
268 pass | |
269 else: | |
270 tokens_cast_strip.append(t) | |
271 # check token order and types | |
272 if not tokens_cast_strip: | |
273 return None | |
274 if tokens_cast_strip[0].type not in {Token.Name, Token.Type, Token.Keyword.T
ype}: | |
275 return None | |
276 t_prev = None | |
277 for t in tokens_cast_strip[1:]: | |
278 # prevent identifiers after the first: '(a b)' | |
279 if t.type in {Token.Keyword.Type, Token.Name, Token.Text}: | |
280 return None | |
281 # prevent: '(a * 4)' | |
282 # allow: '(a (*)[4])' | |
283 if t_prev is not None and t_prev.text == "*" and t.type != Token.Punctua
tion: | |
284 return None | |
285 t_prev = t | |
286 del t_prev | |
287 | |
288 # debug only | |
289 ''' | |
290 string = "".join(tokens[i].text for i in range(i_start, i_end + 1)) | |
291 #string = "".join(tokens[i].text for i in range(i_start + 1, i_end)) | |
292 #types = [tokens[i].type for i in range(i_start + 1, i_end)] | |
293 types = [t.type for t in tokens_cast_strip] | |
294 | |
295 print("STRING:", string) | |
296 print("TYPES: ", types) | |
297 print() | |
298 ''' | |
299 | |
300 return (i_start, i_end) | |
301 | |
302 | |
303 def warning(message, index_kw_start, index_kw_end): | 168 def warning(message, index_kw_start, index_kw_end): |
304 if PRINT_QTC_TASKFORMAT: | 169 if PRINT_QTC_TASKFORMAT: |
305 print("%s\t%d\t%s\t%s" % (filepath, tokens[index_kw_start].line, "commen
t", message)) | 170 print("%s\t%d\t%s\t%s" % (filepath, tokens[index_kw_start].line, "commen
t", message)) |
306 else: | 171 else: |
307 print("%s:%d: warning: %s" % (filepath, tokens[index_kw_start].line, mes
sage)) | 172 print("%s:%d: warning: %s" % (filepath, tokens[index_kw_start].line, mes
sage)) |
308 if WARN_TEXT: | |
309 print(tk_range_to_str(index_kw_start, index_kw_end, expand_tabs=True
)) | |
310 | |
311 | |
312 def warning_lineonly(message, line): | |
313 if PRINT_QTC_TASKFORMAT: | |
314 print("%s\t%d\t%s\t%s" % (filepath, line, "comment", message)) | |
315 else: | |
316 print("%s:%d: warning: %s" % (filepath, line, message)) | |
317 | 173 |
318 # print(tk_range_to_str(index_kw_start, index_kw_end)) | 174 # print(tk_range_to_str(index_kw_start, index_kw_end)) |
319 | 175 |
320 | 176 |
321 # ------------------------------------------------------------------ | 177 # ------------------------------------------------------------------ |
322 # Own Blender rules here! | 178 # Own Blender rules here! |
323 | 179 |
324 def blender_check_kw_if(index_kw_start, index_kw, index_kw_end): | 180 def blender_check_kw_if(index_kw_start, index_kw, index_kw_end): |
325 | 181 |
326 # check if we have: 'if(' | 182 # check if we have: 'if(' |
327 if not tk_item_is_ws(tokens[index_kw + 1]): | 183 if not tk_item_is_ws(tokens[index_kw + 1]): |
328 warning("no white space between '%s('" % tokens[index_kw].text, index_kw
_start, index_kw_end) | 184 warning("no white space between '%s('" % tokens[index_kw].text, index_kw
_start, index_kw_end) |
329 | 185 |
330 # check for: ){ | 186 # check for: ){ |
331 index_next = tk_advance_ws_newline(index_kw_end, 1) | 187 index_next = tk_advance_ws_newline(index_kw_end, 1) |
332 if tokens[index_next].type == Token.Punctuation and tokens[index_next].text
== "{": | 188 if tokens[index_next].type == Token.Punctuation and tokens[index_next].text
== "{": |
333 if not tk_item_is_ws(tokens[index_next - 1]): | 189 if not tk_item_is_ws(tokens[index_kw + 1]): |
334 warning("no white space between trailing bracket '%s (){'" % tokens[
index_kw].text, index_kw_start, index_kw_end) | 190 warning("no white space between trailing bracket '%s (){'" % tokens[
index_kw].text, index_kw_start, index_kw_end) |
335 | 191 |
336 # check for: if () | 192 # check for: if () |
337 # { | 193 # { |
338 # note: if the if statement is multi-line we allow it | 194 # note: if the if statement is multi-line we allow it |
339 if ((tokens[index_kw].line == tokens[index_kw_end].line) and | 195 if ((tokens[index_kw].line == tokens[index_kw_end].line) and |
340 (tokens[index_kw].line == tokens[index_next].line - 1)): | 196 (tokens[index_kw].line == tokens[index_next].line - 1)): |
341 | 197 |
342 warning("if body brace on a new line '%s ()\\n{'" % tokens[index_kw]
.text, index_kw, index_kw_end) | 198 warning("if body brace on a new line '%s ()\\n{'" % tokens[index_kw]
.text, index_kw, index_kw_end) |
343 else: | 199 else: |
344 # no '{' on a multi-line if | 200 # no '{' on a multi-line if |
345 if tokens[index_kw].line != tokens[index_kw_end].line: | 201 if tokens[index_kw].line != tokens[index_kw_end].line: |
346 warning("multi-line if should use a brace '%s (\\n\\n) statement;'"
% tokens[index_kw].text, index_kw, index_kw_end) | 202 warning("multi-line if should use a brace '%s (\\n\\n) statement;'"
% tokens[index_kw].text, index_kw, index_kw_end) |
347 | 203 |
348 # check for: if (a && | |
349 # b) { ... | |
350 # brace should be on a newline. | |
351 if (tokens[index_kw].line != tokens[index_kw_end].line): | |
352 if tokens[index_kw_end].line == tokens[index_next].line: | |
353 warning("multi-line should use a on a new line '%s (\\n\\n) {'" % to
kens[index_kw].text, index_kw, index_kw_end) | |
354 | |
355 # check for: if () { ... }; | |
356 # | |
357 # no need to have semicolon after brace. | |
358 if tokens[index_next].text == "{": | |
359 index_final = tk_match_backet(index_next) | |
360 index_final_step = tk_advance_no_ws(index_final, 1) | |
361 if tokens[index_final_step].text == ";": | |
362 warning("semi-colon after brace '%s () { ... };'" % tokens[index_kw]
.text, index_final_step, index_final_step) | |
363 | |
364 | 204 |
365 def blender_check_kw_else(index_kw): | 205 def blender_check_kw_else(index_kw): |
366 # for 'else if' use the if check. | 206 # for 'else if' use the if check. |
367 i_next = tk_advance_ws_newline(index_kw, 1) | 207 i_next = tk_advance_ws_newline(index_kw, 1) |
368 | 208 |
369 # check there is at least one space between: | 209 # check there is at least one space between: |
370 # else{ | 210 # else{ |
371 if index_kw + 1 == i_next: | 211 if index_kw + 1 == i_next: |
372 warning("else has no space between following brace 'else{'", index_kw, i
_next) | 212 warning("else has no space between following brace 'else{'", index_kw, i
_next) |
373 | 213 |
374 # check if there are more than 1 spaces after else, but nothing after the fo
llowing brace | 214 # check if there are more then 1 spaces after else, but nothing after the fo
llowing brace |
375 # else { | 215 # else { |
376 # ... | 216 # ... |
377 # | 217 # |
378 # check for this case since this is needed sometimes: | 218 # check for this case since this is needed sometimes: |
379 # else { a = 1; } | 219 # else { a = 1; } |
380 if ((tokens[index_kw].line == tokens[i_next].line) and | 220 if ((tokens[index_kw].line == tokens[i_next].line) and |
381 (tokens[index_kw + 1].type == Token.Text) and | 221 (tokens[index_kw + 1].type == Token.Text) and |
382 (len(tokens[index_kw + 1].text) > 1) and | 222 (len(tokens[index_kw + 1].text) > 1) and |
383 (tokens[index_kw + 1].text.isspace())): | 223 (tokens[index_kw + 1].text.isspace())): |
384 | 224 |
385 # check if the next data after { is on a newline | 225 # check if the next data after { is on a newline |
386 i_next_next = tk_advance_ws_newline(i_next, 1) | 226 i_next_next = tk_advance_ws_newline(i_next, 1) |
387 if tokens[i_next].line != tokens[i_next_next].line: | 227 if tokens[i_next].line != tokens[i_next_next].line: |
388 warning("unneeded whitespace before brace 'else ... {'", index_kw, i
_next) | 228 warning("unneeded whitespace before brace 'else ... {'", index_kw, i
_next) |
389 | 229 |
390 # this check only tests for: | 230 # this check only tests for: |
391 # else | 231 # else |
392 # { | 232 # { |
393 # ... which is never OK | 233 # ... which is never OK |
394 # | |
395 # ... except if you have | |
396 # else | |
397 # #preprocessor | |
398 # { | |
399 | 234 |
400 if tokens[i_next].type == Token.Punctuation and tokens[i_next].text == "{": | 235 if tokens[i_next].type == Token.Punctuation and tokens[i_next].text == "{": |
401 if tokens[index_kw].line < tokens[i_next].line: | 236 if tokens[index_kw].line < tokens[i_next].line: |
402 # check for preproc | 237 warning("else body brace on a new line 'else\\n{'", index_kw, i_next
) |
403 i_newline = tk_advance_line(index_kw, 1) | |
404 if tokens[i_newline].text.startswith("#"): | |
405 pass | |
406 else: | |
407 warning("else body brace on a new line 'else\\n{'", index_kw, i_
next) | |
408 | |
409 # this check only tests for: | |
410 # else | |
411 # if | |
412 # ... which is never OK | |
413 if tokens[i_next].type == Token.Keyword and tokens[i_next].text == "if": | |
414 if tokens[index_kw].line < tokens[i_next].line: | |
415 warning("else if is split by a new line 'else\\nif'", index_kw, i_ne
xt) | |
416 | |
417 # check | |
418 # } else | |
419 # ... which is never OK | |
420 i_prev = tk_advance_no_ws(index_kw, -1) | |
421 if tokens[i_prev].type == Token.Punctuation and tokens[i_prev].text == "}": | |
422 if tokens[index_kw].line == tokens[i_prev].line: | |
423 warning("else has no newline before the brace '} else'", i_prev, ind
ex_kw) | |
424 | |
425 | |
426 def blender_check_kw_switch(index_kw_start, index_kw, index_kw_end): | |
427 # In this function we check the body of the switch | |
428 | |
429 # switch (value) { | |
430 # ... | |
431 # } | |
432 | |
433 # assert(tokens[index_kw].text == "switch") | |
434 | |
435 index_next = tk_advance_ws_newline(index_kw_end, 1) | |
436 | |
437 if tokens[index_next].type == Token.Punctuation and tokens[index_next].text
== "{": | |
438 ws_switch_indent = extract_to_linestart(index_kw) | |
439 | |
440 if ws_switch_indent.isspace(): | |
441 | |
442 # 'case' should have at least 1 indent. | |
443 # otherwise expect 2 indent (or more, for nested switches) | |
444 ws_test = { | |
445 "case": ws_switch_indent + "\t", | |
446 "default:": ws_switch_indent + "\t", | |
447 | |
448 "break": ws_switch_indent + "\t\t", | |
449 "return": ws_switch_indent + "\t\t", | |
450 "continue": ws_switch_indent + "\t\t", | |
451 "goto": ws_switch_indent + "\t\t", | |
452 } | |
453 | |
454 index_final = tk_match_backet(index_next) | |
455 | |
456 case_ls = [] | |
457 | |
458 for i in range(index_next + 1, index_final): | |
459 # 'default' is seen as a label | |
460 # print(tokens[i].type, tokens[i].text) | |
461 if tokens[i].type in {Token.Keyword, Token.Name.Label}: | |
462 if tokens[i].text in {"case", "default:", "break", "return",
"comtinue", "goto"}: | |
463 ws_other_indent = extract_to_linestart(i) | |
464 # non ws start - we ignore for now, allow case A: case B
: ... | |
465 if ws_other_indent.isspace(): | |
466 ws_test_other = ws_test[tokens[i].text] | |
467 if not ws_other_indent.startswith(ws_test_other): | |
468 warning("%s is not indented enough" % tokens[i].
text, i, i) | |
469 | |
470 # assumes correct indentation... | |
471 if tokens[i].text in {"case", "default:"}: | |
472 if ws_other_indent == ws_test_other: | |
473 case_ls.append(i) | |
474 | |
475 case_ls.append(index_final - 1) | |
476 | |
477 # detect correct use of break/return | |
478 for j in range(len(case_ls) - 1): | |
479 i_case = case_ls[j] | |
480 i_end = case_ls[j + 1] | |
481 | |
482 # detect cascading cases, check there is one line inbetween at l
east | |
483 if tokens[i_case].line + 1 < tokens[i_end].line: | |
484 ok = False | |
485 | |
486 # scan case body backwards | |
487 for i in reversed(range(i_case, i_end)): | |
488 if tokens[i].type == Token.Punctuation: | |
489 if tokens[i].text == "}": | |
490 ws_other_indent = extract_to_linestart(i) | |
491 if ws_other_indent != ws_test["case"]: | |
492 # break/return _not_ found | |
493 break | |
494 | |
495 elif tokens[i].type in Token.Comment: | |
496 if tokens[i].text == "/* fall-through */": | |
497 ok = True | |
498 break | |
499 else: | |
500 #~ print("Commment '%s'" % tokens[i].text) | |
501 pass | |
502 | |
503 | |
504 elif tokens[i].type == Token.Keyword: | |
505 if tokens[i].text in {"break", "return", "continue",
"goto"}: | |
506 if tokens[i_case].line == tokens[i].line: | |
507 # Allow for... | |
508 # case BLAH: var = 1; break; | |
509 # ... possible there is if statements etc, b
ut assume not | |
510 ok = True | |
511 break | |
512 else: | |
513 ws_other_indent = extract_to_linestart(i) | |
514 ws_other_indent = ws_other_indent[:len(ws_ot
her_indent) - len(ws_other_indent.lstrip())] | |
515 ws_test_other = ws_test[tokens[i].text] | |
516 if ws_other_indent == ws_test_other: | |
517 ok = True | |
518 break | |
519 else: | |
520 pass | |
521 #~ print("indent mismatch...") | |
522 #~ print("'%s'" % ws_other_indent) | |
523 #~ print("'%s'" % ws_test_other) | |
524 if not ok: | |
525 warning("case/default statement has no break", i_case, i
_end) | |
526 #~ print(tk_range_to_str(i_case - 1, i_end - 1, expand_t
abs=True)) | |
527 else: | |
528 warning("switch isn't the first token in the line", index_kw_start,
index_kw_end) | |
529 else: | |
530 warning("switch brace missing", index_kw_start, index_kw_end) | |
531 | |
532 | |
533 def blender_check_kw_sizeof(index_kw): | |
534 if tokens[index_kw + 1].text != "(": | |
535 warning("expected '%s('" % tokens[index_kw].text, index_kw, index_kw + 1
) | |
536 | |
537 | |
538 def blender_check_cast(index_kw_start, index_kw_end): | |
539 # detect: '( float...' | |
540 if tokens[index_kw_start + 1].text.isspace(): | |
541 warning("cast has space after first bracket '( type...'", index_kw_start
, index_kw_end) | |
542 # detect: '...float )' | |
543 if tokens[index_kw_end - 1].text.isspace(): | |
544 warning("cast has space before last bracket '... )'", index_kw_start, in
dex_kw_end) | |
545 # detect no space before operator: '(float*)' | |
546 | |
547 for i in range(index_kw_start + 1, index_kw_end): | |
548 if tokens[i].text == "*": | |
549 # allow: '(*)' | |
550 if tokens[i - 1].type == Token.Punctuation: | |
551 pass | |
552 elif tokens[i - 1].text.isspace(): | |
553 pass | |
554 else: | |
555 warning("cast has no preceeding whitespace '(type*)'", index_kw_
start, index_kw_end) | |
556 | 238 |
557 | 239 |
558 def blender_check_comma(index_kw): | 240 def blender_check_comma(index_kw): |
559 i_next = tk_advance_ws_newline(index_kw, 1) | 241 i_next = tk_advance_ws_newline(index_kw, 1) |
560 | 242 |
561 # check there is at least one space between: | 243 # check there is at least one space between: |
562 # ,sometext | 244 # ,sometext |
563 if index_kw + 1 == i_next: | 245 if index_kw + 1 == i_next: |
564 warning("comma has no space after it ',sometext'", index_kw, i_next) | 246 warning("comma has no space after it ',sometext'", index_kw, i_next) |
565 | 247 |
566 if tokens[index_kw - 1].type == Token.Text and tokens[index_kw - 1].text.iss
pace(): | 248 if tokens[index_kw - 1].type == Token.Text and tokens[index_kw - 1].text.iss
pace(): |
567 warning("comma space before it 'sometext ,", index_kw, i_next) | 249 warning("comma space before it 'sometext ,", index_kw, i_next) |
568 | 250 |
569 | 251 |
570 def blender_check_period(index_kw): | |
571 # check we're now apart of ... | |
572 if (tokens[index_kw - 1].text == ".") or (tokens[index_kw + 1].text == "."): | |
573 return | |
574 | |
575 # 'a.b' | |
576 if tokens[index_kw - 1].type == Token.Text and tokens[index_kw - 1].text.iss
pace(): | |
577 warning("period space before it 'sometext .", index_kw, index_kw) | |
578 if tokens[index_kw + 1].type == Token.Text and tokens[index_kw + 1].text.iss
pace(): | |
579 warning("period space after it '. sometext", index_kw, index_kw) | |
580 | |
581 | |
582 def _is_ws_pad(index_start, index_end): | 252 def _is_ws_pad(index_start, index_end): |
583 return (tokens[index_start - 1].text.isspace() and | 253 return (tokens[index_start - 1].text.isspace() and |
584 tokens[index_end + 1].text.isspace()) | 254 tokens[index_end + 1].text.isspace()) |
585 | 255 |
586 | 256 |
587 def blender_check_operator(index_start, index_end, op_text, is_cpp): | 257 def blender_check_operator(index_start, index_end, op_text): |
588 if op_text == "->": | 258 if op_text == "->": |
589 # allow compiler to handle | 259 # allow compiler to handle |
590 return | 260 return |
591 | 261 |
592 if len(op_text) == 1: | 262 if len(op_text) == 1: |
593 if op_text in {"+", "-"}: | 263 if op_text in {"+", "-"}: |
594 # detect (-a) vs (a - b) | 264 # detect (-a) vs (a - b) |
595 if (not tokens[index_start - 1].text.isspace() and | 265 if (not tokens[index_start - 1].text.isspace() and |
596 tokens[index_start - 1].text not in {"[", "(", "{"}): | 266 tokens[index_start - 1].text not in {"[", "(", "{"}): |
597 warning("no space before operator '%s'" % op_text, index_start,
index_end) | 267 warning("no space before operator '%s'" % op_text, index_start,
index_end) |
598 if (not tokens[index_end + 1].text.isspace() and | 268 if (not tokens[index_end + 1].text.isspace() and |
599 tokens[index_end + 1].text not in {"]", ")", "}"}): | 269 tokens[index_end + 1].text not in {"]", ")", "}"}): |
600 # TODO, needs work to be useful | 270 # TODO, needs work to be useful |
601 # warning("no space after operator '%s'" % op_text, index_start,
index_end) | 271 # warning("no space after operator '%s'" % op_text, index_start,
index_end) |
602 pass | 272 pass |
603 | 273 |
604 elif op_text in {"/", "%", "^", "|", "=", "<", ">"}: | 274 elif op_text in {"/", "%", "^", "|", "=", "<", ">"}: |
605 if not _is_ws_pad(index_start, index_end): | 275 if not _is_ws_pad(index_start, index_end): |
606 if not (is_cpp and ("<" in op_text or ">" in op_text)): | 276 warning("no space around operator '%s'" % op_text, index_start,
index_end) |
607 warning("no space around operator '%s'" % op_text, index_sta
rt, index_end) | |
608 elif op_text == "&": | 277 elif op_text == "&": |
609 pass # TODO, check if this is a pointer reference or not | 278 pass # TODO, check if this is a pointer reference or not |
610 elif op_text == "*": | 279 elif op_text == "*": |
611 # This check could be improved, its a bit fuzzy | 280 pass # TODO, check if this is a pointer reference or not |
612 if ((tokens[index_start - 1].type in Token.Number) or | |
613 (tokens[index_start + 1].type in Token.Number)): | |
614 warning("no space around operator '%s'" % op_text, index_start,
index_end) | |
615 elif not (tokens[index_start - 1].text.isspace() or tokens[index_sta
rt - 1].text in {"(", "[", "{"}): | |
616 warning("no space before operator '%s'" % op_text, index_start,
index_end) | |
617 elif len(op_text) == 2: | 281 elif len(op_text) == 2: |
618 # todo, remove operator check from `if` | 282 # todo, remove operator check from `if` |
619 if op_text in {"+=", "-=", "*=", "/=", "&=", "|=", "^=", | 283 if op_text in {"+=", "-=", "*=", "/=", "&=", "|=", "^=", |
620 "&&", "||", | 284 "&&", "||", |
621 "==", "!=", "<=", ">=", | 285 "==", "!=", "<=", ">=", |
622 "<<", ">>", | 286 "<<", ">>", |
623 "%=", | 287 "%=", |
624 # not operators, pointer mix-ins | 288 # not operators, pointer mix-ins |
625 ">*", "<*", "-*", "+*", "=*", "/*", "%*", "^*", "|*", | 289 ">*", "<*", "-*", "+*", "=*", "/*", "%*", "^*", "!*", "|*
", |
626 }: | 290 }: |
627 if not _is_ws_pad(index_start, index_end): | 291 if not _is_ws_pad(index_start, index_end): |
628 if not (is_cpp and ("<" in op_text or ">" in op_text)): | 292 warning("no space around operator '%s'" % op_text, index_start,
index_end) |
629 warning("no space around operator '%s'" % op_text, index_sta
rt, index_end) | |
630 | 293 |
631 elif op_text in {"++", "--"}: | 294 elif op_text in {"++", "--"}: |
632 pass # TODO, figure out the side we are adding to! | 295 pass # TODO, figure out the side we are adding to! |
633 ''' | 296 ''' |
634 if (tokens[index_start - 1].text.isspace() or | 297 if (tokens[index_start - 1].text.isspace() or |
635 tokens[index_end + 1].text.isspace()): | 298 tokens[index_end + 1].text.isspace()): |
636 warning("spaces surrounding operator '%s'" % op_text, index_star
t, index_end) | 299 warning("spaces surrounding operator '%s'" % op_text, index_star
t, index_end) |
637 ''' | 300 ''' |
638 elif op_text in {"!!", "!*"}: | 301 elif op_text == "!!": |
639 # operators we _dont_ want whitespace after (pointers mainly) | |
640 # we can assume these are pointers | |
641 if tokens[index_end + 1].text.isspace(): | 302 if tokens[index_end + 1].text.isspace(): |
642 warning("spaces after operator '%s'" % op_text, index_start, ind
ex_end) | 303 warning("spaces after operator '%s'" % op_text, index_start, ind
ex_end) |
643 | 304 |
644 elif op_text == "**": | 305 elif op_text == "**": |
645 pass # handle below | 306 pass # handle below |
646 elif op_text == "::": | 307 elif op_text == "::": |
647 pass # C++, ignore for now | 308 pass # C++, ignore for now |
648 elif op_text == ":!*": | 309 elif op_text == ":!*": |
649 pass # ignore for now | 310 pass # ignore for now |
650 elif op_text == "*>": | |
651 pass # ignore for now, C++ <Class *> | |
652 else: | 311 else: |
653 warning("unhandled operator A '%s'" % op_text, index_start, index_en
d) | 312 warning("unhandled operator A '%s'" % op_text, index_start, index_en
d) |
654 else: | 313 else: |
655 #warning("unhandled operator B '%s'" % op_text, index_start, index_end) | 314 #warning("unhandled operator B '%s'" % op_text, index_start, index_end) |
656 pass | 315 pass |
657 | 316 |
658 if len(op_text) > 1: | 317 if len(op_text) > 1: |
659 if op_text[0] == "*" and op_text[-1] == "*": | 318 if op_text[0] == "*" and op_text[-1] == "*": |
660 if ((not tokens[index_start - 1].text.isspace()) and | 319 if ((not tokens[index_start - 1].text.isspace()) and |
661 (not tokens[index_start - 1].type == Token.Punctuation)): | 320 (not tokens[index_start - 1].type == Token.Punctuation)): |
662 warning("no space before pointer operator '%s'" % op_text, index
_start, index_end) | 321 warning("no space before pointer operator '%s'" % op_text, index
_start, index_end) |
663 if tokens[index_end + 1].text.isspace(): | 322 if tokens[index_end + 1].text.isspace(): |
664 warning("space before pointer operator '%s'" % op_text, index_st
art, index_end) | 323 warning("space before pointer operator '%s'" % op_text, index_st
art, index_end) |
665 | 324 |
666 # check if we are first in the line | 325 # check if we are first in the line |
667 if op_text[0] == "!": | 326 if op_text[0] == "!": |
668 # if (a && | 327 # if (a && |
669 # !b) | 328 # !b) |
670 pass | 329 pass |
671 elif op_text[0] == "*" and tokens[index_start + 1].text.isspace() is False: | 330 elif op_text[0] == "*" and tokens[index_start + 1].text.isspace() == False: |
672 pass # *a = b | 331 pass # *a = b |
673 elif len(op_text) == 1 and op_text[0] == "-" and tokens[index_start + 1].tex
t.isspace() is False: | 332 elif len(op_text) == 1 and op_text[0] == "-" and tokens[index_start + 1].tex
t.isspace() == False: |
674 pass # -1 | 333 pass # -1 |
675 elif len(op_text) == 2 and op_text == "++" and tokens[index_start + 1].text.
isspace() is False: | |
676 pass # ++a | |
677 elif len(op_text) == 2 and op_text == "--" and tokens[index_start + 1].text.
isspace() is False: | |
678 pass # --a | |
679 elif len(op_text) == 1 and op_text[0] == "&": | 334 elif len(op_text) == 1 and op_text[0] == "&": |
680 # if (a && | 335 # if (a && |
681 # &b) | 336 # &b) |
682 pass | 337 pass |
683 elif len(op_text) == 1 and op_text[0] == "~": | |
684 # C++ | |
685 # ~ClassName | |
686 pass | |
687 elif len(op_text) == 1 and op_text[0] == "?": | |
688 # (a == b) | |
689 # ? c : d | |
690 pass | |
691 elif len(op_text) == 1 and op_text[0] == ":": | |
692 # a = b ? c | |
693 # : d | |
694 pass | |
695 else: | |
696 if tk_index_is_linestart(index_start): | |
697 warning("operator starts a new line '%s'" % op_text, index_start, in
dex_end) | |
698 | |
699 | |
def blender_check_linelength(index_start, index_end, length):
    """Warn once per physical line in the token range that exceeds LIN_SIZE.

    :arg index_start: first token index of the line (inclusive).
    :arg index_end: last token index of the line (inclusive).
    :arg length: pre-computed expanded length of the whole range,
       used as a cheap pre-filter before re-expanding the text.
    """
    if length <= LIN_SIZE:
        return

    # only now pay for the (slower) tab-expanded reconstruction
    text = tk_range_to_str(index_start, index_end, expand_tabs=True)
    for line_text in text.split("\n"):
        if len(line_text) > LIN_SIZE:
            warning("line length %d > %d" % (len(line_text), LIN_SIZE), index_start, index_end)
706 | 339 |
707 | 340 |
def blender_check_function_definition(i):
    """Warn when a function definition's opening ``{`` is on the same line
    as the declaration, e.g. ``void myfunc() {`` (style guide wants it on
    its own line).

    Warning, this is a fairly slow check and guesses based on some fuzzy
    rules; ``i`` is the token index of a ``{`` token.
    """
    # assert(tokens[index].text == "{")

    # check function declaration is not:
    #  'void myfunc() {'
    #  ... other uses are handled by checks for statements
    #  this check is rather simplistic but tends to work well enough.

    # step back over empty tokens to the previous real token
    i_prev = i - 1
    while tokens[i_prev].text == "":
        i_prev -= 1

    # ensure this isnt '{' in its own line
    if tokens[i_prev].line == tokens[i].line:

        # check '}' isnt on the same line (one-liners like '{ }' are allowed)
        i_next = i + 1
        found = False
        while tokens[i_next].line == tokens[i].line:
            if tokens[i_next].text == "}":
                found = True
                break
            i_next += 1
        del i_next

        if found is False:

            # First check this isnt an assignment
            i_prev = tk_advance_no_ws(i, -1)
            # avoid '= {'
            #if tokens(index_prev).text != "="
            # print(tokens[i_prev].text)
            # allow:
            # - 'func()[] {'
            # - 'func() {'

            if tokens[i_prev].text in {")", "]"}:
                # scan back to the first token on a previous line, so we can
                # inspect the start of the declaration's line
                i_prev = i - 1
                while tokens[i_prev].line == tokens[i].line:
                    i_prev -= 1
                # the token may itself span lines; take its trailing line text
                split = tokens[i_prev].text.rsplit("\n", 1)
                if len(split) > 1 and split[-1] != "":
                    split_line = split[-1]
                else:
                    split_line = tokens[i_prev + 1].text

                if split_line and split_line[0].isspace():
                    # indented -> a statement inside a function body, not a
                    # top-level function definition; ignore.
                    pass
                else:
                    # no whitespace! column 0 -> looks like a definition
                    i_begin = i_prev + 1

                    # skip blank
                    if tokens[i_begin].text == "":
                        i_begin += 1
                    # skip static
                    if tokens[i_begin].text == "static":
                        i_begin += 1
                    while tokens[i_begin].text.isspace():
                        i_begin += 1
                    # now we are done skipping stuff

                    warning("function's '{' must be on a newline", i_begin, i)
774 | |
775 | |
def blender_check_brace_indent(i):
    """Check that a ``{`` at token index *i* and its matching ``}`` share the
    same leading indentation (only when the closing brace starts its line).
    """
    # assert(tokens[index].text == "{")

    i_match = tk_match_backet(i)

    # same-line braces can't mis-indent
    if tokens[i].line == tokens[i_match].line:
        return

    ws_close = extract_to_linestart(i_match)

    # allow for...
    #     a[] = {1, 2,
    #            3, 4}
    # ... so only check braces which are the first text on their line
    if not ws_close.isspace():
        return

    ws_open = extract_to_linestart(i)

    # compare only the leading whitespace of each line
    indent_open = ws_open[:len(ws_open) - len(ws_open.lstrip())]
    indent_close = ws_close[:len(ws_close) - len(ws_close.lstrip())]
    if indent_open != indent_close:
        warning("indentation '{' does not match brace", i, i_match)
796 | |
797 | |
def quick_check_indentation(lines):
    """
    Quick check for multiple tab indents.

    Warns (via warning_lineonly) when a line's tab indentation deepens by
    more than one level compared to the previous checked line.  Comments,
    pre-processor lines, labels and multi-line defines are skipped.
    """
    t_prev = -1          # tab depth of the previous non-skipped line (-1 = none yet)
    m_comment_prev = False  # NOTE(review): appears unused in this version
    ls_prev = ""         # stripped text of the previous line (for '\\' continuation)

    for i, l in enumerate(lines):
        skip = False

        # skip blank lines
        ls = l.strip()

        # comment or pre-processor
        if ls:
            # #ifdef ... or ... // comment
            if (ls[0] == "#" or ls[0:2] == "//"):
                skip = True
            # label:
            elif (':' in ls and l[0] != '\t'):
                skip = True
            # /* comment */
            #~ elif ls.startswith("/*") and ls.endswith("*/"):
            #~     skip = True
            # /* some comment...
            elif ls.startswith("/*"):
                skip = True
            # line ending a comment: */
            elif ls == "*/":
                skip = True
            # * middle of multi line comment block
            elif ls.startswith("* "):
                skip = True
            # exclude muli-line defines
            elif ls.endswith("\\") or ls.endswith("(void)0") or ls_prev.endswith("\\"):
                skip = True

        ls_prev = ls

        if skip:
            continue

        if ls:
            # measure leading tabs only (spaces are not counted as indent here)
            ls = l.lstrip("\t")
            tabs = l[:len(l) - len(ls)]
            t = len(tabs)
            # jumping more than one indent level at once is suspicious
            if (t > t_prev + 1) and (t_prev != -1):
                warning_lineonly("indentation mis-match (indent of %d) '%s'" % (t - t_prev, tabs), i + 1)
            t_prev = t
848 | |
import re

# Pre-compiled patterns used by quick_check_include_guard() to find the
# '#ifndef GUARD' / '#define GUARD' pair at the top of a header.
# NOTE: the previous patterns used the class [A-z0-9_]; the range A-z also
# matches '[', '\\', ']', '^' and '`' (ASCII between 'Z' and 'a'), so junk
# characters could leak into the captured guard name.  Raw strings avoid
# the invalid '\s' escape warning on modern Python.
re_ifndef = re.compile(r"^\s*#\s*ifndef\s+([A-Za-z0-9_]+).*$")
re_define = re.compile(r"^\s*#\s*define\s+([A-Za-z0-9_]+).*$")
852 | |
def quick_check_include_guard(lines):
    """Check a header's include guard.

    Looks for the first ``#ifndef NAME`` line followed (allowing blank
    lines) by ``#define NAME``, and warns when the guard is missing or
    doesn't match the name derived from the file's basename
    (``__FILENAME_H__`` form).  Uses the module-global ``filepath``.
    """
    found = 0  # NOTE(review): appears unused in this version
    def_value = ""
    ok = False

    def fn_as_guard(fn):
        # 'some-file.h' -> '__SOME_FILE_H__'
        name = os.path.basename(fn).upper().replace(".", "_").replace("-", "_")
        return "__%s__" % name

    for i, l in enumerate(lines):
        ndef_match = re_ifndef.match(l)
        if ndef_match:
            ndef_value = ndef_match.group(1).strip()
            # scan forward for the matching '#define'
            for j in range(i + 1, len(lines)):
                l_next = lines[j]
                def_match = re_define.match(l_next)
                if def_match:
                    def_value = def_match.group(1).strip()
                    if def_value == ndef_value:
                        ok = True
                        break
                elif l_next.strip():
                    # print(filepath)
                    # found non empty non ndef line. quit
                    break
                else:
                    # allow blank lines
                    pass
            # only the first '#ifndef' is considered
            break

    guard = fn_as_guard(filepath)

    if ok:
        # print("found:", def_value, "->", filepath)
        if def_value != guard:
            # print("%s: %s -> %s" % (filepath, def_value, guard))
            # NOTE: 'i' is the loop variable leaked from the 'for' above,
            # i.e. the line of the '#ifndef'.
            warning_lineonly("non-conforming include guard (found %r, expected %r)" % (def_value, guard), i + 1)
    else:
        warning_lineonly("missing include guard %r" % guard, 1)
892 | |
def quick_check_source(fp, code, args):
    """Run the cheap, lexer-free checks on *fp*.

    Sets the module-global ``filepath`` then runs the include-guard check
    (headers only) and the tab-indentation check.
    """
    global filepath

    filepath = fp
    source_lines = code.split("\n")

    # only header files are expected to carry an include guard
    if fp.endswith((".h", ".hxx", ".hpp")):
        quick_check_include_guard(source_lines)

    quick_check_indentation(source_lines)
907 | |
def scan_source(fp, code, args):
    """Tokenize *code* with Pygments' CLexer and run the token-level style
    checks (keywords, punctuation, operators, doxygen \\file names, line
    length).  Fills the module-global ``tokens`` list and sets ``filepath``.
    """
    # print("scanning: %r" % fp)

    global filepath

    # C++ sources get a few relaxed rules (e.g. 'delete []', templates)
    is_cpp = fp.endswith((".cpp", ".cxx"))

    filepath = fp

    #if "displist.c" not in filepath:
    #    return

    filepath_base = os.path.basename(filepath)

    #print(highlight(code, CLexer(), RawTokenFormatter()).decode('utf-8'))

    # (re)build the module-global token list, tracking each token's line
    del tokens[:]
    line = 1

    for ttype, text in lex(code, CLexer()):
        if text:
            tokens.append(TokStore(ttype, text, line))
            line += text.count("\n")

    col = 0  # track line length
    index_line_start = 0

    for i, tok in enumerate(tokens):
        #print(tok.type, tok.text)
        if tok.type == Token.Keyword:
            if tok.text in {"switch", "while", "if", "for"}:
                item_range = extract_statement_if(i)
                if item_range is not None:
                    blender_check_kw_if(item_range[0], i, item_range[1])
                    if tok.text == "switch":
                        blender_check_kw_switch(item_range[0], i, item_range[1])
            elif tok.text == "else":
                blender_check_kw_else(i)
            elif tok.text == "sizeof":
                blender_check_kw_sizeof(i)
        elif tok.type == Token.Punctuation:
            if tok.text == ",":
                blender_check_comma(i)
            elif tok.text == ".":
                blender_check_period(i)
            elif tok.text == "[":
                # note, we're quite relaxed about this but
                # disallow 'foo ['
                if tokens[i - 1].text.isspace():
                    if is_cpp and tokens[i + 1].text == "]":
                        # c++ can do delete []
                        pass
                    else:
                        warning("space before '['", i, i)
            elif tok.text == "(":
                # check if this is a cast, eg:
                #  (char), (char **), (float (*)[3])
                item_range = extract_cast(i)
                if item_range is not None:
                    blender_check_cast(item_range[0], item_range[1])
            elif tok.text == "{":
                # check matching brace is indented correctly (slow!)
                blender_check_brace_indent(i)

                # check previous character is either a '{' or whitespace.
                if (tokens[i - 1].line == tok.line) and not (tokens[i - 1].text.isspace() or tokens[i - 1].text == "{"):
                    warning("no space before '{'", i, i)

                blender_check_function_definition(i)

        elif tok.type == Token.Operator:
            # we check these in pairs, only want first
            if tokens[i - 1].type != Token.Operator:
                op, index_kw_end = extract_operator(i)
                blender_check_operator(i, index_kw_end, op, is_cpp)
        elif tok.type in Token.Comment:
            # verify a doxygen '\file name' / '@file name' matches the
            # actual filename
            doxyfn = None
            if "\\file" in tok.text:
                doxyfn = tok.text.split("\\file", 1)[1].strip().split()[0]
            elif "@file" in tok.text:
                doxyfn = tok.text.split("@file", 1)[1].strip().split()[0]

            if doxyfn is not None:
                doxyfn_base = os.path.basename(doxyfn)
                if doxyfn_base != filepath_base:
                    warning("doxygen filename mismatch %s != %s" % (doxyfn_base, filepath_base), i, i)

        # ensure line length
        if (not args.no_length_check) and tok.type == Token.Text and tok.text == "\n":
            # check line len
            blender_check_linelength(index_line_start, i - 1, col)

            col = 0
            index_line_start = i + 1
        else:
            col += len(tok.text.expandtabs(TAB_SIZE))

        #elif tok.type == Token.Name:
        #    print(tok.text)

        #print(ttype, type(ttype))
        #print((ttype, value))

    #for ttype, value in la:
    #    #print(value, end="")
1014 | |
def scan_source_filepath(filepath, args):
    """Run all style checks (quick checks + full lexer pass) on one file.

    :arg filepath: path of the source file to check.
    :arg args: parsed command line arguments (``no_length_check`` is read
       by the lexer pass).
    """
    # for quick tests
    #~ if not filepath.endswith("creator.c"):
    #~     return

    # use a context manager so the handle is closed deterministically
    # (previously the file object was left for the GC to close)
    with open(filepath, 'r', encoding="utf-8") as fh:
        code = fh.read()

    # fast checks which don't require full parsing
    quick_check_source(filepath, code, args)

    # use lexer
    scan_source(filepath, code, args)
1027 | |
1028 | |
def scan_source_recursive(dirpath, args):
    """Recursively scan *dirpath*, checking every C/C++ source file found
    (skipping hidden directories such as '.svn' and paths matched by
    ``is_ignore``).
    """
    import os
    from os.path import join, splitext

    def source_list(path, filename_check=None):
        # yield file paths under 'path', optionally filtered
        for dirpath, dirnames, filenames in os.walk(path):

            # skip hidden dirs, e.g. '.svn', '.git'.
            # Prune in-place so os.walk never descends into them.
            # (the old test 'dirpath.startswith(".")' missed nested '.svn'
            # directories — joined paths don't start with '.' — and skipped
            # everything when the walk root itself was e.g. '.')
            dirnames[:] = [d for d in dirnames if not d.startswith(".")]

            for filename in filenames:
                filepath = join(dirpath, filename)
                if filename_check is None or filename_check(filepath):
                    yield filepath

    def is_source(filename):
        # extensions considered C/C++ (plus OSL shaders)
        ext = splitext(filename)[1]
        return (ext in {".c", ".inl", ".cpp", ".cxx", ".hpp", ".hxx", ".h", ".osl"})

    for filepath in sorted(source_list(dirpath, is_source)):
        if is_ignore(filepath):
            continue

        scan_source_filepath(filepath, args)
1054 | |
1055 | |
if __name__ == "__main__":
    import sys
    import os

    # command line interface: one or more files/directories to check,
    # optional -l/--no-length-check to silence long-line warnings
    desc = 'Check C/C++ code for conformance with blenders style guide:\nhttp://wiki.blender.org/index.php/Dev:Doc/CodeStyle)'
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument("paths", nargs='+', help="list of files or directories to check")
    parser.add_argument("-l", "--no-length-check", action="store_true",
                        help="skip warnings for long lines")
    args = parser.parse_args()

    # developer convenience: flip to 'if 1:' to scan a hard-coded subtree
    # of the blender source instead of the command line paths
    if 0:
        SOURCE_DIR = os.path.normpath(os.path.abspath(os.path.normpath(os.path.join(os.path.dirname(__file__), "..", ".."))))
        #scan_source_recursive(os.path.join(SOURCE_DIR, "source", "blender", "bmesh"))
        scan_source_recursive(os.path.join(SOURCE_DIR, "source/blender/makesrna/intern"), args)
        sys.exit(0)

    for filepath in args.paths:
        if os.path.isdir(filepath):
            # recursive search
            scan_source_recursive(filepath, args)
        else:
            # single file
            scan_source_filepath(filepath, args)
LEFT | RIGHT |